problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_24518
|
rasdani/github-patches
|
git_diff
|
apache__tvm-3413
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[potential bug?] SimulatedAnnealingOptimizer will find useless candidate?
When I read the code of SimulatedAnnealingOptimizer, I found that the method `find_maximums` is implemented with a heap, and it first inserts some dummy elements, as follows:
https://github.com/dmlc/tvm/blob/c0afc91884c1e664e19ab10f5f4898425975a756/python/tvm/autotvm/tuner/sa_model_optimizer.py#L78
But in the code there is no guarantee that the output list contains no dummy elements, so could this be a bug when we have nearly run out of configs and cannot find new points during the loop?
--- END ISSUE ---
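For illustration, the failure mode described above can be reproduced with a minimal standalone heap that mirrors the structure of `find_maximums`, independent of TVM; the candidate scores and indices below are made up for the example.

```python
import heapq

num = 4                                                  # hypothetical number of maximums requested
heap_items = [(float('-inf'), -i) for i in range(num)]   # dummy entries: indices 0, -1, -2, -3
heapq.heapify(heap_items)

# Suppose the search only ever discovers two real candidates (scores are made up).
for score, point in [(0.9, 17), (0.7, 42)]:
    if score > heap_items[0][0]:
        heapq.heapreplace(heap_items, (score, point))

heap_items.sort(key=lambda item: -item[0])
print([p for _, p in heap_items])
# e.g. [17, 42, 0, -1] -- the unreplaced dummy indices come back as "maximums",
# and the dummy index 0 can even collide with a real config index 0.
```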
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/tvm/autotvm/tuner/sa_model_optimizer.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 # pylint: disable=consider-using-enumerate, invalid-name
18 """
19 Cost model optimizer based on simulated annealing
20 """
21
22 import heapq
23 import logging
24 import time
25
26 import numpy as np
27
28 from ..util import sample_ints
29 from .model_based_tuner import ModelOptimizer, knob2point, point2knob
30
31 logger = logging.getLogger('autotvm')
32
33 class SimulatedAnnealingOptimizer(ModelOptimizer):
34 """parallel simulated annealing optimization algorithm
35
36 Parameters
37 ----------
38 task: Task
39 The tuning task
40 n_iter: int
41 The number of iterations of simulated annealing
42 temp: float or Array of float
43 If is a single float, then use a constant temperature.
44 If is an Array, then perform linear cooling from temp[0] to temp[1]
45 early_stop: int, optional
46 Stop iteration if the optimal set do not change in `early_stop` rounds
47 log_interval: int, optional
48 Print log every `log_interval` iterations
49 """
50 def __init__(self, task, n_iter=500, temp=(1, 0), persistent=True, parallel_size=128,
51 early_stop=50, log_interval=50):
52 super(SimulatedAnnealingOptimizer, self).__init__()
53
54 self.task = task
55 self.dims = [len(x) for x in self.task.config_space.space_map.values()]
56
57 self.n_iter = n_iter
58 self.temp = temp
59 self.persistent = persistent
60 self.parallel_size = min(parallel_size, len(self.task.config_space))
61 self.early_stop = early_stop or 1e9
62 self.log_interval = log_interval
63 self.points = None
64
65 def find_maximums(self, model, num, exclusive):
66 tic = time.time()
67 temp, n_iter, early_stop, log_interval = \
68 self.temp, self.n_iter, self.early_stop, self.log_interval
69
70 if self.persistent and self.points is not None:
71 points = self.points
72 else:
73 points = np.array(sample_ints(0, len(self.task.config_space), self.parallel_size))
74
75 scores = model.predict(points)
76
77 # build heap and insert initial points
78 heap_items = [(float('-inf'), -i) for i in range(num)]
79 heapq.heapify(heap_items)
80 in_heap = set(exclusive)
81 in_heap.update([-i for i in range(num)])
82
83 for s, p in zip(scores, points):
84 if s > heap_items[0][0] and p not in in_heap:
85 pop = heapq.heapreplace(heap_items, (s, p))
86 in_heap.remove(pop[1])
87 in_heap.add(p)
88
89 k = 0
90 k_last_modify = 0
91
92 if isinstance(temp, (tuple, list, np.ndarray)):
93 t = temp[0]
94 cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1)
95 else:
96 t = temp
97 cool = 0
98
99 while k < n_iter and k < k_last_modify + early_stop:
100 new_points = np.empty_like(points)
101 for i, p in enumerate(points):
102 new_points[i] = random_walk(p, self.dims)
103
104 new_scores = model.predict(new_points)
105
106 ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))
107 ac_index = np.random.random(len(ac_prob)) < ac_prob
108
109 points[ac_index] = new_points[ac_index]
110 scores[ac_index] = new_scores[ac_index]
111
112 for s, p in zip(new_scores, new_points):
113 if s > heap_items[0][0] and p not in in_heap:
114 pop = heapq.heapreplace(heap_items, (s, p))
115 in_heap.remove(pop[1])
116 in_heap.add(p)
117 k_last_modify = k
118
119 k += 1
120 t -= cool
121
122 if log_interval and k % log_interval == 0:
123 t_str = "%.2f" % t
124 logger.debug("SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\ttemp: %s\t"
125 "elapsed: %.2f",
126 k, k_last_modify, heap_items[0][0],
127 np.max([v for v, _ in heap_items]), t_str,
128 time.time() - tic)
129
130 heap_items.sort(key=lambda item: -item[0])
131 logger.debug("SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\telapsed: %.2f",
132 k, k_last_modify, heap_items[-1][0], heap_items[0][0], time.time() - tic)
133 logger.debug("SA Maximums: %s", heap_items)
134
135 if self.persistent:
136 self.points = points
137
138 return [x[1] for x in heap_items]
139
140 def random_walk(p, dims):
141 """random walk as local transition
142
143 Parameters
144 ----------
145 p: int
146 index of the ConfigEntity
147 dims: Array of int
148 sizes of each dimension
149
150 Returns
151 -------
152 new_p: int
153 new neighborhood index
154 """
155 # transform to knob form
156 old = point2knob(p, dims)
157 new = list(old)
158
159 # mutate
160 while new == old:
161 from_i = np.random.randint(len(old))
162 to_v = np.random.randint(dims[from_i])
163 new[from_i] = to_v
164
165 # transform to index form
166 return knob2point(new, dims)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/tvm/autotvm/tuner/sa_model_optimizer.py b/python/tvm/autotvm/tuner/sa_model_optimizer.py
--- a/python/tvm/autotvm/tuner/sa_model_optimizer.py
+++ b/python/tvm/autotvm/tuner/sa_model_optimizer.py
@@ -75,10 +75,10 @@
scores = model.predict(points)
# build heap and insert initial points
- heap_items = [(float('-inf'), -i) for i in range(num)]
+ heap_items = [(float('-inf'), - 1 - i) for i in range(num)]
heapq.heapify(heap_items)
in_heap = set(exclusive)
- in_heap.update([-i for i in range(num)])
+ in_heap.update([x for x in heap_items])
for s, p in zip(scores, points):
if s > heap_items[0][0] and p not in in_heap:
@@ -128,6 +128,7 @@
time.time() - tic)
heap_items.sort(key=lambda item: -item[0])
+ heap_items = [x for x in heap_items if x >= 0]
logger.debug("SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\telapsed: %.2f",
k, k_last_modify, heap_items[-1][0], heap_items[0][0], time.time() - tic)
logger.debug("SA Maximums: %s", heap_items)
|
{"golden_diff": "diff --git a/python/tvm/autotvm/tuner/sa_model_optimizer.py b/python/tvm/autotvm/tuner/sa_model_optimizer.py\n--- a/python/tvm/autotvm/tuner/sa_model_optimizer.py\n+++ b/python/tvm/autotvm/tuner/sa_model_optimizer.py\n@@ -75,10 +75,10 @@\n scores = model.predict(points)\n \n # build heap and insert initial points\n- heap_items = [(float('-inf'), -i) for i in range(num)]\n+ heap_items = [(float('-inf'), - 1 - i) for i in range(num)]\n heapq.heapify(heap_items)\n in_heap = set(exclusive)\n- in_heap.update([-i for i in range(num)])\n+ in_heap.update([x for x in heap_items])\n \n for s, p in zip(scores, points):\n if s > heap_items[0][0] and p not in in_heap:\n@@ -128,6 +128,7 @@\n time.time() - tic)\n \n heap_items.sort(key=lambda item: -item[0])\n+ heap_items = [x for x in heap_items if x >= 0]\n logger.debug(\"SA iter: %d\\tlast_update: %d\\tmax-0: %.2f\\tmax-1: %.2f\\telapsed: %.2f\",\n k, k_last_modify, heap_items[-1][0], heap_items[0][0], time.time() - tic)\n logger.debug(\"SA Maximums: %s\", heap_items)\n", "issue": "[potential bug?] SimulatedAnnealingOptimizer will find useless candidate?\nWhen I read the code of SimulatedAnnealingOptimize, I find the method `find_maximums` is implemented by heap, and insert some dummy elements first as following:\r\nhttps://github.com/dmlc/tvm/blob/c0afc91884c1e664e19ab10f5f4898425975a756/python/tvm/autotvm/tuner/sa_model_optimizer.py#L78\r\n\r\nBut, in the code, there is no guarantee that the output list contains no dummy elements, so will it be a bug when we nearly run out the configs and can't find new points during the loop?\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=consider-using-enumerate, invalid-name\n\"\"\"\nCost model optimizer based on simulated annealing\n\"\"\"\n\nimport heapq\nimport logging\nimport time\n\nimport numpy as np\n\nfrom ..util import sample_ints\nfrom .model_based_tuner import ModelOptimizer, knob2point, point2knob\n\nlogger = logging.getLogger('autotvm')\n\nclass SimulatedAnnealingOptimizer(ModelOptimizer):\n \"\"\"parallel simulated annealing optimization algorithm\n\n Parameters\n ----------\n task: Task\n The tuning task\n n_iter: int\n The number of iterations of simulated annealing\n temp: float or Array of float\n If is a single float, then use a constant temperature.\n If is an Array, then perform linear cooling from temp[0] to temp[1]\n early_stop: int, optional\n Stop iteration if the optimal set do not change in `early_stop` rounds\n log_interval: int, optional\n Print log every `log_interval` iterations\n \"\"\"\n def __init__(self, task, n_iter=500, temp=(1, 0), persistent=True, parallel_size=128,\n early_stop=50, log_interval=50):\n super(SimulatedAnnealingOptimizer, self).__init__()\n\n self.task = task\n self.dims = [len(x) for x in self.task.config_space.space_map.values()]\n\n self.n_iter = n_iter\n self.temp = temp\n self.persistent = persistent\n self.parallel_size = min(parallel_size, len(self.task.config_space))\n self.early_stop = early_stop or 1e9\n self.log_interval = log_interval\n self.points = None\n\n def find_maximums(self, model, num, exclusive):\n tic = time.time()\n temp, n_iter, early_stop, log_interval = \\\n self.temp, self.n_iter, self.early_stop, self.log_interval\n\n if self.persistent and self.points is not None:\n points = self.points\n else:\n points = np.array(sample_ints(0, len(self.task.config_space), self.parallel_size))\n\n scores = model.predict(points)\n\n # build heap and insert initial points\n heap_items = [(float('-inf'), -i) for i in range(num)]\n heapq.heapify(heap_items)\n in_heap = set(exclusive)\n in_heap.update([-i for i in range(num)])\n\n for s, p in zip(scores, points):\n if s > heap_items[0][0] and p not in in_heap:\n pop = heapq.heapreplace(heap_items, (s, p))\n in_heap.remove(pop[1])\n in_heap.add(p)\n\n k = 0\n k_last_modify = 0\n\n if isinstance(temp, (tuple, list, np.ndarray)):\n t = temp[0]\n cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1)\n else:\n t = temp\n cool = 0\n\n while k < n_iter and k < k_last_modify + early_stop:\n new_points = np.empty_like(points)\n for i, p in enumerate(points):\n new_points[i] = random_walk(p, self.dims)\n\n new_scores = model.predict(new_points)\n\n ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))\n ac_index = np.random.random(len(ac_prob)) < ac_prob\n\n points[ac_index] = new_points[ac_index]\n scores[ac_index] = new_scores[ac_index]\n\n for s, p in zip(new_scores, new_points):\n if s > heap_items[0][0] and p not in in_heap:\n pop = heapq.heapreplace(heap_items, (s, p))\n in_heap.remove(pop[1])\n in_heap.add(p)\n k_last_modify = k\n\n k += 1\n t -= cool\n\n if log_interval and k % log_interval == 0:\n t_str = \"%.2f\" % t\n logger.debug(\"SA iter: %d\\tlast_update: %d\\tmax-0: %.2f\\tmax-1: %.2f\\ttemp: %s\\t\"\n \"elapsed: %.2f\",\n k, k_last_modify, heap_items[0][0],\n np.max([v for v, _ in heap_items]), t_str,\n time.time() - tic)\n\n heap_items.sort(key=lambda item: -item[0])\n logger.debug(\"SA iter: %d\\tlast_update: %d\\tmax-0: %.2f\\tmax-1: %.2f\\telapsed: %.2f\",\n k, 
k_last_modify, heap_items[-1][0], heap_items[0][0], time.time() - tic)\n logger.debug(\"SA Maximums: %s\", heap_items)\n\n if self.persistent:\n self.points = points\n\n return [x[1] for x in heap_items]\n\ndef random_walk(p, dims):\n \"\"\"random walk as local transition\n\n Parameters\n ----------\n p: int\n index of the ConfigEntity\n dims: Array of int\n sizes of each dimension\n\n Returns\n -------\n new_p: int\n new neighborhood index\n \"\"\"\n # transform to knob form\n old = point2knob(p, dims)\n new = list(old)\n\n # mutate\n while new == old:\n from_i = np.random.randint(len(old))\n to_v = np.random.randint(dims[from_i])\n new[from_i] = to_v\n\n # transform to index form\n return knob2point(new, dims)\n", "path": "python/tvm/autotvm/tuner/sa_model_optimizer.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=consider-using-enumerate, invalid-name\n\"\"\"\nCost model optimizer based on simulated annealing\n\"\"\"\n\nimport heapq\nimport logging\nimport time\n\nimport numpy as np\n\nfrom ..util import sample_ints\nfrom .model_based_tuner import ModelOptimizer, knob2point, point2knob\n\nlogger = logging.getLogger('autotvm')\n\nclass SimulatedAnnealingOptimizer(ModelOptimizer):\n \"\"\"parallel simulated annealing optimization algorithm\n\n Parameters\n ----------\n task: Task\n The tuning task\n n_iter: int\n The number of iterations of simulated annealing\n temp: float or Array of float\n If is a single float, then use a constant temperature.\n If is an Array, then perform linear cooling from temp[0] to temp[1]\n early_stop: int, optional\n Stop iteration if the optimal set do not change in `early_stop` rounds\n log_interval: int, optional\n Print log every `log_interval` iterations\n \"\"\"\n def __init__(self, task, n_iter=500, temp=(1, 0), persistent=True, parallel_size=128,\n early_stop=50, log_interval=50):\n super(SimulatedAnnealingOptimizer, self).__init__()\n\n self.task = task\n self.dims = [len(x) for x in self.task.config_space.space_map.values()]\n\n self.n_iter = n_iter\n self.temp = temp\n self.persistent = persistent\n self.parallel_size = min(parallel_size, len(self.task.config_space))\n self.early_stop = early_stop or 1e9\n self.log_interval = log_interval\n self.points = None\n\n def find_maximums(self, model, num, exclusive):\n tic = time.time()\n temp, n_iter, early_stop, log_interval = \\\n self.temp, self.n_iter, self.early_stop, self.log_interval\n\n if self.persistent and self.points is not None:\n points = self.points\n else:\n points = np.array(sample_ints(0, len(self.task.config_space), self.parallel_size))\n\n scores = model.predict(points)\n\n # build heap and insert initial points\n heap_items = [(float('-inf'), - 1 - i) for i in range(num)]\n 
heapq.heapify(heap_items)\n in_heap = set(exclusive)\n in_heap.update([x for x in heap_items])\n\n for s, p in zip(scores, points):\n if s > heap_items[0][0] and p not in in_heap:\n pop = heapq.heapreplace(heap_items, (s, p))\n in_heap.remove(pop[1])\n in_heap.add(p)\n\n k = 0\n k_last_modify = 0\n\n if isinstance(temp, (tuple, list, np.ndarray)):\n t = temp[0]\n cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1)\n else:\n t = temp\n cool = 0\n\n while k < n_iter and k < k_last_modify + early_stop:\n new_points = np.empty_like(points)\n for i, p in enumerate(points):\n new_points[i] = random_walk(p, self.dims)\n\n new_scores = model.predict(new_points)\n\n ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))\n ac_index = np.random.random(len(ac_prob)) < ac_prob\n\n points[ac_index] = new_points[ac_index]\n scores[ac_index] = new_scores[ac_index]\n\n for s, p in zip(new_scores, new_points):\n if s > heap_items[0][0] and p not in in_heap:\n pop = heapq.heapreplace(heap_items, (s, p))\n in_heap.remove(pop[1])\n in_heap.add(p)\n k_last_modify = k\n\n k += 1\n t -= cool\n\n if log_interval and k % log_interval == 0:\n t_str = \"%.2f\" % t\n logger.debug(\"SA iter: %d\\tlast_update: %d\\tmax-0: %.2f\\tmax-1: %.2f\\ttemp: %s\\t\"\n \"elapsed: %.2f\",\n k, k_last_modify, heap_items[0][0],\n np.max([v for v, _ in heap_items]), t_str,\n time.time() - tic)\n\n heap_items.sort(key=lambda item: -item[0])\n heap_items = [x for x in heap_items if x >= 0]\n logger.debug(\"SA iter: %d\\tlast_update: %d\\tmax-0: %.2f\\tmax-1: %.2f\\telapsed: %.2f\",\n k, k_last_modify, heap_items[-1][0], heap_items[0][0], time.time() - tic)\n logger.debug(\"SA Maximums: %s\", heap_items)\n\n if self.persistent:\n self.points = points\n\n return [x[1] for x in heap_items]\n\ndef random_walk(p, dims):\n \"\"\"random walk as local transition\n\n Parameters\n ----------\n p: int\n index of the ConfigEntity\n dims: Array of int\n sizes of each dimension\n\n Returns\n -------\n new_p: int\n new neighborhood index\n \"\"\"\n # transform to knob form\n old = point2knob(p, dims)\n new = list(old)\n\n # mutate\n while new == old:\n from_i = np.random.randint(len(old))\n to_v = np.random.randint(dims[from_i])\n new[from_i] = to_v\n\n # transform to index form\n return knob2point(new, dims)\n", "path": "python/tvm/autotvm/tuner/sa_model_optimizer.py"}]}
| 2,236 | 343 |
gh_patches_debug_38178
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-442
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate & Remove X-PyPI-Last-Serial on File Responses
Ideally we'd like not to serve the file responses via the Warehouse web application; however, we currently need to do this in order to add the `X-PyPI-Last-Serial` response header. Bandersnatch is currently using this, so it would be great to investigate what it's being used for and whether we can safely deprecate and remove it.
--- END ISSUE ---
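As a rough sketch of how a mirroring client might consume this header (the URL and the use of `requests` are assumptions for the example, not bandersnatch's actual code):

```python
import requests

# Hypothetical file URL served by the Warehouse "packaging.file" route.
file_url = "https://pypi.example.org/packages/source/e/example/example-1.0.tar.gz"

resp = requests.get(file_url)
last_serial = int(resp.headers.get("X-PyPI-Last-Serial", 0))

# A mirror can compare this serial with the last serial it synced for the project
# to decide whether an MD5 mismatch means the file changed or the download is corrupt.
print(last_serial)
```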
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/packaging/views.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import fs.errors
14
15 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
16 from pyramid.response import FileIter, Response
17 from pyramid.view import view_config
18 from sqlalchemy import func
19 from sqlalchemy.orm.exc import NoResultFound
20
21 from warehouse.accounts.models import User
22 from warehouse.cache.http import cache_control
23 from warehouse.cache.origin import origin_cache
24 from warehouse.packaging.interfaces import IDownloadStatService
25 from warehouse.packaging.models import Release, File, Role, JournalEntry
26
27
28 @view_config(
29 route_name="packaging.project",
30 renderer="packaging/detail.html",
31 decorator=[
32 cache_control(1 * 24 * 60 * 60), # 1 day
33 origin_cache(7 * 24 * 60 * 60), # 7 days
34 ],
35 )
36 def project_detail(project, request):
37 if project.name != request.matchdict.get("name", project.name):
38 return HTTPMovedPermanently(
39 request.current_route_url(name=project.name),
40 )
41
42 try:
43 release = project.releases.order_by(
44 Release._pypi_ordering.desc()
45 ).limit(1).one()
46 except NoResultFound:
47 raise HTTPNotFound from None
48
49 return release_detail(release, request)
50
51
52 @view_config(
53 route_name="packaging.release",
54 renderer="packaging/detail.html",
55 decorator=[
56 cache_control(7 * 24 * 60 * 60), # 7 days
57 origin_cache(30 * 24 * 60 * 60), # 30 days
58 ],
59 )
60 def release_detail(release, request):
61 project = release.project
62
63 if project.name != request.matchdict.get("name", project.name):
64 return HTTPMovedPermanently(
65 request.current_route_url(name=project.name),
66 )
67
68 # Get all of the registered versions for this Project, in order of newest
69 # to oldest.
70 all_releases = (
71 project.releases
72 .with_entities(Release.version, Release.created)
73 .order_by(Release._pypi_ordering.desc())
74 .all()
75 )
76
77 # Get all of the maintainers for this project.
78 maintainers = [
79 r.user
80 for r in (
81 request.db.query(Role)
82 .join(User)
83 .filter(Role.project == project)
84 .distinct(User.username)
85 .order_by(User.username)
86 .all()
87 )
88 ]
89
90 stats_svc = request.find_service(IDownloadStatService)
91
92 return {
93 "project": project,
94 "release": release,
95 "files": release.files.all(),
96 "all_releases": all_releases,
97 "maintainers": maintainers,
98 "download_stats": {
99 "daily": stats_svc.get_daily_stats(project.name),
100 "weekly": stats_svc.get_weekly_stats(project.name),
101 "monthly": stats_svc.get_monthly_stats(project.name),
102 },
103 }
104
105
106 @view_config(
107 route_name="packaging.file",
108 decorator=[
109 cache_control(365 * 24 * 60 * 60), # 1 year
110 ],
111 )
112 def packages(request):
113 # The amount of logic that we can do in this view is very limited, this
114 # view needs to be able to be handled by Fastly directly hitting S3 instead
115 # of actually hitting this view. This more or less means that we're limited
116 # to just setting headers and serving the actual file. In addition the
117 # headers that we can set, have to be able to be determined at file upload
118 # time instead of dynamically.
119
120 # Grab the path of the file that we're attempting to serve
121 path = request.matchdict["path"]
122
123 # We need to look up the File that is associated with this path, either the
124 # package path or the pgp path. If that doesn't exist then we'll bail out
125 # early with a 404.
126 try:
127 file_ = (
128 request.db.query(File)
129 .filter((File.path == path) | (File.pgp_path == path))
130 .one()
131 )
132 except NoResultFound:
133 raise HTTPNotFound from None
134
135 # If this request is for a PGP signature, and the file doesn't have a PGP
136 # signature, then we can go ahead and 404 now before hitting the file
137 # storage.
138 if path == file_.pgp_path and not file_.has_pgp_signature:
139 raise HTTPNotFound
140
141 # Try to open the file, streaming if possible, and if this file doesn't
142 # exist then we'll return a 404 error. However we'll log an error because
143 # if the database thinks we have a file, then a file should exist here.
144 try:
145 # TODO: We need to use mode="rb" here because this is a binary file
146 # and we don't want Python to attempt to decode it. However S3FS
147 # checks explicitly for mode="r-" to support streaming access.
148 # We need to get S3FS so that it support rb- as well as r-.
149 f = request.registry["filesystems"]["packages"].open(path, mode="rb")
150 except fs.errors.ResourceNotFoundError:
151 # TODO: Log an error here, this file doesn't exists for some reason,
152 # but it should because the database thinks it should.
153 raise HTTPNotFound from None
154
155 # If the path we're accessing is the path for the package itself, as
156 # opposed to the path for the signature, then we can include a
157 # Content-Length header.
158 content_length = None
159 if path == file_.path:
160 content_length = file_.size
161
162 resp = Response(
163 # If we have a wsgi.file_wrapper, we'll want to use that so that, if
164 # possible, this will use an optimized method of sending. Otherwise
165 # we'll just use Pyramid's FileIter as a fallback.
166 app_iter=request.environ.get("wsgi.file_wrapper", FileIter)(f),
167 # We use application/octet-stream instead of something nicer because
168 # different HTTP libraries will treat different combinations of
169 # Content-Type and Content-Encoding differently. The only thing that
170 # works sanely across all things without having something in the middle
171 # decide it can decompress the result to "help" the end user is with
172 # Content-Type: applicaton/octet-stream and no Content-Encoding.
173 content_type="application/octet-stream",
174 content_encoding=None,
175 # We need to specify an ETag for this response. Since ETags compared
176 # between URLs have no meaning (and thus, is safe for two URLs to share
177 # the same ETag) we will just use the MD5 hash of the package as our
178 # ETag.
179 etag=file_.md5_digest,
180 # Similarly to the ETag header, we'll just use the date that the file
181 # was uploaded as the Last-Modified header.
182 last_modified=file_.upload_time,
183 # If we have a Content-Length, we'll go ahead and use it here to
184 # hopefully enable the server and clients alike to be smarter about how
185 # they handle downloading this response.
186 content_length=content_length,
187 )
188
189 # We also need to get the X-PyPI-Last-Serial for the project associated
190 # with this file. Bandersnatch (and other mirroring clients) will use this
191 # to determine what kind of action to take if the MD5 hash does not match
192 # what it expected.
193 serial = (
194 request.db.query(func.max(JournalEntry.id))
195 .filter(JournalEntry.name == file_.name)
196 .scalar()
197 )
198 resp.headers["X-PyPI-Last-Serial"] = serial or 0
199
200 return resp
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py
--- a/warehouse/packaging/views.py
+++ b/warehouse/packaging/views.py
@@ -15,14 +15,13 @@
from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
from pyramid.response import FileIter, Response
from pyramid.view import view_config
-from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
from warehouse.accounts.models import User
from warehouse.cache.http import cache_control
from warehouse.cache.origin import origin_cache
from warehouse.packaging.interfaces import IDownloadStatService
-from warehouse.packaging.models import Release, File, Role, JournalEntry
+from warehouse.packaging.models import Release, File, Role
@view_config(
@@ -113,9 +112,7 @@
# The amount of logic that we can do in this view is very limited, this
# view needs to be able to be handled by Fastly directly hitting S3 instead
# of actually hitting this view. This more or less means that we're limited
- # to just setting headers and serving the actual file. In addition the
- # headers that we can set, have to be able to be determined at file upload
- # time instead of dynamically.
+ # to just serving the actual file.
# Grab the path of the file that we're attempting to serve
path = request.matchdict["path"]
@@ -159,7 +156,7 @@
if path == file_.path:
content_length = file_.size
- resp = Response(
+ return Response(
# If we have a wsgi.file_wrapper, we'll want to use that so that, if
# possible, this will use an optimized method of sending. Otherwise
# we'll just use Pyramid's FileIter as a fallback.
@@ -185,16 +182,3 @@
# they handle downloading this response.
content_length=content_length,
)
-
- # We also need to get the X-PyPI-Last-Serial for the project associated
- # with this file. Bandersnatch (and other mirroring clients) will use this
- # to determine what kind of action to take if the MD5 hash does not match
- # what it expected.
- serial = (
- request.db.query(func.max(JournalEntry.id))
- .filter(JournalEntry.name == file_.name)
- .scalar()
- )
- resp.headers["X-PyPI-Last-Serial"] = serial or 0
-
- return resp
|
{"golden_diff": "diff --git a/warehouse/packaging/views.py b/warehouse/packaging/views.py\n--- a/warehouse/packaging/views.py\n+++ b/warehouse/packaging/views.py\n@@ -15,14 +15,13 @@\n from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\n from pyramid.response import FileIter, Response\n from pyramid.view import view_config\n-from sqlalchemy import func\n from sqlalchemy.orm.exc import NoResultFound\n \n from warehouse.accounts.models import User\n from warehouse.cache.http import cache_control\n from warehouse.cache.origin import origin_cache\n from warehouse.packaging.interfaces import IDownloadStatService\n-from warehouse.packaging.models import Release, File, Role, JournalEntry\n+from warehouse.packaging.models import Release, File, Role\n \n \n @view_config(\n@@ -113,9 +112,7 @@\n # The amount of logic that we can do in this view is very limited, this\n # view needs to be able to be handled by Fastly directly hitting S3 instead\n # of actually hitting this view. This more or less means that we're limited\n- # to just setting headers and serving the actual file. In addition the\n- # headers that we can set, have to be able to be determined at file upload\n- # time instead of dynamically.\n+ # to just serving the actual file.\n \n # Grab the path of the file that we're attempting to serve\n path = request.matchdict[\"path\"]\n@@ -159,7 +156,7 @@\n if path == file_.path:\n content_length = file_.size\n \n- resp = Response(\n+ return Response(\n # If we have a wsgi.file_wrapper, we'll want to use that so that, if\n # possible, this will use an optimized method of sending. Otherwise\n # we'll just use Pyramid's FileIter as a fallback.\n@@ -185,16 +182,3 @@\n # they handle downloading this response.\n content_length=content_length,\n )\n-\n- # We also need to get the X-PyPI-Last-Serial for the project associated\n- # with this file. Bandersnatch (and other mirroring clients) will use this\n- # to determine what kind of action to take if the MD5 hash does not match\n- # what it expected.\n- serial = (\n- request.db.query(func.max(JournalEntry.id))\n- .filter(JournalEntry.name == file_.name)\n- .scalar()\n- )\n- resp.headers[\"X-PyPI-Last-Serial\"] = serial or 0\n-\n- return resp\n", "issue": "Deprecate & Remove X-PyPI-Last-Serial on File Responses\nIdeally we'd like to not serve the file responses via the Warehouse web application, however we currently need to do this in order to add the `X-PyPI-Last-Serial` responses. 
Bandersnatch is currently using this, so it would be great to investigate what it's being used for and if we can safely deprecate and remove it.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fs.errors\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.response import FileIter, Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.interfaces import IDownloadStatService\nfrom warehouse.packaging.models import Release, File, Role, JournalEntry\n\n\n@view_config(\n route_name=\"packaging.project\",\n renderer=\"packaging/detail.html\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(7 * 24 * 60 * 60), # 7 days\n ],\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_url(name=project.name),\n )\n\n try:\n release = project.releases.order_by(\n Release._pypi_ordering.desc()\n ).limit(1).one()\n except NoResultFound:\n raise HTTPNotFound from None\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n renderer=\"packaging/detail.html\",\n decorator=[\n cache_control(7 * 24 * 60 * 60), # 7 days\n origin_cache(30 * 24 * 60 * 60), # 30 days\n ],\n)\ndef release_detail(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_url(name=project.name),\n )\n\n # Get all of the registered versions for this Project, in order of newest\n # to oldest.\n all_releases = (\n project.releases\n .with_entities(Release.version, Release.created)\n .order_by(Release._pypi_ordering.desc())\n .all()\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n stats_svc = request.find_service(IDownloadStatService)\n\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n \"download_stats\": {\n \"daily\": stats_svc.get_daily_stats(project.name),\n \"weekly\": stats_svc.get_weekly_stats(project.name),\n \"monthly\": stats_svc.get_monthly_stats(project.name),\n },\n }\n\n\n@view_config(\n route_name=\"packaging.file\",\n decorator=[\n cache_control(365 * 24 * 60 * 60), # 1 year\n ],\n)\ndef packages(request):\n # The amount of logic that we can do in this view is very limited, this\n # view needs to be able to be handled by Fastly directly hitting S3 instead\n # of actually hitting this view. 
This more or less means that we're limited\n # to just setting headers and serving the actual file. In addition the\n # headers that we can set, have to be able to be determined at file upload\n # time instead of dynamically.\n\n # Grab the path of the file that we're attempting to serve\n path = request.matchdict[\"path\"]\n\n # We need to look up the File that is associated with this path, either the\n # package path or the pgp path. If that doesn't exist then we'll bail out\n # early with a 404.\n try:\n file_ = (\n request.db.query(File)\n .filter((File.path == path) | (File.pgp_path == path))\n .one()\n )\n except NoResultFound:\n raise HTTPNotFound from None\n\n # If this request is for a PGP signature, and the file doesn't have a PGP\n # signature, then we can go ahead and 404 now before hitting the file\n # storage.\n if path == file_.pgp_path and not file_.has_pgp_signature:\n raise HTTPNotFound\n\n # Try to open the file, streaming if possible, and if this file doesn't\n # exist then we'll return a 404 error. However we'll log an error because\n # if the database thinks we have a file, then a file should exist here.\n try:\n # TODO: We need to use mode=\"rb\" here because this is a binary file\n # and we don't want Python to attempt to decode it. However S3FS\n # checks explicitly for mode=\"r-\" to support streaming access.\n # We need to get S3FS so that it support rb- as well as r-.\n f = request.registry[\"filesystems\"][\"packages\"].open(path, mode=\"rb\")\n except fs.errors.ResourceNotFoundError:\n # TODO: Log an error here, this file doesn't exists for some reason,\n # but it should because the database thinks it should.\n raise HTTPNotFound from None\n\n # If the path we're accessing is the path for the package itself, as\n # opposed to the path for the signature, then we can include a\n # Content-Length header.\n content_length = None\n if path == file_.path:\n content_length = file_.size\n\n resp = Response(\n # If we have a wsgi.file_wrapper, we'll want to use that so that, if\n # possible, this will use an optimized method of sending. Otherwise\n # we'll just use Pyramid's FileIter as a fallback.\n app_iter=request.environ.get(\"wsgi.file_wrapper\", FileIter)(f),\n # We use application/octet-stream instead of something nicer because\n # different HTTP libraries will treat different combinations of\n # Content-Type and Content-Encoding differently. The only thing that\n # works sanely across all things without having something in the middle\n # decide it can decompress the result to \"help\" the end user is with\n # Content-Type: applicaton/octet-stream and no Content-Encoding.\n content_type=\"application/octet-stream\",\n content_encoding=None,\n # We need to specify an ETag for this response. Since ETags compared\n # between URLs have no meaning (and thus, is safe for two URLs to share\n # the same ETag) we will just use the MD5 hash of the package as our\n # ETag.\n etag=file_.md5_digest,\n # Similarly to the ETag header, we'll just use the date that the file\n # was uploaded as the Last-Modified header.\n last_modified=file_.upload_time,\n # If we have a Content-Length, we'll go ahead and use it here to\n # hopefully enable the server and clients alike to be smarter about how\n # they handle downloading this response.\n content_length=content_length,\n )\n\n # We also need to get the X-PyPI-Last-Serial for the project associated\n # with this file. 
Bandersnatch (and other mirroring clients) will use this\n # to determine what kind of action to take if the MD5 hash does not match\n # what it expected.\n serial = (\n request.db.query(func.max(JournalEntry.id))\n .filter(JournalEntry.name == file_.name)\n .scalar()\n )\n resp.headers[\"X-PyPI-Last-Serial\"] = serial or 0\n\n return resp\n", "path": "warehouse/packaging/views.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport fs.errors\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.response import FileIter, Response\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.accounts.models import User\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.interfaces import IDownloadStatService\nfrom warehouse.packaging.models import Release, File, Role\n\n\n@view_config(\n route_name=\"packaging.project\",\n renderer=\"packaging/detail.html\",\n decorator=[\n cache_control(1 * 24 * 60 * 60), # 1 day\n origin_cache(7 * 24 * 60 * 60), # 7 days\n ],\n)\ndef project_detail(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_url(name=project.name),\n )\n\n try:\n release = project.releases.order_by(\n Release._pypi_ordering.desc()\n ).limit(1).one()\n except NoResultFound:\n raise HTTPNotFound from None\n\n return release_detail(release, request)\n\n\n@view_config(\n route_name=\"packaging.release\",\n renderer=\"packaging/detail.html\",\n decorator=[\n cache_control(7 * 24 * 60 * 60), # 7 days\n origin_cache(30 * 24 * 60 * 60), # 30 days\n ],\n)\ndef release_detail(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_url(name=project.name),\n )\n\n # Get all of the registered versions for this Project, in order of newest\n # to oldest.\n all_releases = (\n project.releases\n .with_entities(Release.version, Release.created)\n .order_by(Release._pypi_ordering.desc())\n .all()\n )\n\n # Get all of the maintainers for this project.\n maintainers = [\n r.user\n for r in (\n request.db.query(Role)\n .join(User)\n .filter(Role.project == project)\n .distinct(User.username)\n .order_by(User.username)\n .all()\n )\n ]\n\n stats_svc = request.find_service(IDownloadStatService)\n\n return {\n \"project\": project,\n \"release\": release,\n \"files\": release.files.all(),\n \"all_releases\": all_releases,\n \"maintainers\": maintainers,\n \"download_stats\": {\n \"daily\": stats_svc.get_daily_stats(project.name),\n \"weekly\": stats_svc.get_weekly_stats(project.name),\n \"monthly\": stats_svc.get_monthly_stats(project.name),\n },\n }\n\n\n@view_config(\n route_name=\"packaging.file\",\n decorator=[\n cache_control(365 * 24 * 60 * 60), # 1 year\n ],\n)\ndef packages(request):\n # The amount of logic that we can 
do in this view is very limited, this\n # view needs to be able to be handled by Fastly directly hitting S3 instead\n # of actually hitting this view. This more or less means that we're limited\n # to just serving the actual file.\n\n # Grab the path of the file that we're attempting to serve\n path = request.matchdict[\"path\"]\n\n # We need to look up the File that is associated with this path, either the\n # package path or the pgp path. If that doesn't exist then we'll bail out\n # early with a 404.\n try:\n file_ = (\n request.db.query(File)\n .filter((File.path == path) | (File.pgp_path == path))\n .one()\n )\n except NoResultFound:\n raise HTTPNotFound from None\n\n # If this request is for a PGP signature, and the file doesn't have a PGP\n # signature, then we can go ahead and 404 now before hitting the file\n # storage.\n if path == file_.pgp_path and not file_.has_pgp_signature:\n raise HTTPNotFound\n\n # Try to open the file, streaming if possible, and if this file doesn't\n # exist then we'll return a 404 error. However we'll log an error because\n # if the database thinks we have a file, then a file should exist here.\n try:\n # TODO: We need to use mode=\"rb\" here because this is a binary file\n # and we don't want Python to attempt to decode it. However S3FS\n # checks explicitly for mode=\"r-\" to support streaming access.\n # We need to get S3FS so that it support rb- as well as r-.\n f = request.registry[\"filesystems\"][\"packages\"].open(path, mode=\"rb\")\n except fs.errors.ResourceNotFoundError:\n # TODO: Log an error here, this file doesn't exists for some reason,\n # but it should because the database thinks it should.\n raise HTTPNotFound from None\n\n # If the path we're accessing is the path for the package itself, as\n # opposed to the path for the signature, then we can include a\n # Content-Length header.\n content_length = None\n if path == file_.path:\n content_length = file_.size\n\n return Response(\n # If we have a wsgi.file_wrapper, we'll want to use that so that, if\n # possible, this will use an optimized method of sending. Otherwise\n # we'll just use Pyramid's FileIter as a fallback.\n app_iter=request.environ.get(\"wsgi.file_wrapper\", FileIter)(f),\n # We use application/octet-stream instead of something nicer because\n # different HTTP libraries will treat different combinations of\n # Content-Type and Content-Encoding differently. The only thing that\n # works sanely across all things without having something in the middle\n # decide it can decompress the result to \"help\" the end user is with\n # Content-Type: applicaton/octet-stream and no Content-Encoding.\n content_type=\"application/octet-stream\",\n content_encoding=None,\n # We need to specify an ETag for this response. Since ETags compared\n # between URLs have no meaning (and thus, is safe for two URLs to share\n # the same ETag) we will just use the MD5 hash of the package as our\n # ETag.\n etag=file_.md5_digest,\n # Similarly to the ETag header, we'll just use the date that the file\n # was uploaded as the Last-Modified header.\n last_modified=file_.upload_time,\n # If we have a Content-Length, we'll go ahead and use it here to\n # hopefully enable the server and clients alike to be smarter about how\n # they handle downloading this response.\n content_length=content_length,\n )\n", "path": "warehouse/packaging/views.py"}]}
| 2,668 | 575 |
gh_patches_debug_25252
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-3016
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`OrderedGraph.subgraph` does not maintain the order of the nodes
A subgraph built from an `OrderedGraph` should keep the order of the nodes, yet nodes in the subgraph are neither in the order of the initial graph nor in the order of the selection. The issue can be seen from the following snippet:
```python
graph = nx.OrderedGraph()
nodes = list(range(10))
random.shuffle(nodes)
graph.add_nodes_from(nodes) # key order is (7, 2, 1, 9, 0, 8, 6, 4, 3, 5)
# We create a selection in the same order as the initial graph keys
to_keep = [key for key in graph if key % 2 == 0] # [2, 0, 8, 6, 4]
subgraph = graph.subgraph(to_keep) # (0, 2, 4, 6, 8)
# We create a selection in a different order
subgraph = graph.subgraph([5, 3, 1]) # (1, 3, 5)
```
From what I see, the issue is due to `Graph.subgraph` passing the selection to `nx.filters.show_nodes`, which transforms it into a set. The nodes in the subgraph are then in the order of the set; as sets do not preserve insertion order, the order of the initial graph is not preserved.
--- END ISSUE ---
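The workaround suggested in the patch below is to build the subgraph by hand instead of calling `subgraph`, so that node insertion order is exactly the selection order. A minimal sketch, assuming a networkx version that still provides `OrderedGraph`:

```python
import networkx as nx

G = nx.OrderedGraph()
G.add_nodes_from([7, 2, 1, 9, 0, 8, 6, 4, 3, 5])
G.add_edges_from([(7, 2), (1, 9), (0, 8)])

ordered_nodes = [5, 3, 1]

# Instead of SG = G.subgraph(ordered_nodes), build the subgraph explicitly.
SG = nx.OrderedGraph()
SG.add_nodes_from(ordered_nodes)
SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)

print(list(SG))   # [5, 3, 1] -- the selection order is preserved
```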
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `networkx/classes/ordered.py`
Content:
```
1 """
2 Consistently ordered variants of the default base classes.
3
4 The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of
5 nodes and edges. The order of node reporting agrees with node adding, but for
6 edges, the order is not necessarily the order that the edges were added.
7
8 In general, you should use the default (i.e., unordered) graph classes.
9 However, there are times (e.g., when testing) when you may need the
10 order preserved.
11 """
12 from collections import OrderedDict
13
14 from .graph import Graph
15 from .multigraph import MultiGraph
16 from .digraph import DiGraph
17 from .multidigraph import MultiDiGraph
18
19 __all__ = []
20
21 __all__.extend([
22 'OrderedGraph',
23 'OrderedDiGraph',
24 'OrderedMultiGraph',
25 'OrderedMultiDiGraph',
26 ])
27
28
29 class OrderedGraph(Graph):
30 """Consistently ordered variant of :class:`~networkx.Graph`."""
31 node_dict_factory = OrderedDict
32 adjlist_outer_dict_factory = OrderedDict
33 adjlist_inner_dict_factory = OrderedDict
34 edge_attr_dict_factory = OrderedDict
35
36 def fresh_copy(self):
37 """Return a fresh copy graph with the same data structure.
38
39 A fresh copy has no nodes, edges or graph attributes. It is
40 the same data structure as the current graph. This method is
41 typically used to create an empty version of the graph.
42 """
43 return OrderedGraph()
44
45
46 class OrderedDiGraph(DiGraph):
47 """Consistently ordered variant of :class:`~networkx.DiGraph`."""
48 node_dict_factory = OrderedDict
49 adjlist_outer_dict_factory = OrderedDict
50 adjlist_inner_dict_factory = OrderedDict
51 edge_attr_dict_factory = OrderedDict
52
53 def fresh_copy(self):
54 """Return a fresh copy graph with the same data structure.
55
56 A fresh copy has no nodes, edges or graph attributes. It is
57 the same data structure as the current graph. This method is
58 typically used to create an empty version of the graph.
59 """
60 return OrderedDiGraph()
61
62
63 class OrderedMultiGraph(MultiGraph):
64 """Consistently ordered variant of :class:`~networkx.MultiGraph`."""
65 node_dict_factory = OrderedDict
66 adjlist_outer_dict_factory = OrderedDict
67 adjlist_inner_dict_factory = OrderedDict
68 edge_key_dict_factory = OrderedDict
69 edge_attr_dict_factory = OrderedDict
70
71 def fresh_copy(self):
72 """Return a fresh copy graph with the same data structure.
73
74 A fresh copy has no nodes, edges or graph attributes. It is
75 the same data structure as the current graph. This method is
76 typically used to create an empty version of the graph.
77 """
78 return OrderedMultiGraph()
79
80
81 class OrderedMultiDiGraph(MultiDiGraph):
82 """Consistently ordered variant of :class:`~networkx.MultiDiGraph`."""
83 node_dict_factory = OrderedDict
84 adjlist_outer_dict_factory = OrderedDict
85 adjlist_inner_dict_factory = OrderedDict
86 edge_key_dict_factory = OrderedDict
87 edge_attr_dict_factory = OrderedDict
88
89 def fresh_copy(self):
90 """Return a fresh copy graph with the same data structure.
91
92 A fresh copy has no nodes, edges or graph attributes. It is
93 the same data structure as the current graph. This method is
94 typically used to create an empty version of the graph.
95 """
96 return OrderedMultiDiGraph()
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/networkx/classes/ordered.py b/networkx/classes/ordered.py
--- a/networkx/classes/ordered.py
+++ b/networkx/classes/ordered.py
@@ -1,5 +1,10 @@
"""
Consistently ordered variants of the default base classes.
+Note that if you are using Python 3.6, you shouldn't need these classes
+because the dicts in Python 3.6 are ordered.
+Note also that there are many differing expectations for the word "ordered"
+and that these classes may not provide the order you expect.
+The intent here is to give a consistent order not a particular order.
The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of
nodes and edges. The order of node reporting agrees with node adding, but for
@@ -8,6 +13,17 @@
In general, you should use the default (i.e., unordered) graph classes.
However, there are times (e.g., when testing) when you may need the
order preserved.
+
+Special care is required when using subgraphs of the Ordered classes.
+The order of nodes in the subclass is not necessarily the same order
+as the original class. In general it is probably better to avoid using
+subgraphs and replace with code similar to:
+
+ # instead of SG = G.subgraph(ordered_nodes)
+ SG=nx.OrderedGraph()
+ SG.add_nodes_from(ordered_nodes)
+ SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)
+
"""
from collections import OrderedDict
|
{"golden_diff": "diff --git a/networkx/classes/ordered.py b/networkx/classes/ordered.py\n--- a/networkx/classes/ordered.py\n+++ b/networkx/classes/ordered.py\n@@ -1,5 +1,10 @@\n \"\"\"\n Consistently ordered variants of the default base classes.\n+Note that if you are using Python 3.6, you shouldn't need these classes\n+because the dicts in Python 3.6 are ordered.\n+Note also that there are many differing expectations for the word \"ordered\"\n+and that these classes may not provide the order you expect.\n+The intent here is to give a consistent order not a particular order.\n \n The Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\n nodes and edges. The order of node reporting agrees with node adding, but for\n@@ -8,6 +13,17 @@\n In general, you should use the default (i.e., unordered) graph classes.\n However, there are times (e.g., when testing) when you may need the\n order preserved.\n+\n+Special care is required when using subgraphs of the Ordered classes.\n+The order of nodes in the subclass is not necessarily the same order\n+as the original class. In general it is probably better to avoid using\n+subgraphs and replace with code similar to:\n+\n+ # instead of SG = G.subgraph(ordered_nodes)\n+ SG=nx.OrderedGraph()\n+ SG.add_nodes_from(ordered_nodes)\n+ SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)\n+\n \"\"\"\n from collections import OrderedDict\n", "issue": "`OrderedGraph.subgraph` does not maintain the order of the nodes\nA subgraph built from a `OrderedGraph` should keep the order of the nodes, yet nodes in the subgraph are neither in the order of the initial graph, nor in the order of the selection. The issue can be seen from the following snippet:\r\n\r\n```python\r\ngraph = nx.OrderedGraph()\r\nnodes = list(range(10))\r\nrandom.shuffle(nodes)\r\ngraph.add_nodes_from(nodes) # key order is (7, 2, 1, 9, 0, 8, 6, 4, 3, 5)\r\n\r\n# We create a selection in the same order as the initial graph keys\r\nto_keep = [key for key in graph if key % 2 == 0] # [2, 0, 8, 6, 4]\r\nsubgraph = graph.subgraph(to_keep) # (0, 2, 4, 6, 8)\r\n\r\n# We create a selection in a different order\r\nsubgraph = graph.subgraph([5, 3, 1]) # (1, 3, 5)\r\n```\r\n\r\nFrom what I see, the issue is due to `Graph.subgraph` passing the selection to `nx.filters.show_nodes` that transforms it to a set. The nodes in the subgraph are then in the order of the set; as sets do not preserve the order, the order of the initial graph if not preserved.\n", "before_files": [{"content": "\"\"\"\nConsistently ordered variants of the default base classes.\n\nThe Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\nnodes and edges. 
The order of node reporting agrees with node adding, but for\nedges, the order is not necessarily the order that the edges were added.\n\nIn general, you should use the default (i.e., unordered) graph classes.\nHowever, there are times (e.g., when testing) when you may need the\norder preserved.\n\"\"\"\nfrom collections import OrderedDict\n\nfrom .graph import Graph\nfrom .multigraph import MultiGraph\nfrom .digraph import DiGraph\nfrom .multidigraph import MultiDiGraph\n\n__all__ = []\n\n__all__.extend([\n 'OrderedGraph',\n 'OrderedDiGraph',\n 'OrderedMultiGraph',\n 'OrderedMultiDiGraph',\n])\n\n\nclass OrderedGraph(Graph):\n \"\"\"Consistently ordered variant of :class:`~networkx.Graph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedGraph()\n\n\nclass OrderedDiGraph(DiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.DiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedDiGraph()\n\n\nclass OrderedMultiGraph(MultiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiGraph()\n\n\nclass OrderedMultiDiGraph(MultiDiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiDiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. 
This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiDiGraph()\n", "path": "networkx/classes/ordered.py"}], "after_files": [{"content": "\"\"\"\nConsistently ordered variants of the default base classes.\nNote that if you are using Python 3.6, you shouldn't need these classes\nbecause the dicts in Python 3.6 are ordered.\nNote also that there are many differing expectations for the word \"ordered\"\nand that these classes may not provide the order you expect.\nThe intent here is to give a consistent order not a particular order.\n\nThe Ordered (Di/Multi/MultiDi) Graphs give a consistent order for reporting of\nnodes and edges. The order of node reporting agrees with node adding, but for\nedges, the order is not necessarily the order that the edges were added.\n\nIn general, you should use the default (i.e., unordered) graph classes.\nHowever, there are times (e.g., when testing) when you may need the\norder preserved.\n\nSpecial care is required when using subgraphs of the Ordered classes.\nThe order of nodes in the subclass is not necessarily the same order\nas the original class. In general it is probably better to avoid using\nsubgraphs and replace with code similar to:\n\n # instead of SG = G.subgraph(ordered_nodes)\n SG=nx.OrderedGraph()\n SG.add_nodes_from(ordered_nodes)\n SG.add_edges_from((u, v) for (u, v) in G.edges() if u in SG if v in SG)\n\n\"\"\"\nfrom collections import OrderedDict\n\nfrom .graph import Graph\nfrom .multigraph import MultiGraph\nfrom .digraph import DiGraph\nfrom .multidigraph import MultiDiGraph\n\n__all__ = []\n\n__all__.extend([\n 'OrderedGraph',\n 'OrderedDiGraph',\n 'OrderedMultiGraph',\n 'OrderedMultiDiGraph',\n])\n\n\nclass OrderedGraph(Graph):\n \"\"\"Consistently ordered variant of :class:`~networkx.Graph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedGraph()\n\n\nclass OrderedDiGraph(DiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.DiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedDiGraph()\n\n\nclass OrderedMultiGraph(MultiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. 
This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiGraph()\n\n\nclass OrderedMultiDiGraph(MultiDiGraph):\n \"\"\"Consistently ordered variant of :class:`~networkx.MultiDiGraph`.\"\"\"\n node_dict_factory = OrderedDict\n adjlist_outer_dict_factory = OrderedDict\n adjlist_inner_dict_factory = OrderedDict\n edge_key_dict_factory = OrderedDict\n edge_attr_dict_factory = OrderedDict\n\n def fresh_copy(self):\n \"\"\"Return a fresh copy graph with the same data structure.\n\n A fresh copy has no nodes, edges or graph attributes. It is\n the same data structure as the current graph. This method is\n typically used to create an empty version of the graph.\n \"\"\"\n return OrderedMultiDiGraph()\n", "path": "networkx/classes/ordered.py"}]}
| 1,474 | 352 |
gh_patches_debug_16045
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-1601
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Separate XLIFF export options
### Motivation
<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->
Apparently, the two options to export XLIFF files (either all pages or only public pages) are too similar and lead to accidental misclicks.
### Proposed Solution
<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->
- Move the public option to the top (since this is the most used)
- Reword the "all pages" option to make it more obvious that this includes drafts
- Maybe add an exclamation mark or similar to the "all pages" option
### Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->
### Additional Context
<!-- Add any other information or screenshots about the feature request here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/views/pages/page_bulk_actions.py`
Content:
```
1 import logging
2
3 from django.contrib import messages
4 from django.shortcuts import get_object_or_404
5 from django.utils.translation import ugettext as _
6 from django.views.generic.list import MultipleObjectMixin
7
8 from ....xliff.utils import pages_to_xliff_file
9 from ...models import Page
10 from ...utils.pdf_utils import generate_pdf
11 from ...utils.translation_utils import ugettext_many_lazy as __
12 from ..bulk_action_views import BulkActionView
13
14 logger = logging.getLogger(__name__)
15
16
17 class PageBulkActionMixin(MultipleObjectMixin):
18 """
19 Mixin for page bulk actions
20 """
21
22 #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView`
23 model = Page
24
25
26 # pylint: disable=too-many-ancestors
27 class GeneratePdfView(PageBulkActionMixin, BulkActionView):
28 """
29 Bulk action for generating a PDF document of the content
30 """
31
32 #: Whether the view requires change permissions
33 require_change_permission = False
34 #: Whether the public translation objects should be prefetched
35 prefetch_public_translations = True
36
37 def post(self, request, *args, **kwargs):
38 r"""
39 Apply the bulk action on every item in the queryset and redirect
40
41 :param request: The current request
42 :type request: ~django.http.HttpResponse
43
44 :param \*args: The supplied arguments
45 :type \*args: list
46
47 :param \**kwargs: The supplied keyword arguments
48 :type \**kwargs: dict
49
50 :return: The redirect
51 :rtype: ~django.http.HttpResponseRedirect
52 """
53 # Generate PDF document and redirect to it
54 return generate_pdf(
55 request.region,
56 kwargs.get("language_slug"),
57 self.get_queryset(),
58 )
59
60
61 # pylint: disable=too-many-ancestors
62 class ExportXliffView(PageBulkActionMixin, BulkActionView):
63 """
64 Bulk action for generating XLIFF files for translations
65 """
66
67 #: Whether only public translation should be exported
68 only_public = False
69 #: Whether the view requires change permissions
70 require_change_permission = False
71
72 def post(self, request, *args, **kwargs):
73 r"""
74 Function for handling a pdf export request for pages.
75 The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf`
76
77 :param request: The current request
78 :type request: ~django.http.HttpResponse
79
80 :param \*args: The supplied arguments
81 :type \*args: list
82
83 :param \**kwargs: The supplied keyword arguments
84 :type \**kwargs: dict
85
86 :return: The redirect
87 :rtype: ~django.http.HttpResponseRedirect
88 """
89 target_language = get_object_or_404(
90 self.request.region.language_tree_nodes,
91 language__slug=kwargs.get("language_slug"),
92 parent__isnull=False,
93 ).language
94
95 xliff_file_url = pages_to_xliff_file(
96 request, self.get_queryset(), target_language, only_public=self.only_public
97 )
98 if xliff_file_url:
99 # Insert link with automatic download into success message
100 messages.success(
101 request,
102 __(
103 _("XLIFF file for translation to {} successfully created.").format(
104 target_language
105 ),
106 _(
107 "If the download does not start automatically, please click {}here{}."
108 ).format(
109 f"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>",
110 "</a>",
111 ),
112 ),
113 )
114
115 # Let the base view handle the redirect
116 return super().post(request, *args, **kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/integreat_cms/cms/views/pages/page_bulk_actions.py b/integreat_cms/cms/views/pages/page_bulk_actions.py
--- a/integreat_cms/cms/views/pages/page_bulk_actions.py
+++ b/integreat_cms/cms/views/pages/page_bulk_actions.py
@@ -100,9 +100,13 @@
messages.success(
request,
__(
- _("XLIFF file for translation to {} successfully created.").format(
- target_language
- ),
+ _(
+ "XLIFF file with published pages only for translation to {} successfully created."
+ ).format(target_language)
+ if self.only_public
+ else _(
+ "XLIFF file with unpublished and published pages for translation to {} successfully created."
+ ).format(target_language),
_(
"If the download does not start automatically, please click {}here{}."
).format(
|
{"golden_diff": "diff --git a/integreat_cms/cms/views/pages/page_bulk_actions.py b/integreat_cms/cms/views/pages/page_bulk_actions.py\n--- a/integreat_cms/cms/views/pages/page_bulk_actions.py\n+++ b/integreat_cms/cms/views/pages/page_bulk_actions.py\n@@ -100,9 +100,13 @@\n messages.success(\n request,\n __(\n- _(\"XLIFF file for translation to {} successfully created.\").format(\n- target_language\n- ),\n+ _(\n+ \"XLIFF file with published pages only for translation to {} successfully created.\"\n+ ).format(target_language)\n+ if self.only_public\n+ else _(\n+ \"XLIFF file with unpublished and published pages for translation to {} successfully created.\"\n+ ).format(target_language),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n", "issue": "Separate XLIFF export options\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\nApparently, the two options to export XLIFF files (either all pages or only public pages) are too similar and lead to accidental misclicks.\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\n- Move the public option to the top (since this is the most used)\r\n- Reword the \"all pages\" option to make it more obvious that this includes drafts\r\n- Maybe add an exclamation mark or similar to the \"all pages\" option\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. 
-->\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom ....xliff.utils import pages_to_xliff_file\nfrom ...models import Page\nfrom ...utils.pdf_utils import generate_pdf\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..bulk_action_views import BulkActionView\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageBulkActionMixin(MultipleObjectMixin):\n \"\"\"\n Mixin for page bulk actions\n \"\"\"\n\n #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView`\n model = Page\n\n\n# pylint: disable=too-many-ancestors\nclass GeneratePdfView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating a PDF document of the content\n \"\"\"\n\n #: Whether the view requires change permissions\n require_change_permission = False\n #: Whether the public translation objects should be prefetched\n prefetch_public_translations = True\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply the bulk action on every item in the queryset and redirect\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # Generate PDF document and redirect to it\n return generate_pdf(\n request.region,\n kwargs.get(\"language_slug\"),\n self.get_queryset(),\n )\n\n\n# pylint: disable=too-many-ancestors\nclass ExportXliffView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating XLIFF files for translations\n \"\"\"\n\n #: Whether only public translation should be exported\n only_public = False\n #: Whether the view requires change permissions\n require_change_permission = False\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Function for handling a pdf export request for pages.\n The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf`\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n target_language = get_object_or_404(\n self.request.region.language_tree_nodes,\n language__slug=kwargs.get(\"language_slug\"),\n parent__isnull=False,\n ).language\n\n xliff_file_url = pages_to_xliff_file(\n request, self.get_queryset(), target_language, only_public=self.only_public\n )\n if xliff_file_url:\n # Insert link with automatic download into success message\n messages.success(\n request,\n __(\n _(\"XLIFF file for translation to {} successfully created.\").format(\n target_language\n ),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n f\"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>\",\n \"</a>\",\n ),\n ),\n )\n\n # Let the base view handle the redirect\n return super().post(request, *args, **kwargs)\n", "path": "integreat_cms/cms/views/pages/page_bulk_actions.py"}], "after_files": [{"content": "import logging\n\nfrom django.contrib 
import messages\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom ....xliff.utils import pages_to_xliff_file\nfrom ...models import Page\nfrom ...utils.pdf_utils import generate_pdf\nfrom ...utils.translation_utils import ugettext_many_lazy as __\nfrom ..bulk_action_views import BulkActionView\n\nlogger = logging.getLogger(__name__)\n\n\nclass PageBulkActionMixin(MultipleObjectMixin):\n \"\"\"\n Mixin for page bulk actions\n \"\"\"\n\n #: The model of this :class:`~integreat_cms.cms.views.bulk_action_views.BulkActionView`\n model = Page\n\n\n# pylint: disable=too-many-ancestors\nclass GeneratePdfView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating a PDF document of the content\n \"\"\"\n\n #: Whether the view requires change permissions\n require_change_permission = False\n #: Whether the public translation objects should be prefetched\n prefetch_public_translations = True\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Apply the bulk action on every item in the queryset and redirect\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n # Generate PDF document and redirect to it\n return generate_pdf(\n request.region,\n kwargs.get(\"language_slug\"),\n self.get_queryset(),\n )\n\n\n# pylint: disable=too-many-ancestors\nclass ExportXliffView(PageBulkActionMixin, BulkActionView):\n \"\"\"\n Bulk action for generating XLIFF files for translations\n \"\"\"\n\n #: Whether only public translation should be exported\n only_public = False\n #: Whether the view requires change permissions\n require_change_permission = False\n\n def post(self, request, *args, **kwargs):\n r\"\"\"\n Function for handling a pdf export request for pages.\n The pages get extracted from request.GET attribute and the request is forwarded to :func:`~integreat_cms.cms.utils.pdf_utils.generate_pdf`\n\n :param request: The current request\n :type request: ~django.http.HttpResponse\n\n :param \\*args: The supplied arguments\n :type \\*args: list\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n\n :return: The redirect\n :rtype: ~django.http.HttpResponseRedirect\n \"\"\"\n target_language = get_object_or_404(\n self.request.region.language_tree_nodes,\n language__slug=kwargs.get(\"language_slug\"),\n parent__isnull=False,\n ).language\n\n xliff_file_url = pages_to_xliff_file(\n request, self.get_queryset(), target_language, only_public=self.only_public\n )\n if xliff_file_url:\n # Insert link with automatic download into success message\n messages.success(\n request,\n __(\n _(\n \"XLIFF file with published pages only for translation to {} successfully created.\"\n ).format(target_language)\n if self.only_public\n else _(\n \"XLIFF file with unpublished and published pages for translation to {} successfully created.\"\n ).format(target_language),\n _(\n \"If the download does not start automatically, please click {}here{}.\"\n ).format(\n f\"<a data-auto-download href='{xliff_file_url}' class='font-bold underline hover:no-underline' download>\",\n \"</a>\",\n ),\n ),\n )\n\n # Let the base view handle the redirect\n return super().post(request, *args, **kwargs)\n", "path": 
"integreat_cms/cms/views/pages/page_bulk_actions.py"}]}
| 1,518 | 199 |
gh_patches_debug_40548
|
rasdani/github-patches
|
git_diff
|
huggingface__huggingface_hub-285
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add sentence similarity for spaCy
Computing similarity is simple with `spaCy` and there is an existing [template pipeline](https://github.com/huggingface/huggingface_hub/blob/main/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py) for sentence similarity which can be modified for this.
https://spacy.io/api/doc#similarity
--- END ISSUE ---
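For reference, computing document similarity with spaCy typically looks like the minimal sketch below. It assumes a vectors-enabled model such as `en_core_web_md` is installed; the model name and example sentences are illustrative only.
```python
import spacy

# A model with real word vectors is needed for meaningful scores;
# the small "en_core_web_sm" pipeline has no proper vectors.
nlp = spacy.load("en_core_web_md")

source = nlp("That is a happy person")
candidates = [nlp(text) for text in ("That is a happy dog", "Today is a sunny day")]

# Doc.similarity returns the cosine similarity of the averaged word vectors.
scores = [source.similarity(doc) for doc in candidates]
print(scores)
```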
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py`
Content:
```
1 from typing import Dict, List, Union
2
3 from app.pipelines import Pipeline
4
5
6 class SentenceSimilarityPipeline(Pipeline):
7 def __init__(
8 self,
9 model_id: str,
10 ):
11 # IMPLEMENT_THIS
12 # Preload all the elements you are going to need at inference.
13 # For instance your model, processors, tokenizer that might be needed.
14 # This function is only called once, so do all the heavy processing I/O here
15 raise NotImplementedError(
16 "Please implement SentenceSimilarityPipeline __init__ function"
17 )
18
19 def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:
20 """
21 Args:
22 inputs (:obj:`dict`):
23 a dictionary containing two keys, 'source_sentence' mapping
24 to the sentence that will be compared against all the others,
25 and 'sentences', mapping to a list of strings to which the
26 source will be compared.
27 Return:
28 A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.
29 """
30 # IMPLEMENT_THIS
31 raise NotImplementedError(
32 "Please implement SentenceSimilarityPipeline __call__ function"
33 )
34
```
Path: `api-inference-community/docker_images/spacy/app/main.py`
Content:
```
1 import functools
2 import logging
3 import os
4 from typing import Dict, Type
5
6 from api_inference_community.routes import pipeline_route, status_ok
7 from app.pipelines import (
8 Pipeline,
9 TextClassificationPipeline,
10 TokenClassificationPipeline,
11 )
12 from starlette.applications import Starlette
13 from starlette.middleware import Middleware
14 from starlette.middleware.gzip import GZipMiddleware
15 from starlette.routing import Route
16
17
18 TASK = os.getenv("TASK")
19 MODEL_ID = os.getenv("MODEL_ID")
20
21
22 logger = logging.getLogger(__name__)
23
24
25 # Add the allowed tasks
26 # Supported tasks are:
27 # - text-generation
28 # - text-classification
29 # - token-classification
30 # - translation
31 # - summarization
32 # - automatic-speech-recognition
33 # - ...
34 # For instance
35 # from app.pipelines import AutomaticSpeechRecognitionPipeline
36 # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
37 # You can check the requirements and expectations of each pipelines in their respective
38 # directories. Implement directly within the directories.
39 ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
40 "token-classification": TokenClassificationPipeline,
41 "text-classification": TextClassificationPipeline,
42 }
43
44
45 @functools.lru_cache()
46 def get_pipeline() -> Pipeline:
47 task = os.environ["TASK"]
48 model_id = os.environ["MODEL_ID"]
49 if task not in ALLOWED_TASKS:
50 raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
51 return ALLOWED_TASKS[task](model_id)
52
53
54 routes = [
55 Route("/{whatever:path}", status_ok),
56 Route("/{whatever:path}", pipeline_route, methods=["POST"]),
57 ]
58
59 middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
60 if os.environ.get("DEBUG", "") == "1":
61 from starlette.middleware.cors import CORSMiddleware
62
63 middleware.append(
64 Middleware(
65 CORSMiddleware,
66 allow_origins=["*"],
67 allow_headers=["*"],
68 allow_methods=["*"],
69 )
70 )
71
72 app = Starlette(routes=routes, middleware=middleware)
73
74
75 @app.on_event("startup")
76 async def startup_event():
77 logger = logging.getLogger("uvicorn.access")
78 handler = logging.StreamHandler()
79 handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
80 logger.handlers = [handler]
81
82 # Link between `api-inference-community` and framework code.
83 app.get_pipeline = get_pipeline
84 try:
85 get_pipeline()
86 except Exception:
87 # We can fail so we can show exception later.
88 pass
89
90
91 if __name__ == "__main__":
92 try:
93 get_pipeline()
94 except Exception:
95 # We can fail so we can show exception later.
96 pass
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api-inference-community/docker_images/spacy/app/main.py b/api-inference-community/docker_images/spacy/app/main.py
--- a/api-inference-community/docker_images/spacy/app/main.py
+++ b/api-inference-community/docker_images/spacy/app/main.py
@@ -6,6 +6,7 @@
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import (
Pipeline,
+ SentenceSimilarityPipeline,
TextClassificationPipeline,
TokenClassificationPipeline,
)
@@ -30,6 +31,7 @@
# - translation
# - summarization
# - automatic-speech-recognition
+# - sentence-similarity
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
@@ -39,6 +41,7 @@
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"token-classification": TokenClassificationPipeline,
"text-classification": TextClassificationPipeline,
+ "sentence-similarity": SentenceSimilarityPipeline,
}
diff --git a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py
--- a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py
+++ b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py
@@ -1,3 +1,6 @@
+import os
+import subprocess
+import sys
from typing import Dict, List, Union
from app.pipelines import Pipeline
@@ -8,14 +11,23 @@
self,
model_id: str,
):
- # IMPLEMENT_THIS
- # Preload all the elements you are going to need at inference.
- # For instance your model, processors, tokenizer that might be needed.
- # This function is only called once, so do all the heavy processing I/O here
- raise NotImplementedError(
- "Please implement SentenceSimilarityPipeline __init__ function"
+ # At the time, only public models from spaCy are allowed in the inference API.
+ full_model_path = model_id.split("/")
+ if len(full_model_path) != 2:
+ raise ValueError(
+ f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)"
+ )
+ namespace, model_name = full_model_path
+ package = f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
+ cache_dir = os.environ["PIP_CACHE"]
+ subprocess.check_call(
+ [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package]
)
+ import spacy
+
+ self.model = spacy.load(model_name)
+
def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:
"""
Args:
@@ -27,7 +39,12 @@
Return:
A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.
"""
- # IMPLEMENT_THIS
- raise NotImplementedError(
- "Please implement SentenceSimilarityPipeline __call__ function"
- )
+ source_sentence = inputs["source_sentence"]
+ source_doc = self.model(source_sentence)
+
+ similarities = []
+ for sentence in inputs["sentences"]:
+ search_doc = self.model(sentence)
+ similarities.append(source_doc.similarity(search_doc))
+
+ return similarities
|
{"golden_diff": "diff --git a/api-inference-community/docker_images/spacy/app/main.py b/api-inference-community/docker_images/spacy/app/main.py\n--- a/api-inference-community/docker_images/spacy/app/main.py\n+++ b/api-inference-community/docker_images/spacy/app/main.py\n@@ -6,6 +6,7 @@\n from api_inference_community.routes import pipeline_route, status_ok\n from app.pipelines import (\n Pipeline,\n+ SentenceSimilarityPipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n )\n@@ -30,6 +31,7 @@\n # - translation\n # - summarization\n # - automatic-speech-recognition\n+# - sentence-similarity\n # - ...\n # For instance\n # from app.pipelines import AutomaticSpeechRecognitionPipeline\n@@ -39,6 +41,7 @@\n ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n+ \"sentence-similarity\": SentenceSimilarityPipeline,\n }\n \n \ndiff --git a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n--- a/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n+++ b/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py\n@@ -1,3 +1,6 @@\n+import os\n+import subprocess\n+import sys\n from typing import Dict, List, Union\n \n from app.pipelines import Pipeline\n@@ -8,14 +11,23 @@\n self,\n model_id: str,\n ):\n- # IMPLEMENT_THIS\n- # Preload all the elements you are going to need at inference.\n- # For instance your model, processors, tokenizer that might be needed.\n- # This function is only called once, so do all the heavy processing I/O here\n- raise NotImplementedError(\n- \"Please implement SentenceSimilarityPipeline __init__ function\"\n+ # At the time, only public models from spaCy are allowed in the inference API.\n+ full_model_path = model_id.split(\"/\")\n+ if len(full_model_path) != 2:\n+ raise ValueError(\n+ f\"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)\"\n+ )\n+ namespace, model_name = full_model_path\n+ package = f\"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl\"\n+ cache_dir = os.environ[\"PIP_CACHE\"]\n+ subprocess.check_call(\n+ [sys.executable, \"-m\", \"pip\", \"install\", \"--cache-dir\", cache_dir, package]\n )\n \n+ import spacy\n+\n+ self.model = spacy.load(model_name)\n+\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n@@ -27,7 +39,12 @@\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n- # IMPLEMENT_THIS\n- raise NotImplementedError(\n- \"Please implement SentenceSimilarityPipeline __call__ function\"\n- )\n+ source_sentence = inputs[\"source_sentence\"]\n+ source_doc = self.model(source_sentence)\n+\n+ similarities = []\n+ for sentence in inputs[\"sentences\"]:\n+ search_doc = self.model(sentence)\n+ similarities.append(source_doc.similarity(search_doc))\n+\n+ return similarities\n", "issue": "Add sentence similarity for spaCy\nComputing similarity is simple with `spaCy` and there is an existing [template pipeline](https://github.com/huggingface/huggingface_hub/blob/main/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py) for sentence similarity at which can be modified for this. 
\r\n\r\nhttps://spacy.io/api/doc#similarity\n", "before_files": [{"content": "from typing import Dict, List, Union\n\nfrom app.pipelines import Pipeline\n\n\nclass SentenceSimilarityPipeline(Pipeline):\n def __init__(\n self,\n model_id: str,\n ):\n # IMPLEMENT_THIS\n # Preload all the elements you are going to need at inference.\n # For instance your model, processors, tokenizer that might be needed.\n # This function is only called once, so do all the heavy processing I/O here\n raise NotImplementedError(\n \"Please implement SentenceSimilarityPipeline __init__ function\"\n )\n\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n inputs (:obj:`dict`):\n a dictionary containing two keys, 'source_sentence' mapping\n to the sentence that will be compared against all the others,\n and 'sentences', mapping to a list of strings to which the\n source will be compared.\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n # IMPLEMENT_THIS\n raise NotImplementedError(\n \"Please implement SentenceSimilarityPipeline __call__ function\"\n )\n", "path": "api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py"}, {"content": "import functools\nimport logging\nimport os\nfrom typing import Dict, Type\n\nfrom api_inference_community.routes import pipeline_route, status_ok\nfrom app.pipelines import (\n Pipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n)\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.gzip import GZipMiddleware\nfrom starlette.routing import Route\n\n\nTASK = os.getenv(\"TASK\")\nMODEL_ID = os.getenv(\"MODEL_ID\")\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Add the allowed tasks\n# Supported tasks are:\n# - text-generation\n# - text-classification\n# - token-classification\n# - translation\n# - summarization\n# - automatic-speech-recognition\n# - ...\n# For instance\n# from app.pipelines import AutomaticSpeechRecognitionPipeline\n# ALLOWED_TASKS = {\"automatic-speech-recognition\": AutomaticSpeechRecognitionPipeline}\n# You can check the requirements and expectations of each pipelines in their respective\n# directories. 
Implement directly within the directories.\nALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n}\n\n\[email protected]_cache()\ndef get_pipeline() -> Pipeline:\n task = os.environ[\"TASK\"]\n model_id = os.environ[\"MODEL_ID\"]\n if task not in ALLOWED_TASKS:\n raise EnvironmentError(f\"{task} is not a valid pipeline for model : {model_id}\")\n return ALLOWED_TASKS[task](model_id)\n\n\nroutes = [\n Route(\"/{whatever:path}\", status_ok),\n Route(\"/{whatever:path}\", pipeline_route, methods=[\"POST\"]),\n]\n\nmiddleware = [Middleware(GZipMiddleware, minimum_size=1000)]\nif os.environ.get(\"DEBUG\", \"\") == \"1\":\n from starlette.middleware.cors import CORSMiddleware\n\n middleware.append(\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_headers=[\"*\"],\n allow_methods=[\"*\"],\n )\n )\n\napp = Starlette(routes=routes, middleware=middleware)\n\n\[email protected]_event(\"startup\")\nasync def startup_event():\n logger = logging.getLogger(\"uvicorn.access\")\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n logger.handlers = [handler]\n\n # Link between `api-inference-community` and framework code.\n app.get_pipeline = get_pipeline\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n\n\nif __name__ == \"__main__\":\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n", "path": "api-inference-community/docker_images/spacy/app/main.py"}], "after_files": [{"content": "import os\nimport subprocess\nimport sys\nfrom typing import Dict, List, Union\n\nfrom app.pipelines import Pipeline\n\n\nclass SentenceSimilarityPipeline(Pipeline):\n def __init__(\n self,\n model_id: str,\n ):\n # At the time, only public models from spaCy are allowed in the inference API.\n full_model_path = model_id.split(\"/\")\n if len(full_model_path) != 2:\n raise ValueError(\n f\"Invalid model_id: {model_id}. 
It should have a namespace (:namespace:/:model_name:)\"\n )\n namespace, model_name = full_model_path\n package = f\"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl\"\n cache_dir = os.environ[\"PIP_CACHE\"]\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"--cache-dir\", cache_dir, package]\n )\n\n import spacy\n\n self.model = spacy.load(model_name)\n\n def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:\n \"\"\"\n Args:\n inputs (:obj:`dict`):\n a dictionary containing two keys, 'source_sentence' mapping\n to the sentence that will be compared against all the others,\n and 'sentences', mapping to a list of strings to which the\n source will be compared.\n Return:\n A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`.\n \"\"\"\n source_sentence = inputs[\"source_sentence\"]\n source_doc = self.model(source_sentence)\n\n similarities = []\n for sentence in inputs[\"sentences\"]:\n search_doc = self.model(sentence)\n similarities.append(source_doc.similarity(search_doc))\n\n return similarities\n", "path": "api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py"}, {"content": "import functools\nimport logging\nimport os\nfrom typing import Dict, Type\n\nfrom api_inference_community.routes import pipeline_route, status_ok\nfrom app.pipelines import (\n Pipeline,\n SentenceSimilarityPipeline,\n TextClassificationPipeline,\n TokenClassificationPipeline,\n)\nfrom starlette.applications import Starlette\nfrom starlette.middleware import Middleware\nfrom starlette.middleware.gzip import GZipMiddleware\nfrom starlette.routing import Route\n\n\nTASK = os.getenv(\"TASK\")\nMODEL_ID = os.getenv(\"MODEL_ID\")\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Add the allowed tasks\n# Supported tasks are:\n# - text-generation\n# - text-classification\n# - token-classification\n# - translation\n# - summarization\n# - automatic-speech-recognition\n# - sentence-similarity\n# - ...\n# For instance\n# from app.pipelines import AutomaticSpeechRecognitionPipeline\n# ALLOWED_TASKS = {\"automatic-speech-recognition\": AutomaticSpeechRecognitionPipeline}\n# You can check the requirements and expectations of each pipelines in their respective\n# directories. 
Implement directly within the directories.\nALLOWED_TASKS: Dict[str, Type[Pipeline]] = {\n \"token-classification\": TokenClassificationPipeline,\n \"text-classification\": TextClassificationPipeline,\n \"sentence-similarity\": SentenceSimilarityPipeline,\n}\n\n\[email protected]_cache()\ndef get_pipeline() -> Pipeline:\n task = os.environ[\"TASK\"]\n model_id = os.environ[\"MODEL_ID\"]\n if task not in ALLOWED_TASKS:\n raise EnvironmentError(f\"{task} is not a valid pipeline for model : {model_id}\")\n return ALLOWED_TASKS[task](model_id)\n\n\nroutes = [\n Route(\"/{whatever:path}\", status_ok),\n Route(\"/{whatever:path}\", pipeline_route, methods=[\"POST\"]),\n]\n\nmiddleware = [Middleware(GZipMiddleware, minimum_size=1000)]\nif os.environ.get(\"DEBUG\", \"\") == \"1\":\n from starlette.middleware.cors import CORSMiddleware\n\n middleware.append(\n Middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_headers=[\"*\"],\n allow_methods=[\"*\"],\n )\n )\n\napp = Starlette(routes=routes, middleware=middleware)\n\n\[email protected]_event(\"startup\")\nasync def startup_event():\n logger = logging.getLogger(\"uvicorn.access\")\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\"))\n logger.handlers = [handler]\n\n # Link between `api-inference-community` and framework code.\n app.get_pipeline = get_pipeline\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n\n\nif __name__ == \"__main__\":\n try:\n get_pipeline()\n except Exception:\n # We can fail so we can show exception later.\n pass\n", "path": "api-inference-community/docker_images/spacy/app/main.py"}]}
| 1,458 | 776 |
gh_patches_debug_29797
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1953
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error handler should not capture exceptions in DEBUG mode
## Description
Based on the [conclusion from the discussion](https://github.com/centerofci/mathesar/discussions/1150).
Improve the backend's debuggability via two changes:\r\n1. when the backend is in debug mode, add a pretty-printed stacktrace alongside the current error JSON;\r\n
2. stop wrapping unhandled exceptions in 4999 errors.
For some context, we're catching backend exceptions and wrapping them in custom errors so as to provide a uniform error interface to API clients. As it is now it has detrimental effects on debugging, because the resulting error messages don't include stacktraces. Above changes aim to address that.
--- END ISSUE ---
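A rough sketch of that pattern with Django REST Framework is shown below. The handler name is hypothetical, and Mathesar's real handler differs in details (for example, it keys off its own MATHESAR_MODE setting rather than Django's DEBUG flag).
```python
import traceback

from django.conf import settings
from rest_framework.exceptions import APIException
from rest_framework.views import exception_handler


def debug_aware_exception_handler(exc, context):
    response = exception_handler(exc, context)
    if response is None:
        # A non-API exception reached the handler.
        if settings.DEBUG:
            # Change 2: don't wrap it in a catch-all error code; re-raise so
            # the framework can render the full traceback for the developer.
            raise exc
        # In production, fall back to a generic wrapped API error.
        response = exception_handler(APIException(detail=str(exc)), context)
    if settings.DEBUG and response is not None:
        # Change 1: attach a pretty-printed stacktrace next to the error payload.
        response.data = {
            "errors": response.data,
            "stacktrace": traceback.format_exc().splitlines(),
        }
    return response
```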
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/exception_handlers.py`
Content:
```
1 import warnings
2
3 from django.conf import settings
4 from django.db import IntegrityError as DjangoIntegrityError
5 from django.utils.encoding import force_str
6 from rest_framework.views import exception_handler
7 from rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT
8 from sqlalchemy.exc import IntegrityError, ProgrammingError
9
10 from db.types.exceptions import UnsupportedTypeException
11 from mathesar.api.exceptions.database_exceptions import (
12 base_exceptions as base_api_exceptions,
13 exceptions as database_api_exceptions,
14 )
15 from mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions
16 from mathesar.api.exceptions.error_codes import ErrorCodes
17 from mathesar.api.exceptions.exception_mappers import integrity_error_mapper
18 from mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception
19 from mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError
20
21 exception_map = {
22 IntegrityError: integrity_error_mapper,
23 DjangoIntegrityError: integrity_error_mapper,
24 UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc),
25 ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc),
26 URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc),
27 URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc),
28 URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc)
29 }
30
31
32 def standardize_error_response(data):
33 for index, error in enumerate(data):
34 if 'code' in error:
35 if error['code'] is not None and str(error['code']) != 'None':
36 data[index]['code'] = int(error['code'])
37 else:
38 data[index]['code'] = ErrorCodes.UnknownError.value
39 if 'detail' not in error:
40 data[index]['detail'] = error.pop('details', {})
41 return data
42
43
44 def mathesar_exception_handler(exc, context):
45 response = exception_handler(exc, context)
46 # DRF default exception handler does not handle non API errors,
47 # So we convert it to proper API response
48 if not response:
49 # Check if we have an equivalent API exception that is able to convert the exception to proper error
50 mapped_exception_class = exception_map.get(exc.__class__)
51 if mapped_exception_class is None:
52 if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False):
53 mapped_exception_class = get_default_api_exception
54 else:
55 raise exc
56 api_exception = mapped_exception_class(exc)
57 response = exception_handler(api_exception, context)
58
59 if response is not None:
60 # Check if conforms to the api spec
61 if is_pretty(response.data):
62 # Validation exception converts error_codes from integer to string, we need to convert it back into
63 response.data = standardize_error_response(response.data)
64 return response
65 # Certain error raised by drf automatically don't follow the api error spec,
66 # so we convert those into proper format
67 else:
68 warnings.warn("Error Response does not conform to the api spec. Please handle the exception properly")
69 error_code = FRIENDLY_EXCEPTION_DICT.get(
70 exc.__class__.__name__, None
71 )
72 if error_code is None and settings.MATHESAR_MODE != "PRODUCTION":
73 raise Exception("Error Response does not conform to the api spec. Please handle the exception properly")
74
75 # Errors thrown by drf automatically
76 if isinstance(response.data, dict):
77 error_message = response.data.pop('detail', '')
78
79 response_data = {}
80 response_data['code'] = error_code
81 response_data['message'] = error_message
82 response_data['details'] = {'exception': force_str(exc)}
83 response.data = [response_data]
84 return response
85
86
87 def is_pretty(data):
88 if not isinstance(data, list):
89 return False
90 else:
91 for error_details in data:
92 if (
93 not isinstance(error_details, dict)
94 or 'code' not in error_details
95 or 'message' not in error_details
96 ):
97 return False
98 return True
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/exception_handlers.py b/mathesar/exception_handlers.py
--- a/mathesar/exception_handlers.py
+++ b/mathesar/exception_handlers.py
@@ -1,4 +1,5 @@
import warnings
+import traceback
from django.conf import settings
from django.db import IntegrityError as DjangoIntegrityError
@@ -38,6 +39,9 @@
data[index]['code'] = ErrorCodes.UnknownError.value
if 'detail' not in error:
data[index]['detail'] = error.pop('details', {})
+ # Adds a Stack-trace of the error for better debugging
+ if settings.MATHESAR_MODE == 'DEVELOPMENT':
+ data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc())
return data
@@ -80,6 +84,9 @@
response_data['code'] = error_code
response_data['message'] = error_message
response_data['details'] = {'exception': force_str(exc)}
+ # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT
+ if settings.MATHESAR_MODE == 'DEVELOPMENT':
+ response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc())
response.data = [response_data]
return response
@@ -96,3 +103,8 @@
):
return False
return True
+
+
+def reformat_stacktrace(stacktrace):
+ stacktrace_list = stacktrace.splitlines()[1:]
+ return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]
|
{"golden_diff": "diff --git a/mathesar/exception_handlers.py b/mathesar/exception_handlers.py\n--- a/mathesar/exception_handlers.py\n+++ b/mathesar/exception_handlers.py\n@@ -1,4 +1,5 @@\n import warnings\n+import traceback\n \n from django.conf import settings\n from django.db import IntegrityError as DjangoIntegrityError\n@@ -38,6 +39,9 @@\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n+ # Adds a Stack-trace of the error for better debugging\n+ if settings.MATHESAR_MODE == 'DEVELOPMENT':\n+ data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n return data\n \n \n@@ -80,6 +84,9 @@\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n+ # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT\n+ if settings.MATHESAR_MODE == 'DEVELOPMENT':\n+ response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n response.data = [response_data]\n return response\n \n@@ -96,3 +103,8 @@\n ):\n return False\n return True\n+\n+\n+def reformat_stacktrace(stacktrace):\n+ stacktrace_list = stacktrace.splitlines()[1:]\n+ return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]\n", "issue": "Error handler should not capture exceptions in DEBUG mode\n## Description\r\nBased on the [conclusion from the discussion](https://github.com/centerofci/mathesar/discussions/1150).\r\n\r\nImprove backend's debugability via two changes:\r\n1. when backend is in debug-mode, add a pretty-printed stacktrace alongside the current error JSON;\r\n2. stop wrapping unhandled exceptions in 4999 errors.\r\n\r\nFor some context, we're catching backend exceptions and wrapping them in custom errors so as to provide a uniform error interface to API clients. As it is now it has detrimental effects on debugging, because the resulting error messages don't include stacktraces. 
Above changes aim to address that.\n", "before_files": [{"content": "import warnings\n\nfrom django.conf import settings\nfrom django.db import IntegrityError as DjangoIntegrityError\nfrom django.utils.encoding import force_str\nfrom rest_framework.views import exception_handler\nfrom rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT\nfrom sqlalchemy.exc import IntegrityError, ProgrammingError\n\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.exceptions.database_exceptions import (\n base_exceptions as base_api_exceptions,\n exceptions as database_api_exceptions,\n)\nfrom mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.exception_mappers import integrity_error_mapper\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception\nfrom mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError\n\nexception_map = {\n IntegrityError: integrity_error_mapper,\n DjangoIntegrityError: integrity_error_mapper,\n UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc),\n ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc),\n URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc),\n URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc),\n URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc)\n}\n\n\ndef standardize_error_response(data):\n for index, error in enumerate(data):\n if 'code' in error:\n if error['code'] is not None and str(error['code']) != 'None':\n data[index]['code'] = int(error['code'])\n else:\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n return data\n\n\ndef mathesar_exception_handler(exc, context):\n response = exception_handler(exc, context)\n # DRF default exception handler does not handle non API errors,\n # So we convert it to proper API response\n if not response:\n # Check if we have an equivalent API exception that is able to convert the exception to proper error\n mapped_exception_class = exception_map.get(exc.__class__)\n if mapped_exception_class is None:\n if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False):\n mapped_exception_class = get_default_api_exception\n else:\n raise exc\n api_exception = mapped_exception_class(exc)\n response = exception_handler(api_exception, context)\n\n if response is not None:\n # Check if conforms to the api spec\n if is_pretty(response.data):\n # Validation exception converts error_codes from integer to string, we need to convert it back into\n response.data = standardize_error_response(response.data)\n return response\n # Certain error raised by drf automatically don't follow the api error spec,\n # so we convert those into proper format\n else:\n warnings.warn(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n error_code = FRIENDLY_EXCEPTION_DICT.get(\n exc.__class__.__name__, None\n )\n if error_code is None and settings.MATHESAR_MODE != \"PRODUCTION\":\n raise Exception(\"Error Response does not conform to the api spec. 
Please handle the exception properly\")\n\n # Errors thrown by drf automatically\n if isinstance(response.data, dict):\n error_message = response.data.pop('detail', '')\n\n response_data = {}\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n response.data = [response_data]\n return response\n\n\ndef is_pretty(data):\n if not isinstance(data, list):\n return False\n else:\n for error_details in data:\n if (\n not isinstance(error_details, dict)\n or 'code' not in error_details\n or 'message' not in error_details\n ):\n return False\n return True\n", "path": "mathesar/exception_handlers.py"}], "after_files": [{"content": "import warnings\nimport traceback\n\nfrom django.conf import settings\nfrom django.db import IntegrityError as DjangoIntegrityError\nfrom django.utils.encoding import force_str\nfrom rest_framework.views import exception_handler\nfrom rest_framework_friendly_errors.settings import FRIENDLY_EXCEPTION_DICT\nfrom sqlalchemy.exc import IntegrityError, ProgrammingError\n\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.exceptions.database_exceptions import (\n base_exceptions as base_api_exceptions,\n exceptions as database_api_exceptions,\n)\nfrom mathesar.api.exceptions.data_import_exceptions import exceptions as data_import_api_exceptions\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.exception_mappers import integrity_error_mapper\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import get_default_api_exception\nfrom mathesar.errors import URLDownloadError, URLNotReachable, URLInvalidContentTypeError\n\nexception_map = {\n IntegrityError: integrity_error_mapper,\n DjangoIntegrityError: integrity_error_mapper,\n UnsupportedTypeException: lambda exc: database_api_exceptions.UnsupportedTypeAPIException(exc),\n ProgrammingError: lambda exc: base_api_exceptions.ProgrammingAPIException(exc),\n URLDownloadError: lambda exc: data_import_api_exceptions.URLDownloadErrorAPIException(exc),\n URLNotReachable: lambda exc: data_import_api_exceptions.URLNotReachableAPIException(exc),\n URLInvalidContentTypeError: lambda exc: data_import_api_exceptions.URLInvalidContentTypeAPIException(exc)\n}\n\n\ndef standardize_error_response(data):\n for index, error in enumerate(data):\n if 'code' in error:\n if error['code'] is not None and str(error['code']) != 'None':\n data[index]['code'] = int(error['code'])\n else:\n data[index]['code'] = ErrorCodes.UnknownError.value\n if 'detail' not in error:\n data[index]['detail'] = error.pop('details', {})\n # Adds a Stack-trace of the error for better debugging\n if settings.MATHESAR_MODE == 'DEVELOPMENT':\n data[index]['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n return data\n\n\ndef mathesar_exception_handler(exc, context):\n response = exception_handler(exc, context)\n # DRF default exception handler does not handle non API errors,\n # So we convert it to proper API response\n if not response:\n # Check if we have an equivalent API exception that is able to convert the exception to proper error\n mapped_exception_class = exception_map.get(exc.__class__)\n if mapped_exception_class is None:\n if getattr(settings, 'MATHESAR_CAPTURE_UNHANDLED_EXCEPTION', False):\n mapped_exception_class = get_default_api_exception\n else:\n raise exc\n api_exception = mapped_exception_class(exc)\n response = exception_handler(api_exception, context)\n\n if response is not None:\n # 
Check if conforms to the api spec\n if is_pretty(response.data):\n # Validation exception converts error_codes from integer to string, we need to convert it back into\n response.data = standardize_error_response(response.data)\n return response\n # Certain error raised by drf automatically don't follow the api error spec,\n # so we convert those into proper format\n else:\n warnings.warn(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n error_code = FRIENDLY_EXCEPTION_DICT.get(\n exc.__class__.__name__, None\n )\n if error_code is None and settings.MATHESAR_MODE != \"PRODUCTION\":\n raise Exception(\"Error Response does not conform to the api spec. Please handle the exception properly\")\n\n # Errors thrown by drf automatically\n if isinstance(response.data, dict):\n error_message = response.data.pop('detail', '')\n\n response_data = {}\n response_data['code'] = error_code\n response_data['message'] = error_message\n response_data['details'] = {'exception': force_str(exc)}\n # Stacktrace should only be returned if MATHESAR_MODE is set to DEVELOPMENT\n if settings.MATHESAR_MODE == 'DEVELOPMENT':\n response_data['stacktrace'] = reformat_stacktrace(traceback.format_exc())\n response.data = [response_data]\n return response\n\n\ndef is_pretty(data):\n if not isinstance(data, list):\n return False\n else:\n for error_details in data:\n if (\n not isinstance(error_details, dict)\n or 'code' not in error_details\n or 'message' not in error_details\n ):\n return False\n return True\n\n\ndef reformat_stacktrace(stacktrace):\n stacktrace_list = stacktrace.splitlines()[1:]\n return [f'{i + 1}. {line.strip()}' for i, line in enumerate(stacktrace_list)]\n", "path": "mathesar/exception_handlers.py"}]}
| 1,448 | 356 |
gh_patches_debug_33714
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-3095
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DmenuRun doesn't take colors from extension_defaults
This is split from https://github.com/qtile/qtile/issues/2730
Qtile version: 0.18.2.dev130+g3fe5e0d1
The problem starts in commit: https://github.com/qtile/qtile/commit/d7016e1dc27680437d89efb322d5e7db03e6b8fe
## test case
```
EzKey("M-r", lazy.run_extension(extension.DmenuRun()), desc='dmenu run'),
...
extension_defaults = dict(
background='#000000',
foreground='#00ff00',
selected_background='#00ff00',
selected_foreground='#000000'
)
```
No errors
DmenuRun doesn't take colors from extension_defaults
This is split from https://github.com/qtile/qtile/issues/2730
Qtile version: 0.18.2.dev130+g3fe5e0d1
The problem starts in commit: https://github.com/qtile/qtile/commit/d7016e1dc27680437d89efb322d5e7db03e6b8fe
## test case
```
EzKey("M-r", lazy.run_extension(extension.DmenuRun()), desc='dmenu run'),
...
extension_defaults = dict(
background='#000000',
foreground='#00ff00',
selected_background='#00ff00',
selected_foreground='#000000'
)
```
No errors
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/extension/command_set.py`
Content:
```
1 # Copyright (C) 2018, zordsdavini
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 from os import system
22
23 from libqtile.extension.dmenu import Dmenu
24
25
26 class CommandSet(Dmenu):
27 """
28 Give list of commands to be executed in dmenu style.
29
30 ex. manage mocp deamon:
31
32 .. code-block:: python
33
34 Key([mod], 'm', lazy.run_extension(extension.CommandSet(
35 commands={
36 'play/pause': '[ $(mocp -i | wc -l) -lt 2 ] && mocp -p || mocp -G',
37 'next': 'mocp -f',
38 'previous': 'mocp -r',
39 'quit': 'mocp -x',
40 'open': 'urxvt -e mocp',
41 'shuffle': 'mocp -t shuffle',
42 'repeat': 'mocp -t repeat',
43 },
44 pre_commands=['[ $(mocp -i | wc -l) -lt 1 ] && mocp -S'],
45 **Theme.dmenu))),
46
47 """
48
49 defaults = [
50 ("commands", None, "dictionary of commands where key is runable command"),
51 ("pre_commands", None, "list of commands to be executed before getting dmenu answer"),
52 ]
53
54 def __init__(self, **config):
55 Dmenu.__init__(self, **config)
56 self.add_defaults(CommandSet.defaults)
57
58 def run(self):
59 if not self.commands:
60 return
61
62 if self.pre_commands:
63 for cmd in self.pre_commands:
64 system(cmd)
65
66 out = super(CommandSet, self).run(items=self.commands.keys())
67
68 try:
69 sout = out.rstrip('\n')
70 except AttributeError:
71 # out is not a string (for example it's a Popen object returned
72 # by super(WindowList, self).run() when there are no menu items to
73 # list
74 return
75
76 if sout not in self.commands:
77 return
78
79 system(self.commands[sout])
80
```
Path: `libqtile/extension/base.py`
Content:
```
1 # Copyright (c) 2017 Dario Giovannetti
2 # Copyright (c) 2021 elParaguayo
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a copy
5 # of this software and associated documentation files (the "Software"), to deal
6 # in the Software without restriction, including without limitation the rights
7 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 # copies of the Software, and to permit persons to whom the Software is
9 # furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 # SOFTWARE.
21 import re
22 import shlex
23 from subprocess import PIPE, Popen
24 from typing import Any, List, Tuple # noqa: F401
25
26 from libqtile import configurable
27 from libqtile.log_utils import logger
28
29 RGB = re.compile(r"^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$")
30
31
32 class _Extension(configurable.Configurable):
33 """Base Extension class"""
34
35 installed_extensions = [] # type: List
36
37 defaults = [
38 ("font", "sans", "defines the font name to be used"),
39 ("fontsize", None, "defines the font size to be used"),
40 ("background", None, "defines the normal background color (#RGB or #RRGGBB)"),
41 ("foreground", None, "defines the normal foreground color (#RGB or #RRGGBB)"),
42 ("selected_background", None, "defines the selected background color (#RGB or #RRGGBB)"),
43 ("selected_foreground", None, "defines the selected foreground color (#RGB or #RRGGBB)"),
44 ]
45
46 def __init__(self, **config):
47 configurable.Configurable.__init__(self, **config)
48 self.add_defaults(_Extension.defaults)
49 _Extension.installed_extensions.append(self)
50 self._check_colors()
51
52 def _check_colors(self):
53 """
54 dmenu needs colours to be in #rgb or #rrggbb format.
55
56 Checks colour value, removes invalid values and adds # if missing.
57 """
58 for c in ["background", "foreground", "selected_background", "selected_foreground"]:
59 col = getattr(self, c, None)
60 if col is None:
61 continue
62
63 if not isinstance(col, str) or not RGB.match(col):
64 logger.warning(
65 f"Invalid extension '{c}' color: {col}. "
66 f"Must be #RGB or #RRGGBB string."
67 )
68 setattr(self, c, None)
69 continue
70
71 if not col.startswith("#"):
72 col = f"#{col}"
73 setattr(self, c, col)
74
75 def _configure(self, qtile):
76 self.qtile = qtile
77
78 def run(self):
79 """
80 This method must be implemented by the subclasses.
81 """
82 raise NotImplementedError()
83
84
85 class RunCommand(_Extension):
86 """
87 Run an arbitrary command.
88
89 Mostly useful as a superclass for more specific extensions that need to
90 interact with the qtile object.
91
92 Also consider simply using lazy.spawn() or writing a
93 `client <http://docs.qtile.org/en/latest/manual/commands/scripting.html>`_.
94 """
95 defaults = [
96 # NOTE: Do not use a list as a default value, since it would be shared
97 # among all the objects inheriting this class, and if one of them
98 # modified it, all the other objects would see the modified list;
99 # use a string or a tuple instead, which are immutable
100 ("command", None, "the command to be launched (string or list with arguments)"),
101 ] # type: List[Tuple[str, Any, str]]
102
103 def __init__(self, **config):
104 _Extension.__init__(self, **config)
105 self.add_defaults(RunCommand.defaults)
106 self.configured_command = None
107
108 def run(self):
109 """
110 An extension can inherit this class, define configured_command and use
111 the process object by overriding this method and using super():
112
113 .. code-block:: python
114
115 def _configure(self, qtile):
116 Superclass._configure(self, qtile)
117 self.configured_command = "foo --bar"
118
119 def run(self):
120 process = super(Subclass, self).run()
121 """
122 if self.configured_command:
123 if isinstance(self.configured_command, str):
124 self.configured_command = shlex.split(self.configured_command)
125 # Else assume that self.configured_command is already a sequence
126 else:
127 self.configured_command = self.command
128 return Popen(self.configured_command, stdout=PIPE, stdin=PIPE)
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/extension/base.py b/libqtile/extension/base.py
--- a/libqtile/extension/base.py
+++ b/libqtile/extension/base.py
@@ -47,13 +47,15 @@
configurable.Configurable.__init__(self, **config)
self.add_defaults(_Extension.defaults)
_Extension.installed_extensions.append(self)
- self._check_colors()
def _check_colors(self):
"""
dmenu needs colours to be in #rgb or #rrggbb format.
Checks colour value, removes invalid values and adds # if missing.
+
+ NB This should not be called in _Extension.__init__ as _Extension.global_defaults
+ may not have been set at this point.
"""
for c in ["background", "foreground", "selected_background", "selected_foreground"]:
col = getattr(self, c, None)
@@ -74,6 +76,7 @@
def _configure(self, qtile):
self.qtile = qtile
+ self._check_colors()
def run(self):
"""
diff --git a/libqtile/extension/command_set.py b/libqtile/extension/command_set.py
--- a/libqtile/extension/command_set.py
+++ b/libqtile/extension/command_set.py
@@ -18,8 +18,6 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-from os import system
-
from libqtile.extension.dmenu import Dmenu
@@ -61,7 +59,7 @@
if self.pre_commands:
for cmd in self.pre_commands:
- system(cmd)
+ self.qtile.cmd_spawn(cmd)
out = super(CommandSet, self).run(items=self.commands.keys())
@@ -76,4 +74,4 @@
if sout not in self.commands:
return
- system(self.commands[sout])
+ self.qtile.cmd_spawn(self.commands[sout])
|
{"golden_diff": "diff --git a/libqtile/extension/base.py b/libqtile/extension/base.py\n--- a/libqtile/extension/base.py\n+++ b/libqtile/extension/base.py\n@@ -47,13 +47,15 @@\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Extension.defaults)\n _Extension.installed_extensions.append(self)\n- self._check_colors()\n \n def _check_colors(self):\n \"\"\"\n dmenu needs colours to be in #rgb or #rrggbb format.\n \n Checks colour value, removes invalid values and adds # if missing.\n+\n+ NB This should not be called in _Extension.__init__ as _Extension.global_defaults\n+ may not have been set at this point.\n \"\"\"\n for c in [\"background\", \"foreground\", \"selected_background\", \"selected_foreground\"]:\n col = getattr(self, c, None)\n@@ -74,6 +76,7 @@\n \n def _configure(self, qtile):\n self.qtile = qtile\n+ self._check_colors()\n \n def run(self):\n \"\"\"\ndiff --git a/libqtile/extension/command_set.py b/libqtile/extension/command_set.py\n--- a/libqtile/extension/command_set.py\n+++ b/libqtile/extension/command_set.py\n@@ -18,8 +18,6 @@\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n \n-from os import system\n-\n from libqtile.extension.dmenu import Dmenu\n \n \n@@ -61,7 +59,7 @@\n \n if self.pre_commands:\n for cmd in self.pre_commands:\n- system(cmd)\n+ self.qtile.cmd_spawn(cmd)\n \n out = super(CommandSet, self).run(items=self.commands.keys())\n \n@@ -76,4 +74,4 @@\n if sout not in self.commands:\n return\n \n- system(self.commands[sout])\n+ self.qtile.cmd_spawn(self.commands[sout])\n", "issue": "DmenuRun doesn't take colors from extension_defaults\nThis is split from https://github.com/qtile/qtile/issues/2730\r\n\r\nQtile version: 0.18.2.dev130+g3fe5e0d1\r\nThe problem starts in commit: https://github.com/qtile/qtile/commit/d7016e1dc27680437d89efb322d5e7db03e6b8fe\r\n\r\n## test case\r\n```\r\n EzKey(\"M-r\", lazy.run_extension(extension.DmenuRun()), desc='dmenu run'),\r\n ...\r\nextension_defaults = dict(\r\n background='#000000',\r\n foreground='#00ff00',\r\n selected_background='#00ff00',\r\n selected_foreground='#000000'\r\n)\r\n```\r\n\r\nNo errors\nDmenuRun doesn't take colors from extension_defaults\nThis is split from https://github.com/qtile/qtile/issues/2730\r\n\r\nQtile version: 0.18.2.dev130+g3fe5e0d1\r\nThe problem starts in commit: https://github.com/qtile/qtile/commit/d7016e1dc27680437d89efb322d5e7db03e6b8fe\r\n\r\n## test case\r\n```\r\n EzKey(\"M-r\", lazy.run_extension(extension.DmenuRun()), desc='dmenu run'),\r\n ...\r\nextension_defaults = dict(\r\n background='#000000',\r\n foreground='#00ff00',\r\n selected_background='#00ff00',\r\n selected_foreground='#000000'\r\n)\r\n```\r\n\r\nNo errors\n", "before_files": [{"content": "# Copyright (C) 2018, zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom os import system\n\nfrom libqtile.extension.dmenu import Dmenu\n\n\nclass CommandSet(Dmenu):\n \"\"\"\n Give list of commands to be executed in dmenu style.\n\n ex. manage mocp deamon:\n\n .. code-block:: python\n\n Key([mod], 'm', lazy.run_extension(extension.CommandSet(\n commands={\n 'play/pause': '[ $(mocp -i | wc -l) -lt 2 ] && mocp -p || mocp -G',\n 'next': 'mocp -f',\n 'previous': 'mocp -r',\n 'quit': 'mocp -x',\n 'open': 'urxvt -e mocp',\n 'shuffle': 'mocp -t shuffle',\n 'repeat': 'mocp -t repeat',\n },\n pre_commands=['[ $(mocp -i | wc -l) -lt 1 ] && mocp -S'],\n **Theme.dmenu))),\n\n \"\"\"\n\n defaults = [\n (\"commands\", None, \"dictionary of commands where key is runable command\"),\n (\"pre_commands\", None, \"list of commands to be executed before getting dmenu answer\"),\n ]\n\n def __init__(self, **config):\n Dmenu.__init__(self, **config)\n self.add_defaults(CommandSet.defaults)\n\n def run(self):\n if not self.commands:\n return\n\n if self.pre_commands:\n for cmd in self.pre_commands:\n system(cmd)\n\n out = super(CommandSet, self).run(items=self.commands.keys())\n\n try:\n sout = out.rstrip('\\n')\n except AttributeError:\n # out is not a string (for example it's a Popen object returned\n # by super(WindowList, self).run() when there are no menu items to\n # list\n return\n\n if sout not in self.commands:\n return\n\n system(self.commands[sout])\n", "path": "libqtile/extension/command_set.py"}, {"content": "# Copyright (c) 2017 Dario Giovannetti\n# Copyright (c) 2021 elParaguayo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport re\nimport shlex\nfrom subprocess import PIPE, Popen\nfrom typing import Any, List, Tuple # noqa: F401\n\nfrom libqtile import configurable\nfrom libqtile.log_utils import logger\n\nRGB = re.compile(r\"^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$\")\n\n\nclass _Extension(configurable.Configurable):\n \"\"\"Base Extension class\"\"\"\n\n installed_extensions = [] # type: List\n\n defaults = [\n (\"font\", \"sans\", \"defines the font name to be used\"),\n (\"fontsize\", None, \"defines the font size to be used\"),\n (\"background\", None, \"defines the normal background color (#RGB or #RRGGBB)\"),\n (\"foreground\", None, \"defines the normal foreground color (#RGB or #RRGGBB)\"),\n (\"selected_background\", None, \"defines the selected background color (#RGB or #RRGGBB)\"),\n (\"selected_foreground\", None, \"defines the selected foreground color (#RGB or #RRGGBB)\"),\n ]\n\n def __init__(self, **config):\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Extension.defaults)\n _Extension.installed_extensions.append(self)\n self._check_colors()\n\n def _check_colors(self):\n \"\"\"\n dmenu needs colours to be in #rgb or #rrggbb format.\n\n Checks colour value, removes invalid values and adds # if missing.\n \"\"\"\n for c in [\"background\", \"foreground\", \"selected_background\", \"selected_foreground\"]:\n col = getattr(self, c, None)\n if col is None:\n continue\n\n if not isinstance(col, str) or not RGB.match(col):\n logger.warning(\n f\"Invalid extension '{c}' color: {col}. \"\n f\"Must be #RGB or #RRGGBB string.\"\n )\n setattr(self, c, None)\n continue\n\n if not col.startswith(\"#\"):\n col = f\"#{col}\"\n setattr(self, c, col)\n\n def _configure(self, qtile):\n self.qtile = qtile\n\n def run(self):\n \"\"\"\n This method must be implemented by the subclasses.\n \"\"\"\n raise NotImplementedError()\n\n\nclass RunCommand(_Extension):\n \"\"\"\n Run an arbitrary command.\n\n Mostly useful as a superclass for more specific extensions that need to\n interact with the qtile object.\n\n Also consider simply using lazy.spawn() or writing a\n `client <http://docs.qtile.org/en/latest/manual/commands/scripting.html>`_.\n \"\"\"\n defaults = [\n # NOTE: Do not use a list as a default value, since it would be shared\n # among all the objects inheriting this class, and if one of them\n # modified it, all the other objects would see the modified list;\n # use a string or a tuple instead, which are immutable\n (\"command\", None, \"the command to be launched (string or list with arguments)\"),\n ] # type: List[Tuple[str, Any, str]]\n\n def __init__(self, **config):\n _Extension.__init__(self, **config)\n self.add_defaults(RunCommand.defaults)\n self.configured_command = None\n\n def run(self):\n \"\"\"\n An extension can inherit this class, define configured_command and use\n the process object by overriding this method and using super():\n\n .. 
code-block:: python\n\n def _configure(self, qtile):\n Superclass._configure(self, qtile)\n self.configured_command = \"foo --bar\"\n\n def run(self):\n process = super(Subclass, self).run()\n \"\"\"\n if self.configured_command:\n if isinstance(self.configured_command, str):\n self.configured_command = shlex.split(self.configured_command)\n # Else assume that self.configured_command is already a sequence\n else:\n self.configured_command = self.command\n return Popen(self.configured_command, stdout=PIPE, stdin=PIPE)\n", "path": "libqtile/extension/base.py"}], "after_files": [{"content": "# Copyright (C) 2018, zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile.extension.dmenu import Dmenu\n\n\nclass CommandSet(Dmenu):\n \"\"\"\n Give list of commands to be executed in dmenu style.\n\n ex. manage mocp deamon:\n\n .. 
code-block:: python\n\n Key([mod], 'm', lazy.run_extension(extension.CommandSet(\n commands={\n 'play/pause': '[ $(mocp -i | wc -l) -lt 2 ] && mocp -p || mocp -G',\n 'next': 'mocp -f',\n 'previous': 'mocp -r',\n 'quit': 'mocp -x',\n 'open': 'urxvt -e mocp',\n 'shuffle': 'mocp -t shuffle',\n 'repeat': 'mocp -t repeat',\n },\n pre_commands=['[ $(mocp -i | wc -l) -lt 1 ] && mocp -S'],\n **Theme.dmenu))),\n\n \"\"\"\n\n defaults = [\n (\"commands\", None, \"dictionary of commands where key is runable command\"),\n (\"pre_commands\", None, \"list of commands to be executed before getting dmenu answer\"),\n ]\n\n def __init__(self, **config):\n Dmenu.__init__(self, **config)\n self.add_defaults(CommandSet.defaults)\n\n def run(self):\n if not self.commands:\n return\n\n if self.pre_commands:\n for cmd in self.pre_commands:\n self.qtile.cmd_spawn(cmd)\n\n out = super(CommandSet, self).run(items=self.commands.keys())\n\n try:\n sout = out.rstrip('\\n')\n except AttributeError:\n # out is not a string (for example it's a Popen object returned\n # by super(WindowList, self).run() when there are no menu items to\n # list\n return\n\n if sout not in self.commands:\n return\n\n self.qtile.cmd_spawn(self.commands[sout])\n", "path": "libqtile/extension/command_set.py"}, {"content": "# Copyright (c) 2017 Dario Giovannetti\n# Copyright (c) 2021 elParaguayo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport re\nimport shlex\nfrom subprocess import PIPE, Popen\nfrom typing import Any, List, Tuple # noqa: F401\n\nfrom libqtile import configurable\nfrom libqtile.log_utils import logger\n\nRGB = re.compile(r\"^#?([a-fA-F0-9]{3}|[a-fA-F0-9]{6})$\")\n\n\nclass _Extension(configurable.Configurable):\n \"\"\"Base Extension class\"\"\"\n\n installed_extensions = [] # type: List\n\n defaults = [\n (\"font\", \"sans\", \"defines the font name to be used\"),\n (\"fontsize\", None, \"defines the font size to be used\"),\n (\"background\", None, \"defines the normal background color (#RGB or #RRGGBB)\"),\n (\"foreground\", None, \"defines the normal foreground color (#RGB or #RRGGBB)\"),\n (\"selected_background\", None, \"defines the selected background color (#RGB or #RRGGBB)\"),\n (\"selected_foreground\", None, \"defines the selected foreground color (#RGB or #RRGGBB)\"),\n ]\n\n def __init__(self, **config):\n configurable.Configurable.__init__(self, **config)\n self.add_defaults(_Extension.defaults)\n _Extension.installed_extensions.append(self)\n\n def _check_colors(self):\n \"\"\"\n dmenu needs colours to be in #rgb or #rrggbb format.\n\n Checks colour value, removes invalid values and adds # if missing.\n\n NB This should not be called in _Extension.__init__ as _Extension.global_defaults\n may not have been set at this point.\n \"\"\"\n for c in [\"background\", \"foreground\", \"selected_background\", \"selected_foreground\"]:\n col = getattr(self, c, None)\n if col is None:\n continue\n\n if not isinstance(col, str) or not RGB.match(col):\n logger.warning(\n f\"Invalid extension '{c}' color: {col}. \"\n f\"Must be #RGB or #RRGGBB string.\"\n )\n setattr(self, c, None)\n continue\n\n if not col.startswith(\"#\"):\n col = f\"#{col}\"\n setattr(self, c, col)\n\n def _configure(self, qtile):\n self.qtile = qtile\n self._check_colors()\n\n def run(self):\n \"\"\"\n This method must be implemented by the subclasses.\n \"\"\"\n raise NotImplementedError()\n\n\nclass RunCommand(_Extension):\n \"\"\"\n Run an arbitrary command.\n\n Mostly useful as a superclass for more specific extensions that need to\n interact with the qtile object.\n\n Also consider simply using lazy.spawn() or writing a\n `client <http://docs.qtile.org/en/latest/manual/commands/scripting.html>`_.\n \"\"\"\n defaults = [\n # NOTE: Do not use a list as a default value, since it would be shared\n # among all the objects inheriting this class, and if one of them\n # modified it, all the other objects would see the modified list;\n # use a string or a tuple instead, which are immutable\n (\"command\", None, \"the command to be launched (string or list with arguments)\"),\n ] # type: List[Tuple[str, Any, str]]\n\n def __init__(self, **config):\n _Extension.__init__(self, **config)\n self.add_defaults(RunCommand.defaults)\n self.configured_command = None\n\n def run(self):\n \"\"\"\n An extension can inherit this class, define configured_command and use\n the process object by overriding this method and using super():\n\n .. 
code-block:: python\n\n def _configure(self, qtile):\n Superclass._configure(self, qtile)\n self.configured_command = \"foo --bar\"\n\n def run(self):\n process = super(Subclass, self).run()\n \"\"\"\n if self.configured_command:\n if isinstance(self.configured_command, str):\n self.configured_command = shlex.split(self.configured_command)\n # Else assume that self.configured_command is already a sequence\n else:\n self.configured_command = self.command\n return Popen(self.configured_command, stdout=PIPE, stdin=PIPE)\n", "path": "libqtile/extension/base.py"}]}
| 2,881 | 437 |
gh_patches_debug_995
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-3136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The content area in the docs is too narrow
## 📚 Documentation
In the documentation strings, rendered code examples only fit 63 characters of width. It would be nice if we could make the content area a bit larger so that code examples fit at least 80 characters. On my screen, the content area is exactly the middle third of the screen, with the left and right thirds devoted to menus.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2
3 # -- Path setup --------------------------------------------------------------
4
5 import atexit
6 import os
7 import shutil
8 import subprocess
9 import sys
10 from pathlib import Path
11 from typing import Any
12 from unittest import mock
13
14 # -- Project information -----------------------------------------------------
15
16 project = "Pyodide"
17 copyright = "2019-2022, Pyodide contributors and Mozilla"
18 pyodide_version = "0.22.0.dev0"
19
20 if ".dev" in pyodide_version or os.environ.get("READTHEDOCS_VERSION") == "latest":
21 CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/"
22 else:
23 CDN_URL = f"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/"
24
25 # -- General configuration ---------------------------------------------------
26
27 # If your documentation needs a minimal Sphinx version, state it here.
28 #
29 # needs_sphinx = '1.0'
30
31 extensions = [
32 "sphinx.ext.autodoc",
33 "sphinx.ext.autosummary",
34 "sphinxcontrib.napoleon",
35 "myst_parser",
36 "sphinx_js",
37 "autodocsumm",
38 "sphinx_panels",
39 "sphinx_pyodide",
40 "sphinx_argparse_cli",
41 "versionwarning.extension",
42 "sphinx_issues",
43 ]
44
45 myst_enable_extensions = ["substitution"]
46
47 js_language = "typescript"
48 jsdoc_config_path = "../src/js/tsconfig.json"
49 root_for_relative_js_paths = "../src/"
50 issues_github_path = "pyodide/pyodide"
51
52 versionwarning_messages = {
53 "latest": (
54 "This is the development version of the documentation. "
55 'See <a href="https://pyodide.org/">here</a> for latest stable '
56 "documentation. Please do not use Pyodide with non "
57 "versioned (`dev`) URLs from the CDN for deployed applications!"
58 )
59 }
60 versionwarning_body_selector = "#main-content > div"
61
62 autosummary_generate = True
63 autodoc_default_flags = ["members", "inherited-members"]
64
65 # Add modules to be mocked.
66 mock_modules = ["ruamel.yaml", "tomli"]
67
68 # Add any paths that contain templates here, relative to this directory.
69 templates_path = ["_templates"]
70
71 # The suffix(es) of source filenames.
72 source_suffix = [".rst", ".md"]
73
74 # The master toctree document.
75 master_doc = "index"
76
77 # The language for content autogenerated by Sphinx.
78 language = None
79
80 # List of patterns, relative to source directory, that match files and
81 # directories to ignore when looking for source files.
82 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md"]
83
84 # The name of the Pygments (syntax highlighting) style to use.
85 pygments_style = None
86
87 # -- Options for HTML output -------------------------------------------------
88
89 # The theme to use for HTML and HTML Help pages. See the documentation for
90 # a list of builtin themes.
91 #
92 html_theme = "sphinx_book_theme"
93 html_logo = "_static/img/pyodide-logo.png"
94
95 # theme-specific options
96 html_theme_options: dict[str, Any] = {}
97
98 # paths that contain custom static files (such as style sheets)
99 html_static_path = ["_static"]
100
101
102 html_css_files = [
103 "css/pyodide.css",
104 ]
105
106 # Custom sidebar templates, must be a dictionary that maps document names
107 # to template names.
108 # html_sidebars = {}
109
110 # -- Options for HTMLHelp output ---------------------------------------------
111
112 # Output file base name for HTML help builder.
113 htmlhelp_basename = "Pyodidedoc"
114
115 # A list of files that should not be packed into the epub file.
116 epub_exclude_files = ["search.html"]
117
118
119 def delete_attrs(cls):
120 for name in dir(cls):
121 if not name.startswith("_"):
122 try:
123 delattr(cls, name)
124 except Exception:
125 pass
126
127
128 # Try not to cause side effects if we are imported incidentally.
129
130 try:
131 import sphinx
132
133 IN_SPHINX = hasattr(sphinx, "application")
134 except ImportError:
135 IN_SPHINX = False
136
137 IN_READTHEDOCS = "READTHEDOCS" in os.environ
138
139 if IN_READTHEDOCS:
140 env = {"PYODIDE_BASE_URL": CDN_URL}
141 os.makedirs("_build/html", exist_ok=True)
142 res = subprocess.check_output(
143 ["make", "-C", "..", "docs/_build/html/console.html"],
144 env=env,
145 stderr=subprocess.STDOUT,
146 encoding="utf-8",
147 )
148 print(res)
149 # insert the Plausible analytics script to console.html
150 console_path = Path("_build/html/console.html")
151 console_html = console_path.read_text().splitlines(keepends=True)
152 for idx, line in enumerate(list(console_html)):
153 if 'pyodide.js">' in line:
154 # insert the analytics script after the `pyodide.js` script
155 console_html.insert(
156 idx,
157 '<script defer data-domain="pyodide.org" src="https://plausible.io/js/plausible.js"></script>\n',
158 )
159 break
160 else:
161 raise ValueError("Could not find pyodide.js in the <head> section")
162 console_path.write_text("".join(console_html))
163
164
165 if IN_SPHINX:
166 # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10
167 import collections
168 from typing import Callable, Mapping
169
170 collections.Mapping = Mapping # type: ignore[attr-defined]
171 collections.Callable = Callable # type: ignore[attr-defined]
172
173 base_dir = Path(__file__).resolve().parent.parent
174 path_dirs = [
175 str(base_dir),
176 str(base_dir / "pyodide-build"),
177 str(base_dir / "docs/sphinx_pyodide"),
178 str(base_dir / "src/py"),
179 str(base_dir / "packages/micropip/src"),
180 ]
181 sys.path = path_dirs + sys.path
182
183 import micropip # noqa: F401
184 import pyodide
185
186 # We hacked it so that autodoc will look for submodules, but only if we import
187 # them here. TODO: look these up in the source directory?
188 import pyodide.code
189 import pyodide.console
190 import pyodide.ffi.wrappers
191 import pyodide.http
192 import pyodide.webloop
193
194 # The full version, including alpha/beta/rc tags.
195 release = version = pyodide.__version__
196 html_title = f"Version {version}"
197
198 shutil.copy("../src/core/pyproxy.ts", "../src/js/pyproxy.gen.ts")
199 shutil.copy("../src/core/error_handling.ts", "../src/js/error_handling.gen.ts")
200 js_source_path = [str(x) for x in Path("../src/js").glob("*.ts")]
201
202 def remove_pyproxy_gen_ts():
203 Path("../src/js/pyproxy.gen.ts").unlink(missing_ok=True)
204
205 atexit.register(remove_pyproxy_gen_ts)
206
207 os.environ["PATH"] += f':{str(Path("../src/js/node_modules/.bin").resolve())}'
208 print(os.environ["PATH"])
209 if IN_READTHEDOCS:
210 subprocess.run(["npm", "ci"], cwd="../src/js")
211 elif not shutil.which("typedoc"):
212 raise Exception(
213 "Before building the Pyodide docs you must run 'npm install' in 'src/js'."
214 )
215
216 # Prevent API docs for webloop methods: they are the same as for base event loop
217 # and it clutters api docs too much
218 delete_attrs(pyodide.webloop.WebLoop)
219 delete_attrs(pyodide.webloop.WebLoopPolicy)
220 delete_attrs(pyodide.console.PyodideConsole)
221
222 for module in mock_modules:
223 sys.modules[module] = mock.Mock()
224
225
226 # https://github.com/sphinx-doc/sphinx/issues/4054
227 def globalReplace(app, docname, source):
228 result = source[0]
229 for key in app.config.global_replacements:
230 result = result.replace(key, app.config.global_replacements[key])
231 source[0] = result
232
233
234 global_replacements = {"{{PYODIDE_CDN_URL}}": CDN_URL}
235
236
237 def setup(app):
238 app.add_config_value("global_replacements", {}, True)
239 app.connect("source-read", globalReplace)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -11,6 +11,8 @@
from typing import Any
from unittest import mock
+panels_add_bootstrap_css = False
+
# -- Project information -----------------------------------------------------
project = "Pyodide"
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -11,6 +11,8 @@\n from typing import Any\n from unittest import mock\n \n+panels_add_bootstrap_css = False\n+\n # -- Project information -----------------------------------------------------\n \n project = \"Pyodide\"\n", "issue": "The content area in the docs is too narrow\n## \ud83d\udcda Documentation\r\n\r\nIn the documentation strings, rendered code examples only fit 63 characters of width. It would be nice if we could make the content area a bit larger so that code examples fit at least 80 characters. On my screen, the content area is exactly the middle third of the screen, with the left and right thirds devoted to menus.\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.22.0.dev0\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n # insert the Plausible analytics script to console.html\n console_path = Path(\"_build/html/console.html\")\n console_html = console_path.read_text().splitlines(keepends=True)\n for idx, line in enumerate(list(console_html)):\n if 'pyodide.js\">' in line:\n # insert the analytics script after the `pyodide.js` script\n console_html.insert(\n idx,\n '<script defer data-domain=\"pyodide.org\" src=\"https://plausible.io/js/plausible.js\"></script>\\n',\n )\n break\n else:\n raise ValueError(\"Could not find pyodide.js in the <head> section\")\n console_path.write_text(\"\".join(console_html))\n\n\nif IN_SPHINX:\n # Compatibility shims. sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. 
TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\npanels_add_bootstrap_css = False\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\npyodide_version = \"0.22.0.dev0\"\n\nif \".dev\" in pyodide_version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\nelse:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{pyodide_version}/full/\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. 
Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add modules to be mocked.\nmock_modules = [\"ruamel.yaml\", \"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except Exception:\n pass\n\n\n# Try not to cause side effects if we are imported incidentally.\n\ntry:\n import sphinx\n\n IN_SPHINX = hasattr(sphinx, \"application\")\nexcept ImportError:\n IN_SPHINX = False\n\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\nif IN_READTHEDOCS:\n env = {\"PYODIDE_BASE_URL\": CDN_URL}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n # insert the Plausible analytics script to console.html\n console_path = Path(\"_build/html/console.html\")\n console_html = console_path.read_text().splitlines(keepends=True)\n for idx, line in enumerate(list(console_html)):\n if 'pyodide.js\">' in line:\n # insert the analytics script after the `pyodide.js` script\n console_html.insert(\n idx,\n '<script defer data-domain=\"pyodide.org\" src=\"https://plausible.io/js/plausible.js\"></script>\\n',\n )\n break\n else:\n raise ValueError(\"Could not find pyodide.js in the <head> section\")\n console_path.write_text(\"\".join(console_html))\n\n\nif IN_SPHINX:\n # Compatibility shims. 
sphinx-js and sphinxcontrib-napoleon have not been updated for Python 3.10\n import collections\n from typing import Callable, Mapping\n\n collections.Mapping = Mapping # type: ignore[attr-defined]\n collections.Callable = Callable # type: ignore[attr-defined]\n\n base_dir = Path(__file__).resolve().parent.parent\n path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n ]\n sys.path = path_dirs + sys.path\n\n import micropip # noqa: F401\n import pyodide\n\n # We hacked it so that autodoc will look for submodules, but only if we import\n # them here. TODO: look these up in the source directory?\n import pyodide.code\n import pyodide.console\n import pyodide.ffi.wrappers\n import pyodide.http\n import pyodide.webloop\n\n # The full version, including alpha/beta/rc tags.\n release = version = pyodide.__version__\n html_title = f\"Version {version}\"\n\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n elif not shutil.which(\"typedoc\"):\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef globalReplace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nglobal_replacements = {\"{{PYODIDE_CDN_URL}}\": CDN_URL}\n\n\ndef setup(app):\n app.add_config_value(\"global_replacements\", {}, True)\n app.connect(\"source-read\", globalReplace)\n", "path": "docs/conf.py"}]}
| 2,772 | 69 |
gh_patches_debug_3010
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-208
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Redhat 6.5 can no longer raise a TestError
Regressions report pass but number of tests has gone down on some simulators. Icarus for instance shows this.
```
 0.00ns INFO  cocotb.gpi   gpi_embed.c:213 in embed_sim_init   Running on Icarus Verilog version 0.10.0 (devel)
 0.00ns INFO  cocotb.gpi   gpi_embed.c:214 in embed_sim_init   Python interpreter initialised and cocotb loaded!
 0.00ns INFO  cocotb.gpi   __init__.py:96 in _initialise_testbench   Seeding Python random module with 1421853826
 0.00ns INFO  cocotb.gpi   __init__.py:110 in _initialise_testbench   Running tests with Cocotb v0.5a from /var/lib/jenkins/workspace/cocotb_icarus_x86_64
 0.00ns ERROR cocotb.coroutine.fail   decorators.py:99 in __init__   test_duplicate_yield isn't a value coroutine! Did you use the yield keyword?
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/__init__.py", line 128, in _initialise_testbench
regression.initialise()
File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/regression.py", line 123, in initialise
test = thing(self._dut)
File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/decorators.py", line 356, in _wrapped_test
raise_error(self, str(e))
File "/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/result.py", line 42, in raise_error
if sys.version_info.major >= 3:
AttributeError: 'tuple' object has no attribute 'major'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/result.py`
Content:
```
1 ''' Copyright (c) 2013 Potential Ventures Ltd
2 Copyright (c) 2013 SolarFlare Communications Inc
3 All rights reserved.
4
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 * Redistributions of source code must retain the above copyright
8 notice, this list of conditions and the following disclaimer.
9 * Redistributions in binary form must reproduce the above copyright
10 notice, this list of conditions and the following disclaimer in the
11 documentation and/or other materials provided with the distribution.
12 * Neither the name of Potential Ventures Ltd,
13 SolarFlare Communications Inc nor the
14 names of its contributors may be used to endorse or promote products
15 derived from this software without specific prior written permission.
16
17 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
27
28 # TODO: Coule use cStringIO?
29 import traceback
30 import sys
31 #from StringIO import StringIO
32 from io import StringIO, BytesIO
33
34 def raise_error(obj, msg):
35 """
36 Creates a TestError exception and raises it after printing a traceback
37
38 obj has a log method
39 msg is a string
40 """
41 exc_type, exc_value, exc_traceback = sys.exc_info()
42 if sys.version_info.major >= 3:
43 buff = StringIO()
44 traceback.print_tb(exc_traceback, file=buff)
45 else:
46 buff_bytes = BytesIO()
47 traceback.print_tb(exc_traceback, file=buff_bytes)
48 buff = StringIO(buff_bytes.getvalue().decode("UTF-8"))
49 obj.log.error("%s\n%s" % (msg, buff.getvalue()))
50 exception = TestError(msg)
51 exception.stderr.write(buff.getvalue())
52 raise exception
53
54 def create_error(obj, msg):
55 """
56 As above, but return the exception rather than raise it, simply to avoid
57 too many levels of nested try/except blocks
58 """
59 try:
60 raise_error(obj, msg)
61 except TestError as error:
62 return error
63 return TestError("Creating error traceback failed")
64
65
66 class ReturnValue(StopIteration):
67 def __init__(self, retval):
68 self.retval = retval
69
70 class TestComplete(StopIteration):
71 """
72 Exceptions are used to pass test results around.
73 """
74 def __init__(self, *args, **kwargs):
75 super(TestComplete, self).__init__(*args, **kwargs)
76 self.stdout = StringIO()
77 self.stderr = StringIO()
78
79 class TestError(TestComplete): pass
80
81 class TestFailure(TestComplete): pass
82
83 class TestSuccess(TestComplete): pass
84
85 class SimFailure(TestComplete): pass
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cocotb/result.py b/cocotb/result.py
--- a/cocotb/result.py
+++ b/cocotb/result.py
@@ -39,7 +39,8 @@
msg is a string
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
- if sys.version_info.major >= 3:
+ # 2.6 cannot use named access
+ if sys.version_info[0] >= 3:
buff = StringIO()
traceback.print_tb(exc_traceback, file=buff)
else:
|
{"golden_diff": "diff --git a/cocotb/result.py b/cocotb/result.py\n--- a/cocotb/result.py\n+++ b/cocotb/result.py\n@@ -39,7 +39,8 @@\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n- if sys.version_info.major >= 3:\n+ # 2.6 cannot use named access\n+ if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n", "issue": "Redhat 6.5 can no longer raise a TestError\nRegressions report pass but number of tests has gone done on some simulators. Icarus for instance shows this.\n\n```\n0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi gpi_embed.c:213 in embed_sim_init \u001b[34mRunning on Icarus Verilog version 0.10.0 (devel)\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi gpi_embed.c:214 in embed_sim_init \u001b[34mPython interpreter initialised and cocotb loaded!\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi __init__.py:96 in _initialise_testbench \u001b[34mSeeding Python random module with 1421853826\u001b[39m\n 0.00ns \u001b[34mINFO \u001b[39m cocotb.gpi __init__.py:110 in _initialise_testbench \u001b[34mRunning tests with Cocotb v0.5a from /var/lib/jenkins/workspace/cocotb_icarus_x86_64\u001b[39m\n 0.00ns \u001b[31mERROR \u001b[39m cocotb.coroutine.fail decorators.py:99 in __init__ \u001b[31mtest_duplicate_yield isn't a value coroutine! Did you use the yield keyword?\u001b[39m\nTraceback (most recent call last):\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/__init__.py\", line 128, in _initialise_testbench\n regression.initialise()\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/regression.py\", line 123, in initialise\n test = thing(self._dut)\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/decorators.py\", line 356, in _wrapped_test\n raise_error(self, str(e))\n File \"/var/lib/jenkins/workspace/cocotb_icarus_x86_64/cocotb/result.py\", line 42, in raise_error\n if sys.version_info.major >= 3:\nAttributeError: 'tuple' object has no attribute 'major'\n```\n\n", "before_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n# TODO: Coule use cStringIO?\nimport traceback\nimport sys\n#from StringIO import StringIO\nfrom io import StringIO, BytesIO\n\ndef raise_error(obj, msg):\n \"\"\"\n Creates a TestError exception and raises it after printing a traceback\n\n obj has a log method\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n if sys.version_info.major >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_tb(exc_traceback, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF-8\"))\n obj.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n raise exception\n\ndef create_error(obj, msg):\n \"\"\"\n As above, but return the exception rather than raise it, simply to avoid\n too many levels of nested try/except blocks\n \"\"\"\n try:\n raise_error(obj, msg)\n except TestError as error:\n return error\n return TestError(\"Creating error traceback failed\")\n\n\nclass ReturnValue(StopIteration):\n def __init__(self, retval):\n self.retval = retval\n\nclass TestComplete(StopIteration):\n \"\"\"\n Exceptions are used to pass test results around.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(TestComplete, self).__init__(*args, **kwargs)\n self.stdout = StringIO()\n self.stderr = StringIO()\n\nclass TestError(TestComplete): pass\n\nclass TestFailure(TestComplete): pass\n\nclass TestSuccess(TestComplete): pass\n\nclass SimFailure(TestComplete): pass\n", "path": "cocotb/result.py"}], "after_files": [{"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n# TODO: Coule use cStringIO?\nimport traceback\nimport sys\n#from StringIO import StringIO\nfrom io import StringIO, BytesIO\n\ndef raise_error(obj, msg):\n \"\"\"\n Creates a TestError exception and raises it after printing a traceback\n\n obj has a log method\n msg is a string\n \"\"\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n # 2.6 cannot use named access\n if sys.version_info[0] >= 3:\n buff = StringIO()\n traceback.print_tb(exc_traceback, file=buff)\n else:\n buff_bytes = BytesIO()\n traceback.print_tb(exc_traceback, file=buff_bytes)\n buff = StringIO(buff_bytes.getvalue().decode(\"UTF-8\"))\n obj.log.error(\"%s\\n%s\" % (msg, buff.getvalue()))\n exception = TestError(msg)\n exception.stderr.write(buff.getvalue())\n raise exception\n\ndef create_error(obj, msg):\n \"\"\"\n As above, but return the exception rather than raise it, simply to avoid\n too many levels of nested try/except blocks\n \"\"\"\n try:\n raise_error(obj, msg)\n except TestError as error:\n return error\n return TestError(\"Creating error traceback failed\")\n\n\nclass ReturnValue(StopIteration):\n def __init__(self, retval):\n self.retval = retval\n\nclass TestComplete(StopIteration):\n \"\"\"\n Exceptions are used to pass test results around.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(TestComplete, self).__init__(*args, **kwargs)\n self.stdout = StringIO()\n self.stderr = StringIO()\n\nclass TestError(TestComplete): pass\n\nclass TestFailure(TestComplete): pass\n\nclass TestSuccess(TestComplete): pass\n\nclass SimFailure(TestComplete): pass\n", "path": "cocotb/result.py"}]}
| 1,701 | 127 |
gh_patches_debug_14825 | rasdani/github-patches | git_diff | CTFd__CTFd-1311 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
log rotation does not works
In the file https://github.com/CTFd/CTFd/blob/master/CTFd/utils/initialization/__init__.py#L107
the log handlers `logging.handlers.RotatingFileHandler` are not created with the right parameters (see [RotatingFileHandler](https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler)) because _if either of maxBytes or backupCount is zero, rollover never occurs_ (`backupCount` parameter is missing)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/initialization/__init__.py`
Content:
```
1 import datetime
2 import logging
3 import os
4 import sys
5
6 from flask import abort, redirect, render_template, request, session, url_for
7 from sqlalchemy.exc import IntegrityError, InvalidRequestError
8 from werkzeug.wsgi import DispatcherMiddleware
9
10 from CTFd.exceptions import UserNotFoundException, UserTokenExpiredException
11 from CTFd.models import Tracking, db
12 from CTFd.utils import config, get_config, markdown
13 from CTFd.utils.config import (
14 can_send_mail,
15 ctf_logo,
16 ctf_name,
17 ctf_theme,
18 integrations,
19 is_setup,
20 )
21 from CTFd.utils.config.pages import get_pages
22 from CTFd.utils.config.visibility import (
23 accounts_visible,
24 challenges_visible,
25 registration_visible,
26 scores_visible,
27 )
28 from CTFd.utils.countries import get_countries, lookup_country_code
29 from CTFd.utils.dates import isoformat, unix_time, unix_time_millis
30 from CTFd.utils.events import EventManager, RedisEventManager
31 from CTFd.utils.modes import generate_account_url, get_mode_as_word
32 from CTFd.utils.plugins import (
33 get_configurable_plugins,
34 get_registered_admin_scripts,
35 get_registered_admin_stylesheets,
36 get_registered_scripts,
37 get_registered_stylesheets,
38 )
39 from CTFd.utils.security.auth import login_user, logout_user, lookup_user_token
40 from CTFd.utils.security.csrf import generate_nonce
41 from CTFd.utils.user import authed, get_current_team, get_current_user, get_ip
42
43
44 def init_template_filters(app):
45 app.jinja_env.filters["markdown"] = markdown
46 app.jinja_env.filters["unix_time"] = unix_time
47 app.jinja_env.filters["unix_time_millis"] = unix_time_millis
48 app.jinja_env.filters["isoformat"] = isoformat
49
50
51 def init_template_globals(app):
52 app.jinja_env.globals.update(config=config)
53 app.jinja_env.globals.update(get_pages=get_pages)
54 app.jinja_env.globals.update(can_send_mail=can_send_mail)
55 app.jinja_env.globals.update(get_ctf_name=ctf_name)
56 app.jinja_env.globals.update(get_ctf_logo=ctf_logo)
57 app.jinja_env.globals.update(get_ctf_theme=ctf_theme)
58 app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)
59 app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)
60 app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)
61 app.jinja_env.globals.update(
62 get_registered_admin_scripts=get_registered_admin_scripts
63 )
64 app.jinja_env.globals.update(
65 get_registered_admin_stylesheets=get_registered_admin_stylesheets
66 )
67 app.jinja_env.globals.update(get_config=get_config)
68 app.jinja_env.globals.update(generate_account_url=generate_account_url)
69 app.jinja_env.globals.update(get_countries=get_countries)
70 app.jinja_env.globals.update(lookup_country_code=lookup_country_code)
71 app.jinja_env.globals.update(accounts_visible=accounts_visible)
72 app.jinja_env.globals.update(challenges_visible=challenges_visible)
73 app.jinja_env.globals.update(registration_visible=registration_visible)
74 app.jinja_env.globals.update(scores_visible=scores_visible)
75 app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)
76 app.jinja_env.globals.update(integrations=integrations)
77 app.jinja_env.globals.update(authed=authed)
78
79
80 def init_logs(app):
81 logger_submissions = logging.getLogger("submissions")
82 logger_logins = logging.getLogger("logins")
83 logger_registrations = logging.getLogger("registrations")
84
85 logger_submissions.setLevel(logging.INFO)
86 logger_logins.setLevel(logging.INFO)
87 logger_registrations.setLevel(logging.INFO)
88
89 log_dir = app.config["LOG_FOLDER"]
90 if not os.path.exists(log_dir):
91 os.makedirs(log_dir)
92
93 logs = {
94 "submissions": os.path.join(log_dir, "submissions.log"),
95 "logins": os.path.join(log_dir, "logins.log"),
96 "registrations": os.path.join(log_dir, "registrations.log"),
97 }
98
99 try:
100 for log in logs.values():
101 if not os.path.exists(log):
102 open(log, "a").close()
103
104 submission_log = logging.handlers.RotatingFileHandler(
105 logs["submissions"], maxBytes=10000
106 )
107 login_log = logging.handlers.RotatingFileHandler(logs["logins"], maxBytes=10000)
108 registration_log = logging.handlers.RotatingFileHandler(
109 logs["registrations"], maxBytes=10000
110 )
111
112 logger_submissions.addHandler(submission_log)
113 logger_logins.addHandler(login_log)
114 logger_registrations.addHandler(registration_log)
115 except IOError:
116 pass
117
118 stdout = logging.StreamHandler(stream=sys.stdout)
119
120 logger_submissions.addHandler(stdout)
121 logger_logins.addHandler(stdout)
122 logger_registrations.addHandler(stdout)
123
124 logger_submissions.propagate = 0
125 logger_logins.propagate = 0
126 logger_registrations.propagate = 0
127
128
129 def init_events(app):
130 if app.config.get("CACHE_TYPE") == "redis":
131 app.events_manager = RedisEventManager()
132 elif app.config.get("CACHE_TYPE") == "filesystem":
133 app.events_manager = EventManager()
134 else:
135 app.events_manager = EventManager()
136
137
138 def init_request_processors(app):
139 @app.context_processor
140 def inject_user():
141 if session:
142 return dict(session)
143 return dict()
144
145 @app.url_defaults
146 def inject_theme(endpoint, values):
147 if "theme" not in values and app.url_map.is_endpoint_expecting(
148 endpoint, "theme"
149 ):
150 values["theme"] = ctf_theme()
151
152 @app.before_request
153 def needs_setup():
154 if is_setup() is False:
155 if request.endpoint in (
156 "views.setup",
157 "views.integrations",
158 "views.themes",
159 ):
160 return
161 else:
162 return redirect(url_for("views.setup"))
163
164 @app.before_request
165 def tracker():
166 if request.endpoint == "views.themes":
167 return
168
169 if authed():
170 track = Tracking.query.filter_by(ip=get_ip(), user_id=session["id"]).first()
171 if not track:
172 visit = Tracking(ip=get_ip(), user_id=session["id"])
173 db.session.add(visit)
174 else:
175 track.date = datetime.datetime.utcnow()
176
177 try:
178 db.session.commit()
179 except (InvalidRequestError, IntegrityError):
180 db.session.rollback()
181 logout_user()
182
183 if authed():
184 user = get_current_user()
185 team = get_current_team()
186
187 if request.path.startswith("/themes") is False:
188 if user and user.banned:
189 return (
190 render_template(
191 "errors/403.html",
192 error="You have been banned from this CTF",
193 ),
194 403,
195 )
196
197 if team and team.banned:
198 return (
199 render_template(
200 "errors/403.html",
201 error="Your team has been banned from this CTF",
202 ),
203 403,
204 )
205
206 db.session.close()
207
208 @app.before_request
209 def tokens():
210 token = request.headers.get("Authorization")
211 if token and request.content_type == "application/json":
212 try:
213 token_type, token = token.split(" ", 1)
214 user = lookup_user_token(token)
215 except UserNotFoundException:
216 abort(401)
217 except UserTokenExpiredException:
218 abort(401)
219 except Exception:
220 abort(401)
221 else:
222 login_user(user)
223
224 @app.before_request
225 def csrf():
226 try:
227 func = app.view_functions[request.endpoint]
228 except KeyError:
229 abort(404)
230 if hasattr(func, "_bypass_csrf"):
231 return
232 if request.headers.get("Authorization"):
233 return
234 if not session.get("nonce"):
235 session["nonce"] = generate_nonce()
236 if request.method not in ("GET", "HEAD", "OPTIONS", "TRACE"):
237 if request.content_type == "application/json":
238 if session["nonce"] != request.headers.get("CSRF-Token"):
239 abort(403)
240 if request.content_type != "application/json":
241 if session["nonce"] != request.form.get("nonce"):
242 abort(403)
243
244 application_root = app.config.get("APPLICATION_ROOT")
245 if application_root != "/":
246
247 @app.before_request
248 def force_subdirectory_redirect():
249 if request.path.startswith(application_root) is False:
250 return redirect(
251 application_root + request.script_root + request.full_path
252 )
253
254 app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py
--- a/CTFd/utils/initialization/__init__.py
+++ b/CTFd/utils/initialization/__init__.py
@@ -102,11 +102,13 @@
open(log, "a").close()
submission_log = logging.handlers.RotatingFileHandler(
- logs["submissions"], maxBytes=10000
+ logs["submissions"], maxBytes=10485760, backupCount=5
+ )
+ login_log = logging.handlers.RotatingFileHandler(
+ logs["logins"], maxBytes=10485760, backupCount=5
)
- login_log = logging.handlers.RotatingFileHandler(logs["logins"], maxBytes=10000)
registration_log = logging.handlers.RotatingFileHandler(
- logs["registrations"], maxBytes=10000
+ logs["registrations"], maxBytes=10485760, backupCount=5
)
logger_submissions.addHandler(submission_log)
|
{"golden_diff": "diff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py\n--- a/CTFd/utils/initialization/__init__.py\n+++ b/CTFd/utils/initialization/__init__.py\n@@ -102,11 +102,13 @@\n open(log, \"a\").close()\n \n submission_log = logging.handlers.RotatingFileHandler(\n- logs[\"submissions\"], maxBytes=10000\n+ logs[\"submissions\"], maxBytes=10485760, backupCount=5\n+ )\n+ login_log = logging.handlers.RotatingFileHandler(\n+ logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n- login_log = logging.handlers.RotatingFileHandler(logs[\"logins\"], maxBytes=10000)\n registration_log = logging.handlers.RotatingFileHandler(\n- logs[\"registrations\"], maxBytes=10000\n+ logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n \n logger_submissions.addHandler(submission_log)\n", "issue": "log rotation does not works\nIn the file https://github.com/CTFd/CTFd/blob/master/CTFd/utils/initialization/__init__.py#L107\r\nthe log handlers `logging.handlers.RotatingFileHandler` are not created with the right parameters (see [RotatingFileHandler](https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler)) because _if either of maxBytes or backupCount is zero, rollover never occurs_ (`backupCount` parameter is missing)\n", "before_files": [{"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.wsgi import DispatcherMiddleware\n\nfrom CTFd.exceptions import UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n)\nfrom CTFd.utils.countries import get_countries, lookup_country_code\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import authed, get_current_team, get_current_user, get_ip\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n\n\ndef init_template_globals(app):\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n 
app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10000\n )\n login_log = logging.handlers.RotatingFileHandler(logs[\"logins\"], maxBytes=10000)\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10000\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n\n\ndef init_request_processors(app):\n @app.context_processor\n def inject_user():\n if session:\n return dict(session)\n return dict()\n\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n \"views.integrations\",\n \"views.themes\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n track = Tracking.query.filter_by(ip=get_ip(), user_id=session[\"id\"]).first()\n if not track:\n visit = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(visit)\n else:\n 
track.date = datetime.datetime.utcnow()\n\n try:\n db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n logout_user()\n\n if authed():\n user = get_current_user()\n team = get_current_team()\n\n if request.path.startswith(\"/themes\") is False:\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"You have been banned from this CTF\",\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n db.session.close()\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401)\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}], "after_files": [{"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.wsgi import DispatcherMiddleware\n\nfrom CTFd.exceptions import UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n)\nfrom CTFd.utils.countries import get_countries, lookup_country_code\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import authed, get_current_team, get_current_user, get_ip\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n 
app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n\n\ndef init_template_globals(app):\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10485760, backupCount=5\n )\n login_log = logging.handlers.RotatingFileHandler(\n logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n\n\ndef init_request_processors(app):\n @app.context_processor\n def inject_user():\n if session:\n return 
dict(session)\n return dict()\n\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n \"views.integrations\",\n \"views.themes\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n track = Tracking.query.filter_by(ip=get_ip(), user_id=session[\"id\"]).first()\n if not track:\n visit = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(visit)\n else:\n track.date = datetime.datetime.utcnow()\n\n try:\n db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n logout_user()\n\n if authed():\n user = get_current_user()\n team = get_current_team()\n\n if request.path.startswith(\"/themes\") is False:\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"You have been banned from this CTF\",\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n db.session.close()\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401)\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}]}
| 2,921 | 267 |
gh_patches_debug_36061 | rasdani/github-patches | git_diff | litestar-org__litestar-1628 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/build_docs.py`
Content:
```
1 from __future__ import annotations
2
3 import argparse
4 import importlib.metadata
5 import json
6 import os
7 import shutil
8 import subprocess
9 from contextlib import contextmanager
10 from pathlib import Path
11 from typing import TypedDict
12
13 REDIRECT_TEMPLATE = """
14 <!DOCTYPE HTML>
15 <html lang="en-US">
16 <head>
17 <title>Page Redirection</title>
18 <meta charset="UTF-8">
19 <meta http-equiv="refresh" content="0; url={target}">
20 <script type="text/javascript">window.location.href = "{target}"</script>
21 </head>
22 <body>
23 You are being redirected. If this does not work, click <a href='{target}'>this link</a>
24 </body>
25 </html>
26 """
27
28 parser = argparse.ArgumentParser()
29 parser.add_argument("--version", required=False)
30 parser.add_argument("output")
31
32
33 class VersionSpec(TypedDict):
34 versions: list[str]
35 latest: str
36
37
38 @contextmanager
39 def checkout(branch: str) -> None:
40 subprocess.run(["git", "checkout", branch], check=True) # noqa: S603 S607
41 yield
42 subprocess.run(["git", "checkout", "-"], check=True) # noqa: S603 S607
43
44
45 def load_version_spec() -> VersionSpec:
46 versions_file = Path("docs/_static/versions.json")
47 if versions_file.exists():
48 return json.loads(versions_file.read_text())
49 return {"versions": [], "latest": ""}
50
51
52 def build(output_dir: str, version: str | None) -> None:
53 if version is None:
54 version = importlib.metadata.version("litestar").rsplit(".")[0]
55 else:
56 os.environ["_LITESTAR_DOCS_BUILD_VERSION"] = version
57
58 subprocess.run(["make", "docs"], check=True) # noqa: S603 S607
59
60 output_dir = Path(output_dir)
61 output_dir.mkdir()
62 output_dir.joinpath(".nojekyll").touch(exist_ok=True)
63
64 version_spec = load_version_spec()
65 is_latest = version == version_spec["latest"]
66
67 docs_src_path = Path("docs/_build/html")
68
69 output_dir.joinpath("index.html").write_text(REDIRECT_TEMPLATE.format(target="latest"))
70
71 if is_latest:
72 shutil.copytree(docs_src_path, output_dir / "latest", dirs_exist_ok=True)
73 shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True)
74
75 # copy existing versions into our output dir to preserve them when cleaning the branch
76 with checkout("gh-pages"):
77 for other_version in [*version_spec["versions"], "latest"]:
78 other_version_path = Path(other_version)
79 other_version_target_path = output_dir / other_version
80 if other_version_path.exists() and not other_version_target_path.exists():
81 shutil.copytree(other_version_path, other_version_target_path)
82
83
84 def main() -> None:
85 args = parser.parse_args()
86 build(output_dir=args.output, version=args.version)
87
88
89 if __name__ == "__main__":
90 main()
91
```
Path: `tools/sphinx_ext/run_examples.py`
Content:
```
1 from __future__ import annotations
2
3 import importlib
4 import logging
5 import multiprocessing
6 import os
7 import re
8 import shlex
9 import socket
10 import subprocess
11 import sys
12 import time
13 from contextlib import contextmanager, redirect_stderr
14 from pathlib import Path
15 from typing import TYPE_CHECKING, Generator
16
17 import httpx
18 import uvicorn
19 from auto_pytabs.sphinx_ext import CodeBlockOverride, LiteralIncludeOverride
20 from docutils.nodes import Node, admonition, literal_block, title
21 from docutils.parsers.rst import directives
22 from sphinx.addnodes import highlightlang
23
24 from litestar import Litestar
25
26 if TYPE_CHECKING:
27 from sphinx.application import Sphinx
28
29
30 RGX_RUN = re.compile(r"# +?run:(.*)")
31
32 AVAILABLE_PORTS = list(range(9000, 9999))
33
34
35 logger = logging.getLogger("sphinx")
36
37
38 def _load_app_from_path(path: Path) -> Litestar:
39 module = importlib.import_module(str(path.with_suffix("")).replace("/", "."))
40 for obj in module.__dict__.values():
41 if isinstance(obj, Litestar):
42 return obj
43 raise RuntimeError(f"No Litestar app found in {path}")
44
45
46 @contextmanager
47 def run_app(path: Path) -> Generator[int, None, None]:
48 """Run an example app from a python file.
49
50 The first ``Litestar`` instance found in the file will be used as target to run.
51 """
52 while AVAILABLE_PORTS:
53 port = AVAILABLE_PORTS.pop()
54 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
55 if sock.connect_ex(("127.0.0.1", port)) != 0:
56 break
57 else:
58 raise RuntimeError("Could not find an open port")
59
60 app = _load_app_from_path(path)
61
62 def run() -> None:
63 with redirect_stderr(Path(os.devnull).open()):
64 uvicorn.run(app, port=port, access_log=False)
65
66 proc = multiprocessing.Process(target=run)
67 proc.start()
68 for _ in range(50):
69 try:
70 httpx.get(f"http://127.0.0.1:{port}", timeout=0.1)
71 except httpx.TransportError:
72 time.sleep(0.1)
73 else:
74 break
75 try:
76 yield port
77 finally:
78 proc.kill()
79 AVAILABLE_PORTS.append(port)
80
81
82 def extract_run_args(content: str) -> tuple[str, list[list[str]]]:
83 """Extract run args from a python file.
84
85 Return the file content stripped of the run comments and a list of argument lists
86 """
87 new_lines = []
88 run_configs = []
89 for line in content.splitlines():
90 if run_stmt_match := RGX_RUN.match(line):
91 run_stmt = run_stmt_match.group(1).lstrip()
92 run_configs.append(shlex.split(run_stmt))
93 else:
94 new_lines.append(line)
95 return "\n".join(new_lines), run_configs
96
97
98 def exec_examples(app_file: Path, run_configs: list[list[str]]) -> str:
99 """Start a server with the example application, run the specified requests against it
100 and return their results
101 """
102
103 results = []
104
105 with run_app(app_file) as port:
106 for run_args in run_configs:
107 url_path, *options = run_args
108 args = ["curl", "-s", f"http://127.0.0.1:{port}{url_path}", *options]
109 clean_args = ["curl", f"http://127.0.0.1:8000{url_path}", *options]
110
111 proc = subprocess.run(
112 args, # noqa: S603
113 capture_output=True,
114 text=True,
115 )
116 stdout = proc.stdout.splitlines()
117 if not stdout:
118 logger.error(f"Example: {app_file}:{args} yielded no results")
119 continue
120
121 result = "\n".join(line for line in ("> " + (" ".join(clean_args)), *stdout))
122 results.append(result)
123
124 return "\n".join(results)
125
126
127 class LiteralInclude(LiteralIncludeOverride):
128 option_spec = {**LiteralIncludeOverride.option_spec, "no-run": directives.flag}
129
130 def run(self) -> list[Node]:
131 cwd = Path.cwd()
132 docs_dir = cwd / "docs"
133 language = self.options.get("language")
134 file = Path(self.env.relfn2path(self.arguments[0])[1])
135
136 if (language != "python" and file.suffix != ".py") or "no-run" in self.options:
137 return super().run()
138
139 content = file.read_text()
140 clean_content, run_args = extract_run_args(content)
141
142 if not run_args:
143 return super().run()
144
145 tmp_file = self.env.tmp_examples_path / str(file.relative_to(docs_dir)).replace("/", "_")
146
147 self.arguments[0] = "/" + str(tmp_file.relative_to(docs_dir))
148 tmp_file.write_text(clean_content)
149
150 nodes = super().run()
151
152 result = exec_examples(file.relative_to(cwd), run_args)
153
154 nodes.append(
155 admonition(
156 "",
157 title("", "Run it"),
158 highlightlang(
159 "",
160 literal_block("", result),
161 lang="shell",
162 force=False,
163 linenothreshold=sys.maxsize,
164 ),
165 literal_block("", result),
166 )
167 )
168
169 return nodes
170
171
172 def setup(app: Sphinx) -> dict[str, bool]:
173 app.add_directive("literalinclude", LiteralInclude, override=True)
174 app.add_directive("code-block", CodeBlockOverride, override=True)
175
176 return {"parallel_read_safe": True, "parallel_write_safe": True}
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/build_docs.py b/tools/build_docs.py
--- a/tools/build_docs.py
+++ b/tools/build_docs.py
@@ -27,6 +27,7 @@
parser = argparse.ArgumentParser()
parser.add_argument("--version", required=False)
+parser.add_argument("--ignore-missing-examples-output", action="store_true", default=False)
parser.add_argument("output")
@@ -49,12 +50,15 @@
return {"versions": [], "latest": ""}
-def build(output_dir: str, version: str | None) -> None:
+def build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None:
if version is None:
version = importlib.metadata.version("litestar").rsplit(".")[0]
else:
os.environ["_LITESTAR_DOCS_BUILD_VERSION"] = version
+ if ignore_missing_output:
+ os.environ["_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT"] = "1"
+
subprocess.run(["make", "docs"], check=True) # noqa: S603 S607
output_dir = Path(output_dir)
@@ -83,7 +87,11 @@
def main() -> None:
args = parser.parse_args()
- build(output_dir=args.output, version=args.version)
+ build(
+ output_dir=args.output,
+ version=args.version,
+ ignore_missing_output=args.ignore_missing_output,
+ )
if __name__ == "__main__":
diff --git a/tools/sphinx_ext/run_examples.py b/tools/sphinx_ext/run_examples.py
--- a/tools/sphinx_ext/run_examples.py
+++ b/tools/sphinx_ext/run_examples.py
@@ -34,6 +34,8 @@
logger = logging.getLogger("sphinx")
+ignore_missing_output = os.getenv("_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT", "") == "1"
+
def _load_app_from_path(path: Path) -> Litestar:
module = importlib.import_module(str(path.with_suffix("")).replace("/", "."))
@@ -115,7 +117,9 @@
)
stdout = proc.stdout.splitlines()
if not stdout:
- logger.error(f"Example: {app_file}:{args} yielded no results")
+ logger.debug(proc.stderr)
+ if not ignore_missing_output:
+ logger.error(f"Example: {app_file}:{args} yielded no results")
continue
result = "\n".join(line for line in ("> " + (" ".join(clean_args)), *stdout))
|
{"golden_diff": "diff --git a/tools/build_docs.py b/tools/build_docs.py\n--- a/tools/build_docs.py\n+++ b/tools/build_docs.py\n@@ -27,6 +27,7 @@\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--version\", required=False)\n+parser.add_argument(\"--ignore-missing-examples-output\", action=\"store_true\", default=False)\n parser.add_argument(\"output\")\n \n \n@@ -49,12 +50,15 @@\n return {\"versions\": [], \"latest\": \"\"}\n \n \n-def build(output_dir: str, version: str | None) -> None:\n+def build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None:\n if version is None:\n version = importlib.metadata.version(\"litestar\").rsplit(\".\")[0]\n else:\n os.environ[\"_LITESTAR_DOCS_BUILD_VERSION\"] = version\n \n+ if ignore_missing_output:\n+ os.environ[\"_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT\"] = \"1\"\n+\n subprocess.run([\"make\", \"docs\"], check=True) # noqa: S603 S607\n \n output_dir = Path(output_dir)\n@@ -83,7 +87,11 @@\n \n def main() -> None:\n args = parser.parse_args()\n- build(output_dir=args.output, version=args.version)\n+ build(\n+ output_dir=args.output,\n+ version=args.version,\n+ ignore_missing_output=args.ignore_missing_output,\n+ )\n \n \n if __name__ == \"__main__\":\ndiff --git a/tools/sphinx_ext/run_examples.py b/tools/sphinx_ext/run_examples.py\n--- a/tools/sphinx_ext/run_examples.py\n+++ b/tools/sphinx_ext/run_examples.py\n@@ -34,6 +34,8 @@\n \n logger = logging.getLogger(\"sphinx\")\n \n+ignore_missing_output = os.getenv(\"_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT\", \"\") == \"1\"\n+\n \n def _load_app_from_path(path: Path) -> Litestar:\n module = importlib.import_module(str(path.with_suffix(\"\")).replace(\"/\", \".\"))\n@@ -115,7 +117,9 @@\n )\n stdout = proc.stdout.splitlines()\n if not stdout:\n- logger.error(f\"Example: {app_file}:{args} yielded no results\")\n+ logger.debug(proc.stderr)\n+ if not ignore_missing_output:\n+ logger.error(f\"Example: {app_file}:{args} yielded no results\")\n continue\n \n result = \"\\n\".join(line for line in (\"> \" + (\" \".join(clean_args)), *stdout))\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport importlib.metadata\nimport json\nimport os\nimport shutil\nimport subprocess\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import TypedDict\n\nREDIRECT_TEMPLATE = \"\"\"\n<!DOCTYPE HTML>\n<html lang=\"en-US\">\n <head>\n <title>Page Redirection</title>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"refresh\" content=\"0; url={target}\">\n <script type=\"text/javascript\">window.location.href = \"{target}\"</script>\n </head>\n <body>\n You are being redirected. 
If this does not work, click <a href='{target}'>this link</a>\n </body>\n</html>\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--version\", required=False)\nparser.add_argument(\"output\")\n\n\nclass VersionSpec(TypedDict):\n versions: list[str]\n latest: str\n\n\n@contextmanager\ndef checkout(branch: str) -> None:\n subprocess.run([\"git\", \"checkout\", branch], check=True) # noqa: S603 S607\n yield\n subprocess.run([\"git\", \"checkout\", \"-\"], check=True) # noqa: S603 S607\n\n\ndef load_version_spec() -> VersionSpec:\n versions_file = Path(\"docs/_static/versions.json\")\n if versions_file.exists():\n return json.loads(versions_file.read_text())\n return {\"versions\": [], \"latest\": \"\"}\n\n\ndef build(output_dir: str, version: str | None) -> None:\n if version is None:\n version = importlib.metadata.version(\"litestar\").rsplit(\".\")[0]\n else:\n os.environ[\"_LITESTAR_DOCS_BUILD_VERSION\"] = version\n\n subprocess.run([\"make\", \"docs\"], check=True) # noqa: S603 S607\n\n output_dir = Path(output_dir)\n output_dir.mkdir()\n output_dir.joinpath(\".nojekyll\").touch(exist_ok=True)\n\n version_spec = load_version_spec()\n is_latest = version == version_spec[\"latest\"]\n\n docs_src_path = Path(\"docs/_build/html\")\n\n output_dir.joinpath(\"index.html\").write_text(REDIRECT_TEMPLATE.format(target=\"latest\"))\n\n if is_latest:\n shutil.copytree(docs_src_path, output_dir / \"latest\", dirs_exist_ok=True)\n shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True)\n\n # copy existing versions into our output dir to preserve them when cleaning the branch\n with checkout(\"gh-pages\"):\n for other_version in [*version_spec[\"versions\"], \"latest\"]:\n other_version_path = Path(other_version)\n other_version_target_path = output_dir / other_version\n if other_version_path.exists() and not other_version_target_path.exists():\n shutil.copytree(other_version_path, other_version_target_path)\n\n\ndef main() -> None:\n args = parser.parse_args()\n build(output_dir=args.output, version=args.version)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/build_docs.py"}, {"content": "from __future__ import annotations\n\nimport importlib\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport shlex\nimport socket\nimport subprocess\nimport sys\nimport time\nfrom contextlib import contextmanager, redirect_stderr\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Generator\n\nimport httpx\nimport uvicorn\nfrom auto_pytabs.sphinx_ext import CodeBlockOverride, LiteralIncludeOverride\nfrom docutils.nodes import Node, admonition, literal_block, title\nfrom docutils.parsers.rst import directives\nfrom sphinx.addnodes import highlightlang\n\nfrom litestar import Litestar\n\nif TYPE_CHECKING:\n from sphinx.application import Sphinx\n\n\nRGX_RUN = re.compile(r\"# +?run:(.*)\")\n\nAVAILABLE_PORTS = list(range(9000, 9999))\n\n\nlogger = logging.getLogger(\"sphinx\")\n\n\ndef _load_app_from_path(path: Path) -> Litestar:\n module = importlib.import_module(str(path.with_suffix(\"\")).replace(\"/\", \".\"))\n for obj in module.__dict__.values():\n if isinstance(obj, Litestar):\n return obj\n raise RuntimeError(f\"No Litestar app found in {path}\")\n\n\n@contextmanager\ndef run_app(path: Path) -> Generator[int, None, None]:\n \"\"\"Run an example app from a python file.\n\n The first ``Litestar`` instance found in the file will be used as target to run.\n \"\"\"\n while AVAILABLE_PORTS:\n port = AVAILABLE_PORTS.pop()\n with 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n if sock.connect_ex((\"127.0.0.1\", port)) != 0:\n break\n else:\n raise RuntimeError(\"Could not find an open port\")\n\n app = _load_app_from_path(path)\n\n def run() -> None:\n with redirect_stderr(Path(os.devnull).open()):\n uvicorn.run(app, port=port, access_log=False)\n\n proc = multiprocessing.Process(target=run)\n proc.start()\n for _ in range(50):\n try:\n httpx.get(f\"http://127.0.0.1:{port}\", timeout=0.1)\n except httpx.TransportError:\n time.sleep(0.1)\n else:\n break\n try:\n yield port\n finally:\n proc.kill()\n AVAILABLE_PORTS.append(port)\n\n\ndef extract_run_args(content: str) -> tuple[str, list[list[str]]]:\n \"\"\"Extract run args from a python file.\n\n Return the file content stripped of the run comments and a list of argument lists\n \"\"\"\n new_lines = []\n run_configs = []\n for line in content.splitlines():\n if run_stmt_match := RGX_RUN.match(line):\n run_stmt = run_stmt_match.group(1).lstrip()\n run_configs.append(shlex.split(run_stmt))\n else:\n new_lines.append(line)\n return \"\\n\".join(new_lines), run_configs\n\n\ndef exec_examples(app_file: Path, run_configs: list[list[str]]) -> str:\n \"\"\"Start a server with the example application, run the specified requests against it\n and return their results\n \"\"\"\n\n results = []\n\n with run_app(app_file) as port:\n for run_args in run_configs:\n url_path, *options = run_args\n args = [\"curl\", \"-s\", f\"http://127.0.0.1:{port}{url_path}\", *options]\n clean_args = [\"curl\", f\"http://127.0.0.1:8000{url_path}\", *options]\n\n proc = subprocess.run(\n args, # noqa: S603\n capture_output=True,\n text=True,\n )\n stdout = proc.stdout.splitlines()\n if not stdout:\n logger.error(f\"Example: {app_file}:{args} yielded no results\")\n continue\n\n result = \"\\n\".join(line for line in (\"> \" + (\" \".join(clean_args)), *stdout))\n results.append(result)\n\n return \"\\n\".join(results)\n\n\nclass LiteralInclude(LiteralIncludeOverride):\n option_spec = {**LiteralIncludeOverride.option_spec, \"no-run\": directives.flag}\n\n def run(self) -> list[Node]:\n cwd = Path.cwd()\n docs_dir = cwd / \"docs\"\n language = self.options.get(\"language\")\n file = Path(self.env.relfn2path(self.arguments[0])[1])\n\n if (language != \"python\" and file.suffix != \".py\") or \"no-run\" in self.options:\n return super().run()\n\n content = file.read_text()\n clean_content, run_args = extract_run_args(content)\n\n if not run_args:\n return super().run()\n\n tmp_file = self.env.tmp_examples_path / str(file.relative_to(docs_dir)).replace(\"/\", \"_\")\n\n self.arguments[0] = \"/\" + str(tmp_file.relative_to(docs_dir))\n tmp_file.write_text(clean_content)\n\n nodes = super().run()\n\n result = exec_examples(file.relative_to(cwd), run_args)\n\n nodes.append(\n admonition(\n \"\",\n title(\"\", \"Run it\"),\n highlightlang(\n \"\",\n literal_block(\"\", result),\n lang=\"shell\",\n force=False,\n linenothreshold=sys.maxsize,\n ),\n literal_block(\"\", result),\n )\n )\n\n return nodes\n\n\ndef setup(app: Sphinx) -> dict[str, bool]:\n app.add_directive(\"literalinclude\", LiteralInclude, override=True)\n app.add_directive(\"code-block\", CodeBlockOverride, override=True)\n\n return {\"parallel_read_safe\": True, \"parallel_write_safe\": True}\n", "path": "tools/sphinx_ext/run_examples.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport importlib.metadata\nimport json\nimport os\nimport shutil\nimport subprocess\nfrom contextlib 
import contextmanager\nfrom pathlib import Path\nfrom typing import TypedDict\n\nREDIRECT_TEMPLATE = \"\"\"\n<!DOCTYPE HTML>\n<html lang=\"en-US\">\n <head>\n <title>Page Redirection</title>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"refresh\" content=\"0; url={target}\">\n <script type=\"text/javascript\">window.location.href = \"{target}\"</script>\n </head>\n <body>\n You are being redirected. If this does not work, click <a href='{target}'>this link</a>\n </body>\n</html>\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--version\", required=False)\nparser.add_argument(\"--ignore-missing-examples-output\", action=\"store_true\", default=False)\nparser.add_argument(\"output\")\n\n\nclass VersionSpec(TypedDict):\n versions: list[str]\n latest: str\n\n\n@contextmanager\ndef checkout(branch: str) -> None:\n subprocess.run([\"git\", \"checkout\", branch], check=True) # noqa: S603 S607\n yield\n subprocess.run([\"git\", \"checkout\", \"-\"], check=True) # noqa: S603 S607\n\n\ndef load_version_spec() -> VersionSpec:\n versions_file = Path(\"docs/_static/versions.json\")\n if versions_file.exists():\n return json.loads(versions_file.read_text())\n return {\"versions\": [], \"latest\": \"\"}\n\n\ndef build(output_dir: str, version: str | None, ignore_missing_output: bool) -> None:\n if version is None:\n version = importlib.metadata.version(\"litestar\").rsplit(\".\")[0]\n else:\n os.environ[\"_LITESTAR_DOCS_BUILD_VERSION\"] = version\n\n if ignore_missing_output:\n os.environ[\"_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT\"] = \"1\"\n\n subprocess.run([\"make\", \"docs\"], check=True) # noqa: S603 S607\n\n output_dir = Path(output_dir)\n output_dir.mkdir()\n output_dir.joinpath(\".nojekyll\").touch(exist_ok=True)\n\n version_spec = load_version_spec()\n is_latest = version == version_spec[\"latest\"]\n\n docs_src_path = Path(\"docs/_build/html\")\n\n output_dir.joinpath(\"index.html\").write_text(REDIRECT_TEMPLATE.format(target=\"latest\"))\n\n if is_latest:\n shutil.copytree(docs_src_path, output_dir / \"latest\", dirs_exist_ok=True)\n shutil.copytree(docs_src_path, output_dir / version, dirs_exist_ok=True)\n\n # copy existing versions into our output dir to preserve them when cleaning the branch\n with checkout(\"gh-pages\"):\n for other_version in [*version_spec[\"versions\"], \"latest\"]:\n other_version_path = Path(other_version)\n other_version_target_path = output_dir / other_version\n if other_version_path.exists() and not other_version_target_path.exists():\n shutil.copytree(other_version_path, other_version_target_path)\n\n\ndef main() -> None:\n args = parser.parse_args()\n build(\n output_dir=args.output,\n version=args.version,\n ignore_missing_output=args.ignore_missing_output,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/build_docs.py"}, {"content": "from __future__ import annotations\n\nimport importlib\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport shlex\nimport socket\nimport subprocess\nimport sys\nimport time\nfrom contextlib import contextmanager, redirect_stderr\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Generator\n\nimport httpx\nimport uvicorn\nfrom auto_pytabs.sphinx_ext import CodeBlockOverride, LiteralIncludeOverride\nfrom docutils.nodes import Node, admonition, literal_block, title\nfrom docutils.parsers.rst import directives\nfrom sphinx.addnodes import highlightlang\n\nfrom litestar import Litestar\n\nif TYPE_CHECKING:\n from sphinx.application import Sphinx\n\n\nRGX_RUN = 
re.compile(r\"# +?run:(.*)\")\n\nAVAILABLE_PORTS = list(range(9000, 9999))\n\n\nlogger = logging.getLogger(\"sphinx\")\n\nignore_missing_output = os.getenv(\"_LITESTAR_DOCS_IGNORE_MISSING_EXAMPLE_OUTPUT\", \"\") == \"1\"\n\n\ndef _load_app_from_path(path: Path) -> Litestar:\n module = importlib.import_module(str(path.with_suffix(\"\")).replace(\"/\", \".\"))\n for obj in module.__dict__.values():\n if isinstance(obj, Litestar):\n return obj\n raise RuntimeError(f\"No Litestar app found in {path}\")\n\n\n@contextmanager\ndef run_app(path: Path) -> Generator[int, None, None]:\n \"\"\"Run an example app from a python file.\n\n The first ``Litestar`` instance found in the file will be used as target to run.\n \"\"\"\n while AVAILABLE_PORTS:\n port = AVAILABLE_PORTS.pop()\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n if sock.connect_ex((\"127.0.0.1\", port)) != 0:\n break\n else:\n raise RuntimeError(\"Could not find an open port\")\n\n app = _load_app_from_path(path)\n\n def run() -> None:\n with redirect_stderr(Path(os.devnull).open()):\n uvicorn.run(app, port=port, access_log=False)\n\n proc = multiprocessing.Process(target=run)\n proc.start()\n for _ in range(50):\n try:\n httpx.get(f\"http://127.0.0.1:{port}\", timeout=0.1)\n except httpx.TransportError:\n time.sleep(0.1)\n else:\n break\n try:\n yield port\n finally:\n proc.kill()\n AVAILABLE_PORTS.append(port)\n\n\ndef extract_run_args(content: str) -> tuple[str, list[list[str]]]:\n \"\"\"Extract run args from a python file.\n\n Return the file content stripped of the run comments and a list of argument lists\n \"\"\"\n new_lines = []\n run_configs = []\n for line in content.splitlines():\n if run_stmt_match := RGX_RUN.match(line):\n run_stmt = run_stmt_match.group(1).lstrip()\n run_configs.append(shlex.split(run_stmt))\n else:\n new_lines.append(line)\n return \"\\n\".join(new_lines), run_configs\n\n\ndef exec_examples(app_file: Path, run_configs: list[list[str]]) -> str:\n \"\"\"Start a server with the example application, run the specified requests against it\n and return their results\n \"\"\"\n\n results = []\n\n with run_app(app_file) as port:\n for run_args in run_configs:\n url_path, *options = run_args\n args = [\"curl\", \"-s\", f\"http://127.0.0.1:{port}{url_path}\", *options]\n clean_args = [\"curl\", f\"http://127.0.0.1:8000{url_path}\", *options]\n\n proc = subprocess.run(\n args, # noqa: S603\n capture_output=True,\n text=True,\n )\n stdout = proc.stdout.splitlines()\n if not stdout:\n logger.debug(proc.stderr)\n if not ignore_missing_output:\n logger.error(f\"Example: {app_file}:{args} yielded no results\")\n continue\n\n result = \"\\n\".join(line for line in (\"> \" + (\" \".join(clean_args)), *stdout))\n results.append(result)\n\n return \"\\n\".join(results)\n\n\nclass LiteralInclude(LiteralIncludeOverride):\n option_spec = {**LiteralIncludeOverride.option_spec, \"no-run\": directives.flag}\n\n def run(self) -> list[Node]:\n cwd = Path.cwd()\n docs_dir = cwd / \"docs\"\n language = self.options.get(\"language\")\n file = Path(self.env.relfn2path(self.arguments[0])[1])\n\n if (language != \"python\" and file.suffix != \".py\") or \"no-run\" in self.options:\n return super().run()\n\n content = file.read_text()\n clean_content, run_args = extract_run_args(content)\n\n if not run_args:\n return super().run()\n\n tmp_file = self.env.tmp_examples_path / str(file.relative_to(docs_dir)).replace(\"/\", \"_\")\n\n self.arguments[0] = \"/\" + str(tmp_file.relative_to(docs_dir))\n 
tmp_file.write_text(clean_content)\n\n nodes = super().run()\n\n result = exec_examples(file.relative_to(cwd), run_args)\n\n nodes.append(\n admonition(\n \"\",\n title(\"\", \"Run it\"),\n highlightlang(\n \"\",\n literal_block(\"\", result),\n lang=\"shell\",\n force=False,\n linenothreshold=sys.maxsize,\n ),\n literal_block(\"\", result),\n )\n )\n\n return nodes\n\n\ndef setup(app: Sphinx) -> dict[str, bool]:\n app.add_directive(\"literalinclude\", LiteralInclude, override=True)\n app.add_directive(\"code-block\", CodeBlockOverride, override=True)\n\n return {\"parallel_read_safe\": True, \"parallel_write_safe\": True}\n", "path": "tools/sphinx_ext/run_examples.py"}]}
| 2,926 | 555 |
gh_patches_debug_27050
|
rasdani/github-patches
|
git_diff
|
ephios-dev__ephios-112
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement Content Security Policy
which means we can't have inline javascript!
* https://docs.djangoproject.com/en/3.1/topics/security/
* https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/event_management/templatetags/event_extras.py`
Content:
```
1 from django import template
2 from django.utils.safestring import mark_safe
3
4 from ephios.event_management.models import AbstractParticipation
5
6 register = template.Library()
7
8
9 @register.filter(name="shift_status")
10 def shift_status(shift, user):
11 participation = user.as_participant().participation_for(shift)
12 if participation is not None:
13 color = {
14 AbstractParticipation.States.USER_DECLINED: "text-danger",
15 AbstractParticipation.States.RESPONSIBLE_REJECTED: "text-danger",
16 AbstractParticipation.States.REQUESTED: "text-warning",
17 AbstractParticipation.States.CONFIRMED: "text-success",
18 }[participation.state]
19 return mark_safe(f'<span class="{color}">{participation.get_state_display()}</span><br>')
20 return ""
21
22
23 @register.filter(name="can_sign_up")
24 def can_sign_up(shift, user):
25 return shift.signup_method.can_sign_up(user.as_participant())
26
27
28 @register.filter(name="render_shift_state")
29 def render_shift_state(shift, request):
30 return shift.signup_method.render_shift_state(request)
31
32
33 @register.filter(name="signup_errors")
34 def signup_errors(shift, user):
35 return shift.signup_method.get_signup_errors(user.as_participant())
36
37
38 @register.filter(name="can_user_decline")
39 def can_user_decline(shift, user):
40 return shift.signup_method.can_decline(user.as_participant())
41
42
43 @register.filter(name="decline_errors")
44 def decline_errors(shift, user):
45 return shift.signup_method.get_decline_errors(user.as_participant())
46
```
Path: `ephios/settings.py`
Content:
```
1 """
2 Django settings for ephios project.
3
4 Generated by 'django-admin startproject' using Django 3.0.6.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.0/ref/settings/
11 """
12
13 import os
14
15 from email.utils import getaddresses
16
17 import environ
18
19 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
20
21 env = environ.Env()
22 # for syntax see https://django-environ.readthedocs.io/en/latest/
23 environ.Env.read_env(env_file=os.path.join(BASE_DIR, ".env"))
24
25 SECRET_KEY = env.str("SECRET_KEY")
26 DEBUG = env.bool("DEBUG")
27 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS")
28 SITE_URL = env.str("SITE_URL")
29
30 if not DEBUG:
31 SESSION_COOKIE_SECURE = True
32 CSRF_COOKIE_SECURE = True
33 X_FRAME_OPTIONS = "DENY"
34 SECURE_CONTENT_TYPE_NOSNIFF = True
35 SECURE_BROWSER_XSS_FILTER = True
36 SECURE_SSL_REDIRECT = True
37 SECURE_HSTS_SECONDS = 3600
38 SECURE_HSTS_INCLUDE_SUBDOMAINS = True
39 SECURE_REFERRER_POLICY = "same-origin"
40
41 # Application definition
42
43 INSTALLED_APPS = [
44 "django.contrib.admin",
45 "django.contrib.auth",
46 "polymorphic",
47 "django.contrib.contenttypes",
48 "django.contrib.sessions",
49 "django.contrib.messages",
50 "django.contrib.staticfiles",
51 "bootstrap4",
52 "guardian",
53 "django_select2",
54 "jquery",
55 "djangoformsetjs",
56 "ephios.user_management",
57 "ephios.event_management",
58 "ephios.helpers",
59 "ephios.plugins.basesignup",
60 "ephios.plugins.pages",
61 ]
62
63 MIDDLEWARE = [
64 "django.middleware.security.SecurityMiddleware",
65 "django.contrib.sessions.middleware.SessionMiddleware",
66 "django.middleware.common.CommonMiddleware",
67 "django.middleware.csrf.CsrfViewMiddleware",
68 "django.contrib.auth.middleware.AuthenticationMiddleware",
69 "django.contrib.messages.middleware.MessageMiddleware",
70 "django.middleware.clickjacking.XFrameOptionsMiddleware",
71 ]
72
73 ROOT_URLCONF = "ephios.urls"
74
75 TEMPLATES = [
76 {
77 "BACKEND": "django.template.backends.django.DjangoTemplates",
78 "DIRS": [os.path.join(BASE_DIR, "ephios/templates")],
79 "APP_DIRS": True,
80 "OPTIONS": {
81 "context_processors": [
82 "django.template.context_processors.debug",
83 "django.template.context_processors.request",
84 "django.contrib.auth.context_processors.auth",
85 "django.contrib.messages.context_processors.messages",
86 "ephios.helpers.context.ephios_base_context",
87 ],
88 },
89 },
90 ]
91
92 LOCALE_PATHS = (os.path.join(BASE_DIR, "ephios/locale"),)
93
94 WSGI_APPLICATION = "ephios.wsgi.application"
95
96 # Database
97 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
98
99 DATABASES = {"default": env.db_url()}
100
101 # Password validation
102 # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
103
104 AUTH_PASSWORD_VALIDATORS = [
105 {
106 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
107 },
108 {
109 "NAME": "user_management.utils.CustomMinimumLengthValidator",
110 },
111 {
112 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
113 },
114 {
115 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
116 },
117 ]
118
119 AUTHENTICATION_BACKENDS = (
120 "django.contrib.auth.backends.ModelBackend",
121 "guardian.backends.ObjectPermissionBackend",
122 )
123
124 AUTH_USER_MODEL = "user_management.UserProfile"
125 LOGIN_REDIRECT_URL = "/"
126
127 # Internationalization
128 # https://docs.djangoproject.com/en/3.0/topics/i18n/
129
130 LANGUAGE_CODE = "de-de"
131
132 TIME_ZONE = "Europe/Berlin"
133
134 USE_I18N = True
135
136 USE_L10N = True
137
138 USE_TZ = True
139
140 # Static files (CSS, JavaScript, Images)
141 # https://docs.djangoproject.com/en/3.0/howto/static-files/
142
143 STATIC_URL = env.str("STATIC_URL")
144 STATIC_ROOT = env.str("STATIC_ROOT")
145 STATICFILES_DIRS = (os.path.join(BASE_DIR, "ephios/static"),)
146
147 # mail configuration
148 EMAIL_CONFIG = env.email_url("EMAIL_URL")
149 vars().update(EMAIL_CONFIG)
150 DEFAULT_FROM_EMAIL = env.str("DEFAULT_FROM_EMAIL")
151 SERVER_EMAIL = env.str("SERVER_EMAIL")
152 ADMINS = getaddresses([env("ADMINS")])
153
154 # Guardian configuration
155 ANONYMOUS_USER_NAME = None
156 GUARDIAN_MONKEY_PATCH = False
157
158 # django-select2
159 # Prevent django-select from loading the select2 resources as we want to serve them locally
160 SELECT2_JS = ""
161 SELECT2_CSS = ""
162 SELECT2_I18N_PATH = ""
163
164 # django-debug-toolbar
165 if DEBUG:
166 INSTALLED_APPS.append("django_extensions")
167 INSTALLED_APPS.append("debug_toolbar")
168 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
169 INTERNAL_IPS = env.str("INTERNAL_IPS")
170
```
Path: `ephios/helpers/context.py`
Content:
```
1 from ephios.helpers.signals import footer_link
2
3
4 def ephios_base_context(request):
5 footer = {}
6 for receiver, result in footer_link.send(None, request=request):
7 for label, url in result.items():
8 footer[label] = url
9
10 return {
11 "footer": footer,
12 }
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ephios/event_management/templatetags/event_extras.py b/ephios/event_management/templatetags/event_extras.py
--- a/ephios/event_management/templatetags/event_extras.py
+++ b/ephios/event_management/templatetags/event_extras.py
@@ -35,8 +35,8 @@
return shift.signup_method.get_signup_errors(user.as_participant())
[email protected](name="can_user_decline")
-def can_user_decline(shift, user):
[email protected](name="can_decline")
+def can_decline(shift, user):
return shift.signup_method.can_decline(user.as_participant())
diff --git a/ephios/helpers/context.py b/ephios/helpers/context.py
--- a/ephios/helpers/context.py
+++ b/ephios/helpers/context.py
@@ -1,3 +1,6 @@
+from django.templatetags.static import static
+from django.utils.translation import get_language
+
from ephios.helpers.signals import footer_link
@@ -7,6 +10,8 @@
for label, url in result.items():
footer[label] = url
- return {
- "footer": footer,
- }
+ datatables_translation_url = None
+ if get_language() == "de-de":
+ datatables_translation_url = static("datatables/german.json")
+
+ return {"footer": footer, "datatables_translation_url": datatables_translation_url}
diff --git a/ephios/settings.py b/ephios/settings.py
--- a/ephios/settings.py
+++ b/ephios/settings.py
@@ -68,6 +68,7 @@
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
+ "csp.middleware.CSPMiddleware",
]
ROOT_URLCONF = "ephios.urls"
|
{"golden_diff": "diff --git a/ephios/event_management/templatetags/event_extras.py b/ephios/event_management/templatetags/event_extras.py\n--- a/ephios/event_management/templatetags/event_extras.py\n+++ b/ephios/event_management/templatetags/event_extras.py\n@@ -35,8 +35,8 @@\n return shift.signup_method.get_signup_errors(user.as_participant())\n \n \[email protected](name=\"can_user_decline\")\n-def can_user_decline(shift, user):\[email protected](name=\"can_decline\")\n+def can_decline(shift, user):\n return shift.signup_method.can_decline(user.as_participant())\n \n \ndiff --git a/ephios/helpers/context.py b/ephios/helpers/context.py\n--- a/ephios/helpers/context.py\n+++ b/ephios/helpers/context.py\n@@ -1,3 +1,6 @@\n+from django.templatetags.static import static\n+from django.utils.translation import get_language\n+\n from ephios.helpers.signals import footer_link\n \n \n@@ -7,6 +10,8 @@\n for label, url in result.items():\n footer[label] = url\n \n- return {\n- \"footer\": footer,\n- }\n+ datatables_translation_url = None\n+ if get_language() == \"de-de\":\n+ datatables_translation_url = static(\"datatables/german.json\")\n+\n+ return {\"footer\": footer, \"datatables_translation_url\": datatables_translation_url}\ndiff --git a/ephios/settings.py b/ephios/settings.py\n--- a/ephios/settings.py\n+++ b/ephios/settings.py\n@@ -68,6 +68,7 @@\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n+ \"csp.middleware.CSPMiddleware\",\n ]\n \n ROOT_URLCONF = \"ephios.urls\"\n", "issue": "Implement Content Security Policy\nwhich means we can't have inline javascript!\r\n\r\n* https://docs.djangoproject.com/en/3.1/topics/security/\r\n* https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy/script-src\n", "before_files": [{"content": "from django import template\nfrom django.utils.safestring import mark_safe\n\nfrom ephios.event_management.models import AbstractParticipation\n\nregister = template.Library()\n\n\[email protected](name=\"shift_status\")\ndef shift_status(shift, user):\n participation = user.as_participant().participation_for(shift)\n if participation is not None:\n color = {\n AbstractParticipation.States.USER_DECLINED: \"text-danger\",\n AbstractParticipation.States.RESPONSIBLE_REJECTED: \"text-danger\",\n AbstractParticipation.States.REQUESTED: \"text-warning\",\n AbstractParticipation.States.CONFIRMED: \"text-success\",\n }[participation.state]\n return mark_safe(f'<span class=\"{color}\">{participation.get_state_display()}</span><br>')\n return \"\"\n\n\[email protected](name=\"can_sign_up\")\ndef can_sign_up(shift, user):\n return shift.signup_method.can_sign_up(user.as_participant())\n\n\[email protected](name=\"render_shift_state\")\ndef render_shift_state(shift, request):\n return shift.signup_method.render_shift_state(request)\n\n\[email protected](name=\"signup_errors\")\ndef signup_errors(shift, user):\n return shift.signup_method.get_signup_errors(user.as_participant())\n\n\[email protected](name=\"can_user_decline\")\ndef can_user_decline(shift, user):\n return shift.signup_method.can_decline(user.as_participant())\n\n\[email protected](name=\"decline_errors\")\ndef decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n", "path": "ephios/event_management/templatetags/event_extras.py"}, {"content": "\"\"\"\nDjango settings for ephios project.\n\nGenerated by 
'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\n\nfrom email.utils import getaddresses\n\nimport environ\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nenv = environ.Env()\n# for syntax see https://django-environ.readthedocs.io/en/latest/\nenviron.Env.read_env(env_file=os.path.join(BASE_DIR, \".env\"))\n\nSECRET_KEY = env.str(\"SECRET_KEY\")\nDEBUG = env.bool(\"DEBUG\")\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\")\nSITE_URL = env.str(\"SITE_URL\")\n\nif not DEBUG:\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n X_FRAME_OPTIONS = \"DENY\"\n SECURE_CONTENT_TYPE_NOSNIFF = True\n SECURE_BROWSER_XSS_FILTER = True\n SECURE_SSL_REDIRECT = True\n SECURE_HSTS_SECONDS = 3600\n SECURE_HSTS_INCLUDE_SUBDOMAINS = True\n SECURE_REFERRER_POLICY = \"same-origin\"\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"polymorphic\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"bootstrap4\",\n \"guardian\",\n \"django_select2\",\n \"jquery\",\n \"djangoformsetjs\",\n \"ephios.user_management\",\n \"ephios.event_management\",\n \"ephios.helpers\",\n \"ephios.plugins.basesignup\",\n \"ephios.plugins.pages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"ephios.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"ephios/templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"ephios.helpers.context.ephios_base_context\",\n ],\n },\n },\n]\n\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"ephios/locale\"),)\n\nWSGI_APPLICATION = \"ephios.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\"default\": env.db_url()}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"user_management.utils.CustomMinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n)\n\nAUTH_USER_MODEL = \"user_management.UserProfile\"\nLOGIN_REDIRECT_URL = \"/\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"de-de\"\n\nTIME_ZONE = 
\"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = env.str(\"STATIC_URL\")\nSTATIC_ROOT = env.str(\"STATIC_ROOT\")\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"ephios/static\"),)\n\n# mail configuration\nEMAIL_CONFIG = env.email_url(\"EMAIL_URL\")\nvars().update(EMAIL_CONFIG)\nDEFAULT_FROM_EMAIL = env.str(\"DEFAULT_FROM_EMAIL\")\nSERVER_EMAIL = env.str(\"SERVER_EMAIL\")\nADMINS = getaddresses([env(\"ADMINS\")])\n\n# Guardian configuration\nANONYMOUS_USER_NAME = None\nGUARDIAN_MONKEY_PATCH = False\n\n# django-select2\n# Prevent django-select from loading the select2 resources as we want to serve them locally\nSELECT2_JS = \"\"\nSELECT2_CSS = \"\"\nSELECT2_I18N_PATH = \"\"\n\n# django-debug-toolbar\nif DEBUG:\n INSTALLED_APPS.append(\"django_extensions\")\n INSTALLED_APPS.append(\"debug_toolbar\")\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n INTERNAL_IPS = env.str(\"INTERNAL_IPS\")\n", "path": "ephios/settings.py"}, {"content": "from ephios.helpers.signals import footer_link\n\n\ndef ephios_base_context(request):\n footer = {}\n for receiver, result in footer_link.send(None, request=request):\n for label, url in result.items():\n footer[label] = url\n\n return {\n \"footer\": footer,\n }\n", "path": "ephios/helpers/context.py"}], "after_files": [{"content": "from django import template\nfrom django.utils.safestring import mark_safe\n\nfrom ephios.event_management.models import AbstractParticipation\n\nregister = template.Library()\n\n\[email protected](name=\"shift_status\")\ndef shift_status(shift, user):\n participation = user.as_participant().participation_for(shift)\n if participation is not None:\n color = {\n AbstractParticipation.States.USER_DECLINED: \"text-danger\",\n AbstractParticipation.States.RESPONSIBLE_REJECTED: \"text-danger\",\n AbstractParticipation.States.REQUESTED: \"text-warning\",\n AbstractParticipation.States.CONFIRMED: \"text-success\",\n }[participation.state]\n return mark_safe(f'<span class=\"{color}\">{participation.get_state_display()}</span><br>')\n return \"\"\n\n\[email protected](name=\"can_sign_up\")\ndef can_sign_up(shift, user):\n return shift.signup_method.can_sign_up(user.as_participant())\n\n\[email protected](name=\"render_shift_state\")\ndef render_shift_state(shift, request):\n return shift.signup_method.render_shift_state(request)\n\n\[email protected](name=\"signup_errors\")\ndef signup_errors(shift, user):\n return shift.signup_method.get_signup_errors(user.as_participant())\n\n\[email protected](name=\"can_decline\")\ndef can_decline(shift, user):\n return shift.signup_method.can_decline(user.as_participant())\n\n\[email protected](name=\"decline_errors\")\ndef decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n", "path": "ephios/event_management/templatetags/event_extras.py"}, {"content": "\"\"\"\nDjango settings for ephios project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\n\nfrom email.utils import getaddresses\n\nimport environ\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nenv = environ.Env()\n# for syntax see 
https://django-environ.readthedocs.io/en/latest/\nenviron.Env.read_env(env_file=os.path.join(BASE_DIR, \".env\"))\n\nSECRET_KEY = env.str(\"SECRET_KEY\")\nDEBUG = env.bool(\"DEBUG\")\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\")\nSITE_URL = env.str(\"SITE_URL\")\n\nif not DEBUG:\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n X_FRAME_OPTIONS = \"DENY\"\n SECURE_CONTENT_TYPE_NOSNIFF = True\n SECURE_BROWSER_XSS_FILTER = True\n SECURE_SSL_REDIRECT = True\n SECURE_HSTS_SECONDS = 3600\n SECURE_HSTS_INCLUDE_SUBDOMAINS = True\n SECURE_REFERRER_POLICY = \"same-origin\"\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"polymorphic\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"bootstrap4\",\n \"guardian\",\n \"django_select2\",\n \"jquery\",\n \"djangoformsetjs\",\n \"ephios.user_management\",\n \"ephios.event_management\",\n \"ephios.helpers\",\n \"ephios.plugins.basesignup\",\n \"ephios.plugins.pages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n]\n\nROOT_URLCONF = \"ephios.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"ephios/templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"ephios.helpers.context.ephios_base_context\",\n ],\n },\n },\n]\n\nLOCALE_PATHS = (os.path.join(BASE_DIR, \"ephios/locale\"),)\n\nWSGI_APPLICATION = \"ephios.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\"default\": env.db_url()}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"user_management.utils.CustomMinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"guardian.backends.ObjectPermissionBackend\",\n)\n\nAUTH_USER_MODEL = \"user_management.UserProfile\"\nLOGIN_REDIRECT_URL = \"/\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"de-de\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = env.str(\"STATIC_URL\")\nSTATIC_ROOT = env.str(\"STATIC_ROOT\")\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"ephios/static\"),)\n\n# mail configuration\nEMAIL_CONFIG = 
env.email_url(\"EMAIL_URL\")\nvars().update(EMAIL_CONFIG)\nDEFAULT_FROM_EMAIL = env.str(\"DEFAULT_FROM_EMAIL\")\nSERVER_EMAIL = env.str(\"SERVER_EMAIL\")\nADMINS = getaddresses([env(\"ADMINS\")])\n\n# Guardian configuration\nANONYMOUS_USER_NAME = None\nGUARDIAN_MONKEY_PATCH = False\n\n# django-select2\n# Prevent django-select from loading the select2 resources as we want to serve them locally\nSELECT2_JS = \"\"\nSELECT2_CSS = \"\"\nSELECT2_I18N_PATH = \"\"\n\n# django-debug-toolbar\nif DEBUG:\n INSTALLED_APPS.append(\"django_extensions\")\n INSTALLED_APPS.append(\"debug_toolbar\")\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n INTERNAL_IPS = env.str(\"INTERNAL_IPS\")\n", "path": "ephios/settings.py"}, {"content": "from django.templatetags.static import static\nfrom django.utils.translation import get_language\n\nfrom ephios.helpers.signals import footer_link\n\n\ndef ephios_base_context(request):\n footer = {}\n for receiver, result in footer_link.send(None, request=request):\n for label, url in result.items():\n footer[label] = url\n\n datatables_translation_url = None\n if get_language() == \"de-de\":\n datatables_translation_url = static(\"datatables/german.json\")\n\n return {\"footer\": footer, \"datatables_translation_url\": datatables_translation_url}\n", "path": "ephios/helpers/context.py"}]}
| 2,352 | 418 |
gh_patches_debug_19252
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-658
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[cliquet] Add a way to change the port used when using kinto start.
original: https://github.com/mozilla-services/cliquet/issues/686
All in title
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/__main__.py`
Content:
```
1 from __future__ import print_function
2 import argparse
3 import os
4 import sys
5
6 from six.moves import input
7 from kinto.core import scripts
8 from pyramid.scripts import pserve
9 from pyramid.paster import bootstrap
10 from kinto import __version__
11 from kinto.config import init
12
13 CONFIG_FILE = 'config/kinto.ini'
14
15
16 def main(args=None):
17 """The main routine."""
18 if args is None:
19 args = sys.argv[1:]
20
21 parser = argparse.ArgumentParser(description="Kinto commands")
22 parser.add_argument('--ini',
23 help='Application configuration file',
24 dest='ini_file',
25 required=False,
26 default=CONFIG_FILE)
27 parser.add_argument('--backend',
28 help='Specify backend',
29 dest='backend',
30 required=False,
31 default=None)
32
33 parser.add_argument('-v', '--version',
34 action='version', version=__version__,
35 help='Print the Kinto version and exit.')
36
37 subparsers = parser.add_subparsers(title='subcommands',
38 description='valid subcommands',
39 dest='subcommand',
40 help='init/start/migrate')
41 subparsers.required = True
42
43 parser_init = subparsers.add_parser('init')
44 parser_init.set_defaults(which='init')
45
46 parser_migrate = subparsers.add_parser('migrate')
47 parser_migrate.set_defaults(which='migrate')
48
49 parser_start = subparsers.add_parser('start')
50 parser_start.add_argument('--reload',
51 action='store_true',
52 help='Restart when code or config changes',
53 required=False,
54 default=False)
55 parser_start.set_defaults(which='start')
56
57 parsed_args = vars(parser.parse_args(args))
58
59 config_file = parsed_args['ini_file']
60
61 if parsed_args['which'] == 'init':
62 if os.path.exists(config_file):
63 print("%s already exists." % config_file, file=sys.stderr)
64 return 1
65
66 backend = parsed_args['backend']
67 if not backend:
68 while True:
69 prompt = ("Select the backend you would like to use: "
70 "(1 - postgresql, 2 - redis, default - memory) ")
71 answer = input(prompt).strip()
72 try:
73 backends = {"1": "postgresql", "2": "redis", "": "memory"}
74 backend = backends[answer]
75 break
76 except KeyError:
77 pass
78
79 init(config_file, backend)
80
81 # Install postgresql libraries if necessary
82 if backend == "postgresql":
83 try:
84 import psycopg2 # NOQA
85 except ImportError:
86 import pip
87 pip.main(['install', "kinto[postgresql]"])
88
89 elif parsed_args['which'] == 'migrate':
90 env = bootstrap(config_file)
91 scripts.migrate(env)
92
93 elif parsed_args['which'] == 'start':
94 pserve_argv = ['pserve', config_file]
95 if parsed_args['reload']:
96 pserve_argv.append('--reload')
97 pserve.main(pserve_argv)
98
99 return 0
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -52,6 +52,11 @@
help='Restart when code or config changes',
required=False,
default=False)
+ parser_start.add_argument('--port',
+ type=int,
+ help='Listening port number',
+ required=False,
+ default=8888)
parser_start.set_defaults(which='start')
parsed_args = vars(parser.parse_args(args))
@@ -94,6 +99,7 @@
pserve_argv = ['pserve', config_file]
if parsed_args['reload']:
pserve_argv.append('--reload')
+ pserve_argv.append('http_port=%s' % parsed_args['port'])
pserve.main(pserve_argv)
return 0
|
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -52,6 +52,11 @@\n help='Restart when code or config changes',\n required=False,\n default=False)\n+ parser_start.add_argument('--port',\n+ type=int,\n+ help='Listening port number',\n+ required=False,\n+ default=8888)\n parser_start.set_defaults(which='start')\n \n parsed_args = vars(parser.parse_args(args))\n@@ -94,6 +99,7 @@\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n+ pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n \n return 0\n", "issue": "[cliquet] Add a way to change the port used when using kinto start.\noriginal: https://github.com/mozilla-services/cliquet/issues/686\n\nAll in title\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\n\nfrom six.moves import input\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n dest='subcommand',\n help='init/start/migrate')\n subparsers.required = True\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n parser_start.set_defaults(which='start')\n\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n\n if parsed_args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n\n elif parsed_args['which'] == 'migrate':\n env = bootstrap(config_file)\n scripts.migrate(env)\n\n elif parsed_args['which'] == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve.main(pserve_argv)\n\n return 0\n", "path": "kinto/__main__.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport 
sys\n\nfrom six.moves import input\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n dest='subcommand',\n help='init/start/migrate')\n subparsers.required = True\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n parser_start.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=8888)\n parser_start.set_defaults(which='start')\n\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n\n if parsed_args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n\n elif parsed_args['which'] == 'migrate':\n env = bootstrap(config_file)\n scripts.migrate(env)\n\n elif parsed_args['which'] == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n return 0\n", "path": "kinto/__main__.py"}]}
| 1,128 | 194 |
gh_patches_debug_4469
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-4156
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DVC status fails after using add -R
Hey :)
I have a folder of files I added with
$$ dvc add -R raw_data
Then I've imported the folder in other git repo using
$$ dvc import -o data --rev raw-v1 https://[email protected]/.../raw_data_reg.git raw_data
Then when execute dvc status I get an error message
```
$$ dvc status -v
2020-06-29 13:54:52,056 DEBUG: fetched: [(3,)]
2020-06-29 13:54:52,059 DEBUG: Creating external repo https://[email protected]/.../raw_data_reg.git@43fb50fb519d58415bba4903b480f44811fefec1
2020-06-29 13:54:52,059 DEBUG: erepo: git clone https://[email protected]/.../raw_data_reg.git to a temporary dir
Cloning |0.00 [00:00, ?obj/s]Password for 'https://[email protected]':
2020-06-29 13:54:58,709 DEBUG: fetched: [(17,)]
2020-06-29 13:54:58,728 ERROR: failed to obtain data status - get_hash() takes 2 positional arguments but 3 were given
------------------------------------------------------------
Traceback (most recent call last):
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/dependency/repo.py", line 57, in _get_checksum
return repo.find_out_by_relpath(self.def_path).info["md5"]
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/__init__.py", line 566, in find_out_by_relpath
(out,) = self.find_outs_by_path(path)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/__init__.py", line 560, in find_outs_by_path
raise OutputNotFoundError(path, self)
dvc.exceptions.OutputNotFoundError: Unable to find DVC-file with output '../../../tmp/tmpq5w4msxhdvc-clone/raw_data'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/command/status.py", line 51, in run
recursive=self.args.recursive,
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/__init__.py", line 35, in wrapper
ret = f(repo, *args, **kwargs)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/status.py", line 146, in status
self, targets, with_deps=with_deps, recursive=recursive
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/status.py", line 37, in _local_status
return _joint_status(stages)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/status.py", line 23, in _joint_status
status.update(stage.status(check_updates=True))
File "/home/moshe/.local/lib/python3.6/site-packages/funcy/decorators.py", line 39, in wrapper
return deco(call, *dargs, **dkwargs)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/decorators.py", line 35, in rwlocked
return call()
File "/home/moshe/.local/lib/python3.6/site-packages/funcy/decorators.py", line 60, in __call__
return self._func(*self._args, **self._kwargs)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/__init__.py", line 469, in status
self._status_deps(ret)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/__init__.py", line 485, in _status_deps
deps_status = self._status(self.deps)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/__init__.py", line 480, in _status
ret.update(entry.status())
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/dependency/repo.py", line 71, in status
current_checksum = self._get_checksum(locked=True)
File "/home/moshe/.local/lib/python3.6/site-packages/dvc/dependency/repo.py", line 67, in _get_checksum
return self.repo.cache.local.get_hash(path, tree)
TypeError: get_hash() takes 2 positional arguments but 3 were given
------------------------------------------------------------
$$ dvc version
DVC version: 1.0.1
Python version: 3.6.9
Platform: Linux-5.3.0-61-generic-x86_64-with-Ubuntu-18.04-bionic
Binary: False
Package: pip
Supported remotes: azure, gdrive, gs, hdfs, http, https, s3, ssh, oss
Cache: reflink - not supported, hardlink - supported, symlink - supported
Filesystem type (cache directory): ('ext4', '/dev/sda1')
Repo: dvc, git
Filesystem type (workspace): ('ext4', '/dev/sda1')
```
Do you know what could be the problem?
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/dependency/repo.py`
Content:
```
1 import os
2
3 from voluptuous import Required
4
5 from dvc.exceptions import OutputNotFoundError
6 from dvc.path_info import PathInfo
7
8 from .local import LocalDependency
9
10
11 class RepoDependency(LocalDependency):
12 PARAM_REPO = "repo"
13 PARAM_URL = "url"
14 PARAM_REV = "rev"
15 PARAM_REV_LOCK = "rev_lock"
16
17 REPO_SCHEMA = {
18 PARAM_REPO: {
19 Required(PARAM_URL): str,
20 PARAM_REV: str,
21 PARAM_REV_LOCK: str,
22 }
23 }
24
25 def __init__(self, def_repo, stage, *args, **kwargs):
26 self.def_repo = def_repo
27 super().__init__(stage, *args, **kwargs)
28
29 def _parse_path(self, remote, path):
30 return None
31
32 @property
33 def is_in_repo(self):
34 return False
35
36 @property
37 def repo_pair(self):
38 d = self.def_repo
39 rev = d.get(self.PARAM_REV_LOCK) or d.get(self.PARAM_REV)
40 return d[self.PARAM_URL], rev
41
42 def __str__(self):
43 return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL])
44
45 def _make_repo(self, *, locked=True):
46 from dvc.external_repo import external_repo
47
48 d = self.def_repo
49 rev = (d.get("rev_lock") if locked else None) or d.get("rev")
50 return external_repo(d["url"], rev=rev)
51
52 def _get_checksum(self, locked=True):
53 from dvc.repo.tree import RepoTree
54
55 with self._make_repo(locked=locked) as repo:
56 try:
57 return repo.find_out_by_relpath(self.def_path).info["md5"]
58 except OutputNotFoundError:
59 path = PathInfo(os.path.join(repo.root_dir, self.def_path))
60
61 # we want stream but not fetch, so DVC out directories are
62 # walked, but dir contents is not fetched
63 tree = RepoTree(repo, stream=True)
64
65 # We are polluting our repo cache with some dir listing here
66 if tree.isdir(path):
67 return self.repo.cache.local.get_hash(path, tree)
68 return tree.get_file_hash(path)
69
70 def status(self):
71 current_checksum = self._get_checksum(locked=True)
72 updated_checksum = self._get_checksum(locked=False)
73
74 if current_checksum != updated_checksum:
75 return {str(self): "update available"}
76
77 return {}
78
79 def save(self):
80 pass
81
82 def dumpd(self):
83 return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}
84
85 def download(self, to):
86 with self._make_repo() as repo:
87 if self.def_repo.get(self.PARAM_REV_LOCK) is None:
88 self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()
89
90 cache = self.repo.cache.local
91 with repo.use_cache(cache):
92 _, _, cache_infos = repo.fetch_external([self.def_path])
93 cache.checkout(to.path_info, cache_infos[0])
94
95 def update(self, rev=None):
96 if rev:
97 self.def_repo[self.PARAM_REV] = rev
98
99 with self._make_repo(locked=False) as repo:
100 self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/dependency/repo.py b/dvc/dependency/repo.py
--- a/dvc/dependency/repo.py
+++ b/dvc/dependency/repo.py
@@ -64,7 +64,7 @@
# We are polluting our repo cache with some dir listing here
if tree.isdir(path):
- return self.repo.cache.local.get_hash(path, tree)
+ return self.repo.cache.local.get_hash(path, tree=tree)
return tree.get_file_hash(path)
def status(self):
|
{"golden_diff": "diff --git a/dvc/dependency/repo.py b/dvc/dependency/repo.py\n--- a/dvc/dependency/repo.py\n+++ b/dvc/dependency/repo.py\n@@ -64,7 +64,7 @@\n \n # We are polluting our repo cache with some dir listing here\n if tree.isdir(path):\n- return self.repo.cache.local.get_hash(path, tree)\n+ return self.repo.cache.local.get_hash(path, tree=tree)\n return tree.get_file_hash(path)\n \n def status(self):\n", "issue": "DVC status fails after using add -R\nHey :)\r\n\r\nI have a folder of files I added with \r\n$$ dvc add -R raw_data\r\n\r\nThen I've imported the folder in other git repo using\r\n$$ dvc import -o data --rev raw-v1 https://[email protected]/.../raw_data_reg.git raw_data\r\n\r\nThen when execute dvc status I get an error message\r\n\r\n```\r\n$$ dvc status -v\r\n\r\n2020-06-29 13:54:52,056 DEBUG: fetched: [(3,)] \r\n2020-06-29 13:54:52,059 DEBUG: Creating external repo https://[email protected]/.../raw_data_reg.git@43fb50fb519d58415bba4903b480f44811fefec1\r\n2020-06-29 13:54:52,059 DEBUG: erepo: git clone https://[email protected]/.../raw_data_reg.git to a temporary dir\r\nCloning |0.00 [00:00, ?obj/s]Password for 'https://[email protected]':\r\n2020-06-29 13:54:58,709 DEBUG: fetched: [(17,)] \r\n2020-06-29 13:54:58,728 ERROR: failed to obtain data status - get_hash() takes 2 positional arguments but 3 were given\r\n------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/dependency/repo.py\", line 57, in _get_checksum\r\n return repo.find_out_by_relpath(self.def_path).info[\"md5\"]\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/__init__.py\", line 566, in find_out_by_relpath\r\n (out,) = self.find_outs_by_path(path)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/__init__.py\", line 560, in find_outs_by_path\r\n raise OutputNotFoundError(path, self)\r\ndvc.exceptions.OutputNotFoundError: Unable to find DVC-file with output '../../../tmp/tmpq5w4msxhdvc-clone/raw_data'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/command/status.py\", line 51, in run\r\n recursive=self.args.recursive,\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/__init__.py\", line 35, in wrapper\r\n ret = f(repo, *args, **kwargs)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/status.py\", line 146, in status\r\n self, targets, with_deps=with_deps, recursive=recursive\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/status.py\", line 37, in _local_status\r\n return _joint_status(stages)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/repo/status.py\", line 23, in _joint_status\r\n status.update(stage.status(check_updates=True))\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/funcy/decorators.py\", line 39, in wrapper\r\n return deco(call, *dargs, **dkwargs)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/decorators.py\", line 35, in rwlocked\r\n return call()\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/funcy/decorators.py\", line 60, in __call__\r\n return self._func(*self._args, **self._kwargs)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/__init__.py\", line 469, in status\r\n self._status_deps(ret)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/__init__.py\", line 485, in 
_status_deps\r\n deps_status = self._status(self.deps)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/stage/__init__.py\", line 480, in _status\r\n ret.update(entry.status())\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/dependency/repo.py\", line 71, in status\r\n current_checksum = self._get_checksum(locked=True)\r\n File \"/home/moshe/.local/lib/python3.6/site-packages/dvc/dependency/repo.py\", line 67, in _get_checksum\r\n return self.repo.cache.local.get_hash(path, tree)\r\nTypeError: get_hash() takes 2 positional arguments but 3 were given\r\n------------------------------------------------------------\r\n\r\n$$ dvc version\r\nDVC version: 1.0.1\r\nPython version: 3.6.9\r\nPlatform: Linux-5.3.0-61-generic-x86_64-with-Ubuntu-18.04-bionic\r\nBinary: False\r\nPackage: pip\r\nSupported remotes: azure, gdrive, gs, hdfs, http, https, s3, ssh, oss\r\nCache: reflink - not supported, hardlink - supported, symlink - supported\r\nFilesystem type (cache directory): ('ext4', '/dev/sda1')\r\nRepo: dvc, git\r\nFilesystem type (workspace): ('ext4', '/dev/sda1')\r\n```\r\n\r\nDo you know what could be the problem?\r\nThanks.\r\n\n", "before_files": [{"content": "import os\n\nfrom voluptuous import Required\n\nfrom dvc.exceptions import OutputNotFoundError\nfrom dvc.path_info import PathInfo\n\nfrom .local import LocalDependency\n\n\nclass RepoDependency(LocalDependency):\n PARAM_REPO = \"repo\"\n PARAM_URL = \"url\"\n PARAM_REV = \"rev\"\n PARAM_REV_LOCK = \"rev_lock\"\n\n REPO_SCHEMA = {\n PARAM_REPO: {\n Required(PARAM_URL): str,\n PARAM_REV: str,\n PARAM_REV_LOCK: str,\n }\n }\n\n def __init__(self, def_repo, stage, *args, **kwargs):\n self.def_repo = def_repo\n super().__init__(stage, *args, **kwargs)\n\n def _parse_path(self, remote, path):\n return None\n\n @property\n def is_in_repo(self):\n return False\n\n @property\n def repo_pair(self):\n d = self.def_repo\n rev = d.get(self.PARAM_REV_LOCK) or d.get(self.PARAM_REV)\n return d[self.PARAM_URL], rev\n\n def __str__(self):\n return \"{} ({})\".format(self.def_path, self.def_repo[self.PARAM_URL])\n\n def _make_repo(self, *, locked=True):\n from dvc.external_repo import external_repo\n\n d = self.def_repo\n rev = (d.get(\"rev_lock\") if locked else None) or d.get(\"rev\")\n return external_repo(d[\"url\"], rev=rev)\n\n def _get_checksum(self, locked=True):\n from dvc.repo.tree import RepoTree\n\n with self._make_repo(locked=locked) as repo:\n try:\n return repo.find_out_by_relpath(self.def_path).info[\"md5\"]\n except OutputNotFoundError:\n path = PathInfo(os.path.join(repo.root_dir, self.def_path))\n\n # we want stream but not fetch, so DVC out directories are\n # walked, but dir contents is not fetched\n tree = RepoTree(repo, stream=True)\n\n # We are polluting our repo cache with some dir listing here\n if tree.isdir(path):\n return self.repo.cache.local.get_hash(path, tree)\n return tree.get_file_hash(path)\n\n def status(self):\n current_checksum = self._get_checksum(locked=True)\n updated_checksum = self._get_checksum(locked=False)\n\n if current_checksum != updated_checksum:\n return {str(self): \"update available\"}\n\n return {}\n\n def save(self):\n pass\n\n def dumpd(self):\n return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}\n\n def download(self, to):\n with self._make_repo() as repo:\n if self.def_repo.get(self.PARAM_REV_LOCK) is None:\n self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()\n\n cache = self.repo.cache.local\n with repo.use_cache(cache):\n _, _, cache_infos = 
repo.fetch_external([self.def_path])\n cache.checkout(to.path_info, cache_infos[0])\n\n def update(self, rev=None):\n if rev:\n self.def_repo[self.PARAM_REV] = rev\n\n with self._make_repo(locked=False) as repo:\n self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()\n", "path": "dvc/dependency/repo.py"}], "after_files": [{"content": "import os\n\nfrom voluptuous import Required\n\nfrom dvc.exceptions import OutputNotFoundError\nfrom dvc.path_info import PathInfo\n\nfrom .local import LocalDependency\n\n\nclass RepoDependency(LocalDependency):\n PARAM_REPO = \"repo\"\n PARAM_URL = \"url\"\n PARAM_REV = \"rev\"\n PARAM_REV_LOCK = \"rev_lock\"\n\n REPO_SCHEMA = {\n PARAM_REPO: {\n Required(PARAM_URL): str,\n PARAM_REV: str,\n PARAM_REV_LOCK: str,\n }\n }\n\n def __init__(self, def_repo, stage, *args, **kwargs):\n self.def_repo = def_repo\n super().__init__(stage, *args, **kwargs)\n\n def _parse_path(self, remote, path):\n return None\n\n @property\n def is_in_repo(self):\n return False\n\n @property\n def repo_pair(self):\n d = self.def_repo\n rev = d.get(self.PARAM_REV_LOCK) or d.get(self.PARAM_REV)\n return d[self.PARAM_URL], rev\n\n def __str__(self):\n return \"{} ({})\".format(self.def_path, self.def_repo[self.PARAM_URL])\n\n def _make_repo(self, *, locked=True):\n from dvc.external_repo import external_repo\n\n d = self.def_repo\n rev = (d.get(\"rev_lock\") if locked else None) or d.get(\"rev\")\n return external_repo(d[\"url\"], rev=rev)\n\n def _get_checksum(self, locked=True):\n from dvc.repo.tree import RepoTree\n\n with self._make_repo(locked=locked) as repo:\n try:\n return repo.find_out_by_relpath(self.def_path).info[\"md5\"]\n except OutputNotFoundError:\n path = PathInfo(os.path.join(repo.root_dir, self.def_path))\n\n # we want stream but not fetch, so DVC out directories are\n # walked, but dir contents is not fetched\n tree = RepoTree(repo, stream=True)\n\n # We are polluting our repo cache with some dir listing here\n if tree.isdir(path):\n return self.repo.cache.local.get_hash(path, tree=tree)\n return tree.get_file_hash(path)\n\n def status(self):\n current_checksum = self._get_checksum(locked=True)\n updated_checksum = self._get_checksum(locked=False)\n\n if current_checksum != updated_checksum:\n return {str(self): \"update available\"}\n\n return {}\n\n def save(self):\n pass\n\n def dumpd(self):\n return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}\n\n def download(self, to):\n with self._make_repo() as repo:\n if self.def_repo.get(self.PARAM_REV_LOCK) is None:\n self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()\n\n cache = self.repo.cache.local\n with repo.use_cache(cache):\n _, _, cache_infos = repo.fetch_external([self.def_path])\n cache.checkout(to.path_info, cache_infos[0])\n\n def update(self, rev=None):\n if rev:\n self.def_repo[self.PARAM_REV] = rev\n\n with self._make_repo(locked=False) as repo:\n self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()\n", "path": "dvc/dependency/repo.py"}]}
num_tokens: 2,473 | num_tokens_diff: 116
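An aside on the record above (the DVC `get_hash` fix): the traceback embedded in its verification data ends in `TypeError: get_hash() takes 2 positional arguments but 3 were given`, and the golden diff resolves it by passing `tree` as a keyword argument (`tree=tree`) instead of positionally. The snippet below only illustrates that general failure mode with made-up names; it is not code from DVC.

```python
class LocalCache:
    # Hypothetical signature: 'tree' is keyword-only, so a third positional
    # argument triggers the same kind of TypeError seen in the traceback.
    def get_hash(self, path, *, tree=None):
        return (path, tree)

cache = LocalCache()
cache.get_hash("data/file.txt", tree="some-tree")   # ok: keyword argument
try:
    cache.get_hash("data/file.txt", "some-tree")     # TypeError: takes 2 positional arguments but 3 were given
except TypeError as err:
    print(err)
```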
problem_id: gh_patches_debug_6001
source: rasdani/github-patches
task_type: git_diff
in_source_id: holoviz__holoviews-6237
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug with hvplot plotly violin plots using 'by' argument
Violin plots on hvplot with the plotly extension do not work properly when using the 'by' argument to make a violin for each category. Instead of one violin per category as expected, only the first letter of each category is used, and violins of categories with the same first letter are stacked. The legend shows only the first letter of each category name instead of the full names.
This can also be seen in the hvplot documentation. For example, [the violin plot at the bottom of this page](https://hvplot.holoviz.org/user_guide/Plotting.html), which uses bokeh, gives each category its own violin, with the full category name shown on the x axis and in the legend. This is the expected behaviour.
Comparing with [the same plot using the plotly extension](https://hvplot.holoviz.org/user_guide/Plotting_with_Plotly.html), you can see that any categories sharing the same first letter are stacked, e.g. OO and OH are stacked and both are labelled as O.
Software versions, although I think it doesn't matter since the bug is visible in the documentation as well:
Python 3.12 running on mac os 13.3.1 and on a Linux server
Holoviews 1.18.3
Reproducible code:
```
import numpy as np
import hvplot.pandas
import hvplot.dask
hvplot.extension('plotly')
from hvplot.sample_data import us_crime, airline_flights
flights = airline_flights.to_dask().persist()
bugged_plot = flights.hvplot.violin(y='depdelay', by='carrier', ylim=(-20, 60), height=500)
hvplot.show(bugged_plot)
hvplot.extension('bokeh')
correct_plot = flights.hvplot.violin(y='depdelay', by='carrier', ylim=(-20, 60), height=500)
hvplot.show(correct_plot)
```
--- END ISSUE ---
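Before the relevant source is shown below, it may help to see why only the first letter of each category survives. With a single key dimension, grouping can hand back a plain string key, and zipping the kdim list with that string pairs the dimension with the string's first character only. The snippet is a standalone illustration in plain Python and does not use HoloViews itself.

```python
kdims = ["carrier"]       # a single key dimension, as in the violin example
key_as_str = "OO"         # a group key that arrives as a bare string
key_as_tuple = ("OO",)    # the same key wrapped in a 1-tuple

# zip() walks the string character by character, so only 'O' gets paired:
print(list(zip(kdims, key_as_str)))    # [('carrier', 'O')]
print(list(zip(kdims, key_as_tuple)))  # [('carrier', 'OO')]

# A label joined from those pairs collapses 'OO' and 'OH' to the same 'O':
print(",".join(v for _, v in zip(kdims, key_as_str)))    # 'O'
print(",".join(v for _, v in zip(kdims, key_as_tuple)))  # 'OO'
```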
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holoviews/plotting/plotly/stats.py`
Content:
```
1 import param
2
3 from ..mixins import MultiDistributionMixin
4 from .chart import ChartPlot
5 from .element import ColorbarPlot, ElementPlot
6 from .selection import PlotlyOverlaySelectionDisplay
7
8
9 class BivariatePlot(ChartPlot, ColorbarPlot):
10
11 filled = param.Boolean(default=False)
12
13 ncontours = param.Integer(default=None)
14
15 style_opts = ['visible', 'cmap', 'showlabels', 'labelfont', 'labelformat', 'showlines']
16
17 _style_key = 'contours'
18
19 selection_display = PlotlyOverlaySelectionDisplay()
20
21 @classmethod
22 def trace_kwargs(cls, is_geo=False, **kwargs):
23 return {'type': 'histogram2dcontour'}
24
25 def graph_options(self, element, ranges, style, **kwargs):
26 opts = super().graph_options(element, ranges, style, **kwargs)
27 copts = self.get_color_opts(element.vdims[0], element, ranges, style)
28
29 if self.ncontours:
30 opts['autocontour'] = False
31 opts['ncontours'] = self.ncontours
32
33 # Make line width a little wider (default is less than 1)
34 opts['line'] = {'width': 1}
35
36 # Configure contours
37 opts['contours'] = {
38 'coloring': 'fill' if self.filled else 'lines',
39 'showlines': style.get('showlines', True)
40 }
41
42 # Add colorscale
43 opts['colorscale'] = copts['colorscale']
44
45 # Add colorbar
46 if 'colorbar' in copts:
47 opts['colorbar'] = copts['colorbar']
48
49 opts['showscale'] = copts.get('showscale', False)
50
51 # Add visible
52 opts['visible'] = style.get('visible', True)
53
54 return opts
55
56
57 class DistributionPlot(ElementPlot):
58
59 bandwidth = param.Number(default=None, doc="""
60 The bandwidth of the kernel for the density estimate.""")
61
62 cut = param.Number(default=3, doc="""
63 Draw the estimate to cut * bw from the extreme data points.""")
64
65 filled = param.Boolean(default=True, doc="""
66 Whether the bivariate contours should be filled.""")
67
68 style_opts = ['visible', 'color', 'dash', 'line_width']
69
70 _style_key = 'line'
71
72 selection_display = PlotlyOverlaySelectionDisplay()
73
74 @classmethod
75 def trace_kwargs(cls, is_geo=False, **kwargs):
76 return {'type': 'scatter', 'mode': 'lines'}
77
78
79 class MultiDistributionPlot(MultiDistributionMixin, ElementPlot):
80
81 def get_data(self, element, ranges, style, **kwargs):
82 if element.kdims:
83 groups = element.groupby(element.kdims).items()
84 else:
85 groups = [(element.label, element)]
86 plots = []
87 axis = 'x' if self.invert_axes else 'y'
88 for key, group in groups:
89 if element.kdims:
90 label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
91 else:
92 label = key
93 data = {axis: group.dimension_values(group.vdims[0]), 'name': label}
94 plots.append(data)
95 return plots
96
97
98
99 class BoxWhiskerPlot(MultiDistributionPlot):
100
101 boxpoints = param.ObjectSelector(objects=["all", "outliers",
102 "suspectedoutliers", False],
103 default='outliers', doc="""
104 Which points to show, valid options are 'all', 'outliers',
105 'suspectedoutliers' and False""")
106
107 jitter = param.Number(default=0, doc="""
108 Sets the amount of jitter in the sample points drawn. If "0",
109 the sample points align along the distribution axis. If "1",
110 the sample points are drawn in a random jitter of width equal
111 to the width of the box(es).""")
112
113 mean = param.ObjectSelector(default=False, objects=[True, False, 'sd'],
114 doc="""
115 If "True", the mean of the box(es)' underlying distribution
116 is drawn as a dashed line inside the box(es). If "sd" the
117 standard deviation is also drawn.""")
118
119 style_opts = ['visible', 'color', 'alpha', 'outliercolor', 'marker', 'size']
120
121 _style_key = 'marker'
122
123 selection_display = PlotlyOverlaySelectionDisplay()
124
125 @classmethod
126 def trace_kwargs(cls, is_geo=False, **kwargs):
127 return {'type': 'box'}
128
129 def graph_options(self, element, ranges, style, **kwargs):
130 options = super().graph_options(element, ranges, style, **kwargs)
131 options['boxmean'] = self.mean
132 options['jitter'] = self.jitter
133 return options
134
135
136 class ViolinPlot(MultiDistributionPlot):
137
138 box = param.Boolean(default=True, doc="""
139 Whether to draw a boxplot inside the violin""")
140
141 meanline = param.Boolean(default=False, doc="""
142 If "True", the mean of the box(es)' underlying distribution
143 is drawn as a dashed line inside the box(es). If "sd" the
144 standard deviation is also drawn.""")
145
146 style_opts = ['visible', 'color', 'alpha', 'outliercolor', 'marker', 'size']
147
148 _style_key = 'marker'
149
150 @classmethod
151 def trace_kwargs(cls, is_geo=False, **kwargs):
152 return {'type': 'violin'}
153
154 def graph_options(self, element, ranges, style, **kwargs):
155 options = super().graph_options(
156 element, ranges, style, **kwargs
157 )
158 options['meanline'] = {'visible': self.meanline}
159 options['box'] = {'visible': self.box}
160 return options
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/holoviews/plotting/plotly/stats.py b/holoviews/plotting/plotly/stats.py
--- a/holoviews/plotting/plotly/stats.py
+++ b/holoviews/plotting/plotly/stats.py
@@ -87,6 +87,8 @@
axis = 'x' if self.invert_axes else 'y'
for key, group in groups:
if element.kdims:
+ if isinstance(key, str):
+ key = (key,)
label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
else:
label = key
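A short note on the shape of the fix above: normalising a scalar group key to a 1-tuple before zipping it with the key dimensions is a common defensive pattern when the number of grouping dimensions can be one. A framework-free sketch of the same normalisation is below; the function name and signature are illustrative, not HoloViews API.

```python
def group_label(kdims, key):
    """Join one value per key dimension, tolerating scalar (e.g. str) keys."""
    if not isinstance(key, tuple):
        key = (key,)  # a scalar key becomes a 1-tuple so zip pairs it whole
    return ",".join(str(v) for _, v in zip(kdims, key))

print(group_label(["carrier"], "OO"))                  # 'OO'
print(group_label(["carrier", "year"], ("OO", 2008)))  # 'OO,2008'
```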
verification_info:
{"golden_diff": "diff --git a/holoviews/plotting/plotly/stats.py b/holoviews/plotting/plotly/stats.py\n--- a/holoviews/plotting/plotly/stats.py\n+++ b/holoviews/plotting/plotly/stats.py\n@@ -87,6 +87,8 @@\n axis = 'x' if self.invert_axes else 'y'\n for key, group in groups:\n if element.kdims:\n+ if isinstance(key, str):\n+ key = (key,)\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n", "issue": "Bug with hvplot plotly violin plots using 'by' argument\nViolin plots on hvplot with the plotly extension do not work properly when using the 'by' argument to make a violin for each category. Instead of one violin per category as expected, only the first letter of each category is used, and violins of categories with the same first letter are stacked. The legend shows only the first letter of each category name instead of the full names.\r\n\r\nThis can also be seen in the hvplot documentation. For example [the violin plot at the bottom of this page](https://hvplot.holoviz.org/user_guide/Plotting.html), using bokeh, in which each category has its own violin and its full name shown on the x axis and on the legend. This is the expected behaviour.\r\n\r\nCompare with [the same plot using the plotly extension](https://hvplot.holoviz.org/user_guide/Plotting_with_Plotly.html), you can see that any categories that share the same first letter are stacked e.g. OO and OH are stacked and are both labelled as O.\r\n\r\nSoftware versions, although I think it doesn't matter since the bug is visible in the documentation as well:\r\nPython 3.12 running on mac os 13.3.1 and on a Linux server\r\nHoloviews 1.18.3\r\n\r\nReproducible code:\r\n```\r\nimport numpy as np\r\nimport hvplot.pandas\r\nimport hvplot.dask\r\n\r\nhvplot.extension('plotly')\r\nfrom hvplot.sample_data import us_crime, airline_flights\r\nflights = airline_flights.to_dask().persist()\r\nbugged_plot = flights.hvplot.violin(y='depdelay', by='carrier', ylim=(-20, 60), height=500)\r\nhvplot.show(bugged_plot)\r\n\r\nhvplot.extension('bokeh')\r\ncorrect_plot = flights.hvplot.violin(y='depdelay', by='carrier', ylim=(-20, 60), height=500)\r\nhvplot.show(correct_plot)\r\n```\r\n\n", "before_files": [{"content": "import param\n\nfrom ..mixins import MultiDistributionMixin\nfrom .chart import ChartPlot\nfrom .element import ColorbarPlot, ElementPlot\nfrom .selection import PlotlyOverlaySelectionDisplay\n\n\nclass BivariatePlot(ChartPlot, ColorbarPlot):\n\n filled = param.Boolean(default=False)\n\n ncontours = param.Integer(default=None)\n\n style_opts = ['visible', 'cmap', 'showlabels', 'labelfont', 'labelformat', 'showlines']\n\n _style_key = 'contours'\n\n selection_display = PlotlyOverlaySelectionDisplay()\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'histogram2dcontour'}\n\n def graph_options(self, element, ranges, style, **kwargs):\n opts = super().graph_options(element, ranges, style, **kwargs)\n copts = self.get_color_opts(element.vdims[0], element, ranges, style)\n\n if self.ncontours:\n opts['autocontour'] = False\n opts['ncontours'] = self.ncontours\n\n # Make line width a little wider (default is less than 1)\n opts['line'] = {'width': 1}\n\n # Configure contours\n opts['contours'] = {\n 'coloring': 'fill' if self.filled else 'lines',\n 'showlines': style.get('showlines', True)\n }\n\n # Add colorscale\n opts['colorscale'] = copts['colorscale']\n\n # Add colorbar\n if 'colorbar' in copts:\n opts['colorbar'] = copts['colorbar']\n\n opts['showscale'] = 
copts.get('showscale', False)\n\n # Add visible\n opts['visible'] = style.get('visible', True)\n\n return opts\n\n\nclass DistributionPlot(ElementPlot):\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=True, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n style_opts = ['visible', 'color', 'dash', 'line_width']\n\n _style_key = 'line'\n\n selection_display = PlotlyOverlaySelectionDisplay()\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'scatter', 'mode': 'lines'}\n\n\nclass MultiDistributionPlot(MultiDistributionMixin, ElementPlot):\n\n def get_data(self, element, ranges, style, **kwargs):\n if element.kdims:\n groups = element.groupby(element.kdims).items()\n else:\n groups = [(element.label, element)]\n plots = []\n axis = 'x' if self.invert_axes else 'y'\n for key, group in groups:\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n data = {axis: group.dimension_values(group.vdims[0]), 'name': label}\n plots.append(data)\n return plots\n\n\n\nclass BoxWhiskerPlot(MultiDistributionPlot):\n\n boxpoints = param.ObjectSelector(objects=[\"all\", \"outliers\",\n \"suspectedoutliers\", False],\n default='outliers', doc=\"\"\"\n Which points to show, valid options are 'all', 'outliers',\n 'suspectedoutliers' and False\"\"\")\n\n jitter = param.Number(default=0, doc=\"\"\"\n Sets the amount of jitter in the sample points drawn. If \"0\",\n the sample points align along the distribution axis. If \"1\",\n the sample points are drawn in a random jitter of width equal\n to the width of the box(es).\"\"\")\n\n mean = param.ObjectSelector(default=False, objects=[True, False, 'sd'],\n doc=\"\"\"\n If \"True\", the mean of the box(es)' underlying distribution\n is drawn as a dashed line inside the box(es). If \"sd\" the\n standard deviation is also drawn.\"\"\")\n\n style_opts = ['visible', 'color', 'alpha', 'outliercolor', 'marker', 'size']\n\n _style_key = 'marker'\n\n selection_display = PlotlyOverlaySelectionDisplay()\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'box'}\n\n def graph_options(self, element, ranges, style, **kwargs):\n options = super().graph_options(element, ranges, style, **kwargs)\n options['boxmean'] = self.mean\n options['jitter'] = self.jitter\n return options\n\n\nclass ViolinPlot(MultiDistributionPlot):\n\n box = param.Boolean(default=True, doc=\"\"\"\n Whether to draw a boxplot inside the violin\"\"\")\n\n meanline = param.Boolean(default=False, doc=\"\"\"\n If \"True\", the mean of the box(es)' underlying distribution\n is drawn as a dashed line inside the box(es). 
If \"sd\" the\n standard deviation is also drawn.\"\"\")\n\n style_opts = ['visible', 'color', 'alpha', 'outliercolor', 'marker', 'size']\n\n _style_key = 'marker'\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'violin'}\n\n def graph_options(self, element, ranges, style, **kwargs):\n options = super().graph_options(\n element, ranges, style, **kwargs\n )\n options['meanline'] = {'visible': self.meanline}\n options['box'] = {'visible': self.box}\n return options\n", "path": "holoviews/plotting/plotly/stats.py"}], "after_files": [{"content": "import param\n\nfrom ..mixins import MultiDistributionMixin\nfrom .chart import ChartPlot\nfrom .element import ColorbarPlot, ElementPlot\nfrom .selection import PlotlyOverlaySelectionDisplay\n\n\nclass BivariatePlot(ChartPlot, ColorbarPlot):\n\n filled = param.Boolean(default=False)\n\n ncontours = param.Integer(default=None)\n\n style_opts = ['visible', 'cmap', 'showlabels', 'labelfont', 'labelformat', 'showlines']\n\n _style_key = 'contours'\n\n selection_display = PlotlyOverlaySelectionDisplay()\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'histogram2dcontour'}\n\n def graph_options(self, element, ranges, style, **kwargs):\n opts = super().graph_options(element, ranges, style, **kwargs)\n copts = self.get_color_opts(element.vdims[0], element, ranges, style)\n\n if self.ncontours:\n opts['autocontour'] = False\n opts['ncontours'] = self.ncontours\n\n # Make line width a little wider (default is less than 1)\n opts['line'] = {'width': 1}\n\n # Configure contours\n opts['contours'] = {\n 'coloring': 'fill' if self.filled else 'lines',\n 'showlines': style.get('showlines', True)\n }\n\n # Add colorscale\n opts['colorscale'] = copts['colorscale']\n\n # Add colorbar\n if 'colorbar' in copts:\n opts['colorbar'] = copts['colorbar']\n\n opts['showscale'] = copts.get('showscale', False)\n\n # Add visible\n opts['visible'] = style.get('visible', True)\n\n return opts\n\n\nclass DistributionPlot(ElementPlot):\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=True, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n style_opts = ['visible', 'color', 'dash', 'line_width']\n\n _style_key = 'line'\n\n selection_display = PlotlyOverlaySelectionDisplay()\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'scatter', 'mode': 'lines'}\n\n\nclass MultiDistributionPlot(MultiDistributionMixin, ElementPlot):\n\n def get_data(self, element, ranges, style, **kwargs):\n if element.kdims:\n groups = element.groupby(element.kdims).items()\n else:\n groups = [(element.label, element)]\n plots = []\n axis = 'x' if self.invert_axes else 'y'\n for key, group in groups:\n if element.kdims:\n if isinstance(key, str):\n key = (key,)\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n data = {axis: group.dimension_values(group.vdims[0]), 'name': label}\n plots.append(data)\n return plots\n\n\n\nclass BoxWhiskerPlot(MultiDistributionPlot):\n\n boxpoints = param.ObjectSelector(objects=[\"all\", \"outliers\",\n \"suspectedoutliers\", False],\n default='outliers', doc=\"\"\"\n Which points to show, valid options are 'all', 'outliers',\n 'suspectedoutliers' and False\"\"\")\n\n jitter = 
param.Number(default=0, doc=\"\"\"\n Sets the amount of jitter in the sample points drawn. If \"0\",\n the sample points align along the distribution axis. If \"1\",\n the sample points are drawn in a random jitter of width equal\n to the width of the box(es).\"\"\")\n\n mean = param.ObjectSelector(default=False, objects=[True, False, 'sd'],\n doc=\"\"\"\n If \"True\", the mean of the box(es)' underlying distribution\n is drawn as a dashed line inside the box(es). If \"sd\" the\n standard deviation is also drawn.\"\"\")\n\n style_opts = ['visible', 'color', 'alpha', 'outliercolor', 'marker', 'size']\n\n _style_key = 'marker'\n\n selection_display = PlotlyOverlaySelectionDisplay()\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'box'}\n\n def graph_options(self, element, ranges, style, **kwargs):\n options = super().graph_options(element, ranges, style, **kwargs)\n options['boxmean'] = self.mean\n options['jitter'] = self.jitter\n return options\n\n\nclass ViolinPlot(MultiDistributionPlot):\n\n box = param.Boolean(default=True, doc=\"\"\"\n Whether to draw a boxplot inside the violin\"\"\")\n\n meanline = param.Boolean(default=False, doc=\"\"\"\n If \"True\", the mean of the box(es)' underlying distribution\n is drawn as a dashed line inside the box(es). If \"sd\" the\n standard deviation is also drawn.\"\"\")\n\n style_opts = ['visible', 'color', 'alpha', 'outliercolor', 'marker', 'size']\n\n _style_key = 'marker'\n\n @classmethod\n def trace_kwargs(cls, is_geo=False, **kwargs):\n return {'type': 'violin'}\n\n def graph_options(self, element, ranges, style, **kwargs):\n options = super().graph_options(\n element, ranges, style, **kwargs\n )\n options['meanline'] = {'visible': self.meanline}\n options['box'] = {'visible': self.box}\n return options\n", "path": "holoviews/plotting/plotly/stats.py"}]}
num_tokens: 2,334 | num_tokens_diff: 152
problem_id: gh_patches_debug_36961
source: rasdani/github-patches
task_type: git_diff
in_source_id: wagtail__wagtail-10981
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow ordering by multiple fields on the Wagtail API
### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
The Wagtail API does not allow ordering by multiple fields at the same time. For example, let's say I want a list of blog pages sorted by type and in reverse chronological order. This is currently not possible in the API.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
`[GET] /api/v2/pages/?type=blog.BlogPage&order=-published_at,blog_type`
The result should be the same as if this query was run:
```python
BlogPage.objects.order_by('-published_at', 'blog_type')
```
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
In the meantime, we're going to add this functionality ad-hoc.
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
This is for a headless website.
--- END ISSUE ---
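For a concrete sense of the behaviour being requested, the sketch below turns a comma-separated `order` query parameter into arguments for `order_by`. It is a generic illustration with a made-up helper name, not the Wagtail implementation shown later in this record.

```python
def parse_order_param(order_param, allowed_fields):
    """Split '-published_at,blog_type' into order_by() arguments."""
    fields = []
    for raw in order_param.split(","):
        name = raw.lstrip("-")
        if name not in allowed_fields:
            raise ValueError(f"cannot order by '{name}' (unknown field)")
        fields.append(raw)  # keep a leading '-' so the field sorts descending
    return fields

fields = parse_order_param("-published_at,blog_type", {"published_at", "blog_type"})
print(fields)  # ['-published_at', 'blog_type']  ->  BlogPage.objects.order_by(*fields)
```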
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/api/v2/filters.py`
Content:
```
1 from django.conf import settings
2 from django.db import models
3 from django.shortcuts import get_object_or_404
4 from rest_framework.filters import BaseFilterBackend
5 from taggit.managers import TaggableManager
6
7 from wagtail.models import Locale, Page
8 from wagtail.search.backends import get_search_backend
9 from wagtail.search.backends.base import FilterFieldError, OrderByFieldError
10
11 from .utils import BadRequestError, parse_boolean
12
13
14 class FieldsFilter(BaseFilterBackend):
15 def filter_queryset(self, request, queryset, view):
16 """
17 This performs field level filtering on the result set
18 Eg: ?title=James Joyce
19 """
20 fields = set(view.get_available_fields(queryset.model, db_fields_only=True))
21
22 # Locale is a database field, but we provide a separate filter for it
23 if "locale" in fields:
24 fields.remove("locale")
25
26 for field_name, value in request.GET.items():
27 if field_name in fields:
28 try:
29 field = queryset.model._meta.get_field(field_name)
30 except LookupError:
31 field = None
32
33 # Convert value into python
34 try:
35 if isinstance(
36 field, (models.BooleanField, models.NullBooleanField)
37 ):
38 value = parse_boolean(value)
39 elif isinstance(field, (models.IntegerField, models.AutoField)):
40 value = int(value)
41 elif isinstance(field, models.ForeignKey):
42 value = field.target_field.get_prep_value(value)
43 except ValueError as e:
44 raise BadRequestError(
45 "field filter error. '%s' is not a valid value for %s (%s)"
46 % (value, field_name, str(e))
47 )
48
49 if isinstance(field, TaggableManager):
50 for tag in value.split(","):
51 queryset = queryset.filter(**{field_name + "__name": tag})
52
53 # Stick a message on the queryset to indicate that tag filtering has been performed
54 # This will let the do_search method know that it must raise an error as searching
55 # and tag filtering at the same time is not supported
56 queryset._filtered_by_tag = True
57 else:
58 queryset = queryset.filter(**{field_name: value})
59
60 return queryset
61
62
63 class OrderingFilter(BaseFilterBackend):
64 def filter_queryset(self, request, queryset, view):
65 """
66 This applies ordering to the result set
67 Eg: ?order=title
68
69 It also supports reverse ordering
70 Eg: ?order=-title
71
72 And random ordering
73 Eg: ?order=random
74 """
75 if "order" in request.GET:
76 order_by = request.GET["order"]
77
78 # Random ordering
79 if order_by == "random":
80 # Prevent ordering by random with offset
81 if "offset" in request.GET:
82 raise BadRequestError(
83 "random ordering with offset is not supported"
84 )
85
86 return queryset.order_by("?")
87
88 # Check if reverse ordering is set
89 if order_by.startswith("-"):
90 reverse_order = True
91 order_by = order_by[1:]
92 else:
93 reverse_order = False
94
95 # Add ordering
96 if order_by in view.get_available_fields(queryset.model):
97 queryset = queryset.order_by(order_by)
98 else:
99 # Unknown field
100 raise BadRequestError("cannot order by '%s' (unknown field)" % order_by)
101
102 # Reverse order
103 if reverse_order:
104 queryset = queryset.reverse()
105
106 return queryset
107
108
109 class SearchFilter(BaseFilterBackend):
110 def filter_queryset(self, request, queryset, view):
111 """
112 This performs a full-text search on the result set
113 Eg: ?search=James Joyce
114 """
115 search_enabled = getattr(settings, "WAGTAILAPI_SEARCH_ENABLED", True)
116
117 if "search" in request.GET:
118 if not search_enabled:
119 raise BadRequestError("search is disabled")
120
121 # Searching and filtering by tag at the same time is not supported
122 if getattr(queryset, "_filtered_by_tag", False):
123 raise BadRequestError(
124 "filtering by tag with a search query is not supported"
125 )
126
127 search_query = request.GET["search"]
128 search_operator = request.GET.get("search_operator", None)
129 order_by_relevance = "order" not in request.GET
130
131 sb = get_search_backend()
132 try:
133 queryset = sb.search(
134 search_query,
135 queryset,
136 operator=search_operator,
137 order_by_relevance=order_by_relevance,
138 )
139 except FilterFieldError as e:
140 raise BadRequestError(
141 "cannot filter by '{}' while searching (field is not indexed)".format(
142 e.field_name
143 )
144 )
145 except OrderByFieldError as e:
146 raise BadRequestError(
147 "cannot order by '{}' while searching (field is not indexed)".format(
148 e.field_name
149 )
150 )
151
152 return queryset
153
154
155 class ChildOfFilter(BaseFilterBackend):
156 """
157 Implements the ?child_of filter used to filter the results to only contain
158 pages that are direct children of the specified page.
159 """
160
161 def filter_queryset(self, request, queryset, view):
162 if "child_of" in request.GET:
163 try:
164 parent_page_id = int(request.GET["child_of"])
165 if parent_page_id < 0:
166 raise ValueError()
167
168 parent_page = view.get_base_queryset().get(id=parent_page_id)
169 except ValueError:
170 if request.GET["child_of"] == "root":
171 parent_page = view.get_root_page()
172 else:
173 raise BadRequestError("child_of must be a positive integer")
174 except Page.DoesNotExist:
175 raise BadRequestError("parent page doesn't exist")
176
177 queryset = queryset.child_of(parent_page)
178
179 # Save the parent page on the queryset. This is required for the page
180 # explorer, which needs to pass the parent page into
181 # `construct_explorer_page_queryset` hook functions
182 queryset._filtered_by_child_of = parent_page
183
184 return queryset
185
186
187 class AncestorOfFilter(BaseFilterBackend):
188 """
189 Implements the ?ancestor filter which limits the set of pages to a
190 particular branch of the page tree.
191 """
192
193 def filter_queryset(self, request, queryset, view):
194 if "ancestor_of" in request.GET:
195 try:
196 descendant_page_id = int(request.GET["ancestor_of"])
197 if descendant_page_id < 0:
198 raise ValueError()
199
200 descendant_page = view.get_base_queryset().get(id=descendant_page_id)
201 except ValueError:
202 raise BadRequestError("ancestor_of must be a positive integer")
203 except Page.DoesNotExist:
204 raise BadRequestError("descendant page doesn't exist")
205
206 queryset = queryset.ancestor_of(descendant_page)
207
208 return queryset
209
210
211 class DescendantOfFilter(BaseFilterBackend):
212 """
213 Implements the ?decendant_of filter which limits the set of pages to a
214 particular branch of the page tree.
215 """
216
217 def filter_queryset(self, request, queryset, view):
218 if "descendant_of" in request.GET:
219 if hasattr(queryset, "_filtered_by_child_of"):
220 raise BadRequestError(
221 "filtering by descendant_of with child_of is not supported"
222 )
223 try:
224 parent_page_id = int(request.GET["descendant_of"])
225 if parent_page_id < 0:
226 raise ValueError()
227
228 parent_page = view.get_base_queryset().get(id=parent_page_id)
229 except ValueError:
230 if request.GET["descendant_of"] == "root":
231 parent_page = view.get_root_page()
232 else:
233 raise BadRequestError("descendant_of must be a positive integer")
234 except Page.DoesNotExist:
235 raise BadRequestError("ancestor page doesn't exist")
236
237 queryset = queryset.descendant_of(parent_page)
238
239 return queryset
240
241
242 class TranslationOfFilter(BaseFilterBackend):
243 """
244 Implements the ?translation_of filter which limits the set of pages to translations
245 of a page.
246 """
247
248 def filter_queryset(self, request, queryset, view):
249 if "translation_of" in request.GET:
250 try:
251 page_id = int(request.GET["translation_of"])
252 if page_id < 0:
253 raise ValueError()
254
255 page = view.get_base_queryset().get(id=page_id)
256 except ValueError:
257 if request.GET["translation_of"] == "root":
258 page = view.get_root_page()
259 else:
260 raise BadRequestError("translation_of must be a positive integer")
261 except Page.DoesNotExist:
262 raise BadRequestError("translation_of page doesn't exist")
263
264 _filtered_by_child_of = getattr(queryset, "_filtered_by_child_of", None)
265
266 queryset = queryset.translation_of(page)
267
268 if _filtered_by_child_of:
269 queryset._filtered_by_child_of = _filtered_by_child_of
270
271 return queryset
272
273
274 class LocaleFilter(BaseFilterBackend):
275 """
276 Implements the ?locale filter which limits the set of pages to a
277 particular locale.
278 """
279
280 def filter_queryset(self, request, queryset, view):
281 if "locale" in request.GET:
282 _filtered_by_child_of = getattr(queryset, "_filtered_by_child_of", None)
283
284 locale = get_object_or_404(Locale, language_code=request.GET["locale"])
285 queryset = queryset.filter(locale=locale)
286
287 if _filtered_by_child_of:
288 queryset._filtered_by_child_of = _filtered_by_child_of
289
290 return queryset
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/wagtail/api/v2/filters.py b/wagtail/api/v2/filters.py
--- a/wagtail/api/v2/filters.py
+++ b/wagtail/api/v2/filters.py
@@ -63,8 +63,8 @@
class OrderingFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
"""
- This applies ordering to the result set
- Eg: ?order=title
+ This applies ordering to the result set with support for multiple fields.
+ Eg: ?order=title or ?order=title,created_at
It also supports reverse ordering
Eg: ?order=-title
@@ -73,10 +73,14 @@
Eg: ?order=random
"""
if "order" in request.GET:
- order_by = request.GET["order"]
+ order_by_list = request.GET["order"].split(",")
# Random ordering
- if order_by == "random":
+ if "random" in order_by_list:
+ if len(order_by_list) > 1:
+ raise BadRequestError(
+ "random ordering cannot be combined with other fields"
+ )
# Prevent ordering by random with offset
if "offset" in request.GET:
raise BadRequestError(
@@ -85,21 +89,28 @@
return queryset.order_by("?")
- # Check if reverse ordering is set
- if order_by.startswith("-"):
- reverse_order = True
- order_by = order_by[1:]
- else:
- reverse_order = False
-
- # Add ordering
- if order_by in view.get_available_fields(queryset.model):
- queryset = queryset.order_by(order_by)
- else:
- # Unknown field
- raise BadRequestError("cannot order by '%s' (unknown field)" % order_by)
-
- # Reverse order
+ order_by_fields = []
+ for order_by in order_by_list:
+ # Check if reverse ordering is set
+ if order_by.startswith("-"):
+ reverse_order = True
+ order_by = order_by[1:]
+ else:
+ reverse_order = False
+
+ # Add ordering
+ if order_by in view.get_available_fields(queryset.model):
+ order_by_fields.append(order_by)
+ else:
+ # Unknown field
+ raise BadRequestError(
+ "cannot order by '%s' (unknown field)" % order_by
+ )
+
+ # Apply ordering to the queryset
+ queryset = queryset.order_by(*order_by_fields)
+
+ # Reverse order if needed
if reverse_order:
queryset = queryset.reverse()
verification_info:
{"golden_diff": "diff --git a/wagtail/api/v2/filters.py b/wagtail/api/v2/filters.py\n--- a/wagtail/api/v2/filters.py\n+++ b/wagtail/api/v2/filters.py\n@@ -63,8 +63,8 @@\n class OrderingFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n- This applies ordering to the result set\n- Eg: ?order=title\n+ This applies ordering to the result set with support for multiple fields.\n+ Eg: ?order=title or ?order=title,created_at\n \n It also supports reverse ordering\n Eg: ?order=-title\n@@ -73,10 +73,14 @@\n Eg: ?order=random\n \"\"\"\n if \"order\" in request.GET:\n- order_by = request.GET[\"order\"]\n+ order_by_list = request.GET[\"order\"].split(\",\")\n \n # Random ordering\n- if order_by == \"random\":\n+ if \"random\" in order_by_list:\n+ if len(order_by_list) > 1:\n+ raise BadRequestError(\n+ \"random ordering cannot be combined with other fields\"\n+ )\n # Prevent ordering by random with offset\n if \"offset\" in request.GET:\n raise BadRequestError(\n@@ -85,21 +89,28 @@\n \n return queryset.order_by(\"?\")\n \n- # Check if reverse ordering is set\n- if order_by.startswith(\"-\"):\n- reverse_order = True\n- order_by = order_by[1:]\n- else:\n- reverse_order = False\n-\n- # Add ordering\n- if order_by in view.get_available_fields(queryset.model):\n- queryset = queryset.order_by(order_by)\n- else:\n- # Unknown field\n- raise BadRequestError(\"cannot order by '%s' (unknown field)\" % order_by)\n-\n- # Reverse order\n+ order_by_fields = []\n+ for order_by in order_by_list:\n+ # Check if reverse ordering is set\n+ if order_by.startswith(\"-\"):\n+ reverse_order = True\n+ order_by = order_by[1:]\n+ else:\n+ reverse_order = False\n+\n+ # Add ordering\n+ if order_by in view.get_available_fields(queryset.model):\n+ order_by_fields.append(order_by)\n+ else:\n+ # Unknown field\n+ raise BadRequestError(\n+ \"cannot order by '%s' (unknown field)\" % order_by\n+ )\n+\n+ # Apply ordering to the queryset\n+ queryset = queryset.order_by(*order_by_fields)\n+\n+ # Reverse order if needed\n if reverse_order:\n queryset = queryset.reverse()\n", "issue": "Allow ordering by multiple fields on the Wagtail API\n### Is your proposal related to a problem?\r\n\r\n<!--\r\n Provide a clear and concise description of what the problem is.\r\n For example, \"I'm always frustrated when...\"\r\n-->\r\n\r\nThe Wagtail API does not allow ordering by multiple fields at the same time. For example, let's say I want a list of blog pages sorted by type and in reverse chronological order. 
This is currently not possible in the API.\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\n`[GET] /api/v2/pages/?type=blog.BlogPage&order=-published_at,blog_type`\r\n\r\nThe result should be the same as if this query was run:\r\n\r\n```python\r\nBlogPage.objects.order_by('-published_at', 'blog_type')\r\n```\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nIn the meantime, we're going to add this functionality ad-hoc.\r\n\r\n### Additional context\r\n\r\n<!--\r\n Is there anything else you can add about the proposal?\r\n You might want to link to related issues here, if you haven't already.\r\n-->\r\n\r\nThis is for a headless website.\r\n\nAllow ordering by multiple fields on the Wagtail API\n### Is your proposal related to a problem?\r\n\r\n<!--\r\n Provide a clear and concise description of what the problem is.\r\n For example, \"I'm always frustrated when...\"\r\n-->\r\n\r\nThe Wagtail API does not allow ordering by multiple fields at the same time. For example, let's say I want a list of blog pages sorted by type and in reverse chronological order. This is currently not possible in the API.\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\n`[GET] /api/v2/pages/?type=blog.BlogPage&order=-published_at,blog_type`\r\n\r\nThe result should be the same as if this query was run:\r\n\r\n```python\r\nBlogPage.objects.order_by('-published_at', 'blog_type')\r\n```\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nIn the meantime, we're going to add this functionality ad-hoc.\r\n\r\n### Additional context\r\n\r\n<!--\r\n Is there anything else you can add about the proposal?\r\n You might want to link to related issues here, if you haven't already.\r\n-->\r\n\r\nThis is for a headless website.\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.filters import BaseFilterBackend\nfrom taggit.managers import TaggableManager\n\nfrom wagtail.models import Locale, Page\nfrom wagtail.search.backends import get_search_backend\nfrom wagtail.search.backends.base import FilterFieldError, OrderByFieldError\n\nfrom .utils import BadRequestError, parse_boolean\n\n\nclass FieldsFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n \"\"\"\n fields = set(view.get_available_fields(queryset.model, db_fields_only=True))\n\n # Locale is a database field, but we provide a separate filter for it\n if \"locale\" in fields:\n fields.remove(\"locale\")\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n try:\n field = queryset.model._meta.get_field(field_name)\n except LookupError:\n field = None\n\n # Convert value into python\n try:\n if isinstance(\n field, (models.BooleanField, models.NullBooleanField)\n ):\n value = parse_boolean(value)\n elif isinstance(field, (models.IntegerField, models.AutoField)):\n value = int(value)\n elif isinstance(field, models.ForeignKey):\n value = field.target_field.get_prep_value(value)\n except ValueError as e:\n raise BadRequestError(\n \"field filter error. 
'%s' is not a valid value for %s (%s)\"\n % (value, field_name, str(e))\n )\n\n if isinstance(field, TaggableManager):\n for tag in value.split(\",\"):\n queryset = queryset.filter(**{field_name + \"__name\": tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset\n\n\nclass OrderingFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n This applies ordering to the result set\n Eg: ?order=title\n\n It also supports reverse ordering\n Eg: ?order=-title\n\n And random ordering\n Eg: ?order=random\n \"\"\"\n if \"order\" in request.GET:\n order_by = request.GET[\"order\"]\n\n # Random ordering\n if order_by == \"random\":\n # Prevent ordering by random with offset\n if \"offset\" in request.GET:\n raise BadRequestError(\n \"random ordering with offset is not supported\"\n )\n\n return queryset.order_by(\"?\")\n\n # Check if reverse ordering is set\n if order_by.startswith(\"-\"):\n reverse_order = True\n order_by = order_by[1:]\n else:\n reverse_order = False\n\n # Add ordering\n if order_by in view.get_available_fields(queryset.model):\n queryset = queryset.order_by(order_by)\n else:\n # Unknown field\n raise BadRequestError(\"cannot order by '%s' (unknown field)\" % order_by)\n\n # Reverse order\n if reverse_order:\n queryset = queryset.reverse()\n\n return queryset\n\n\nclass SearchFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n This performs a full-text search on the result set\n Eg: ?search=James Joyce\n \"\"\"\n search_enabled = getattr(settings, \"WAGTAILAPI_SEARCH_ENABLED\", True)\n\n if \"search\" in request.GET:\n if not search_enabled:\n raise BadRequestError(\"search is disabled\")\n\n # Searching and filtering by tag at the same time is not supported\n if getattr(queryset, \"_filtered_by_tag\", False):\n raise BadRequestError(\n \"filtering by tag with a search query is not supported\"\n )\n\n search_query = request.GET[\"search\"]\n search_operator = request.GET.get(\"search_operator\", None)\n order_by_relevance = \"order\" not in request.GET\n\n sb = get_search_backend()\n try:\n queryset = sb.search(\n search_query,\n queryset,\n operator=search_operator,\n order_by_relevance=order_by_relevance,\n )\n except FilterFieldError as e:\n raise BadRequestError(\n \"cannot filter by '{}' while searching (field is not indexed)\".format(\n e.field_name\n )\n )\n except OrderByFieldError as e:\n raise BadRequestError(\n \"cannot order by '{}' while searching (field is not indexed)\".format(\n e.field_name\n )\n )\n\n return queryset\n\n\nclass ChildOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?child_of filter used to filter the results to only contain\n pages that are direct children of the specified page.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"child_of\" in request.GET:\n try:\n parent_page_id = int(request.GET[\"child_of\"])\n if parent_page_id < 0:\n raise ValueError()\n\n parent_page = view.get_base_queryset().get(id=parent_page_id)\n except ValueError:\n if request.GET[\"child_of\"] == \"root\":\n parent_page = view.get_root_page()\n else:\n raise BadRequestError(\"child_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"parent page 
doesn't exist\")\n\n queryset = queryset.child_of(parent_page)\n\n # Save the parent page on the queryset. This is required for the page\n # explorer, which needs to pass the parent page into\n # `construct_explorer_page_queryset` hook functions\n queryset._filtered_by_child_of = parent_page\n\n return queryset\n\n\nclass AncestorOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?ancestor filter which limits the set of pages to a\n particular branch of the page tree.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"ancestor_of\" in request.GET:\n try:\n descendant_page_id = int(request.GET[\"ancestor_of\"])\n if descendant_page_id < 0:\n raise ValueError()\n\n descendant_page = view.get_base_queryset().get(id=descendant_page_id)\n except ValueError:\n raise BadRequestError(\"ancestor_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"descendant page doesn't exist\")\n\n queryset = queryset.ancestor_of(descendant_page)\n\n return queryset\n\n\nclass DescendantOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?decendant_of filter which limits the set of pages to a\n particular branch of the page tree.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"descendant_of\" in request.GET:\n if hasattr(queryset, \"_filtered_by_child_of\"):\n raise BadRequestError(\n \"filtering by descendant_of with child_of is not supported\"\n )\n try:\n parent_page_id = int(request.GET[\"descendant_of\"])\n if parent_page_id < 0:\n raise ValueError()\n\n parent_page = view.get_base_queryset().get(id=parent_page_id)\n except ValueError:\n if request.GET[\"descendant_of\"] == \"root\":\n parent_page = view.get_root_page()\n else:\n raise BadRequestError(\"descendant_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"ancestor page doesn't exist\")\n\n queryset = queryset.descendant_of(parent_page)\n\n return queryset\n\n\nclass TranslationOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?translation_of filter which limits the set of pages to translations\n of a page.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"translation_of\" in request.GET:\n try:\n page_id = int(request.GET[\"translation_of\"])\n if page_id < 0:\n raise ValueError()\n\n page = view.get_base_queryset().get(id=page_id)\n except ValueError:\n if request.GET[\"translation_of\"] == \"root\":\n page = view.get_root_page()\n else:\n raise BadRequestError(\"translation_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"translation_of page doesn't exist\")\n\n _filtered_by_child_of = getattr(queryset, \"_filtered_by_child_of\", None)\n\n queryset = queryset.translation_of(page)\n\n if _filtered_by_child_of:\n queryset._filtered_by_child_of = _filtered_by_child_of\n\n return queryset\n\n\nclass LocaleFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?locale filter which limits the set of pages to a\n particular locale.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"locale\" in request.GET:\n _filtered_by_child_of = getattr(queryset, \"_filtered_by_child_of\", None)\n\n locale = get_object_or_404(Locale, language_code=request.GET[\"locale\"])\n queryset = queryset.filter(locale=locale)\n\n if _filtered_by_child_of:\n queryset._filtered_by_child_of = _filtered_by_child_of\n\n return queryset\n", "path": "wagtail/api/v2/filters.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom 
django.shortcuts import get_object_or_404\nfrom rest_framework.filters import BaseFilterBackend\nfrom taggit.managers import TaggableManager\n\nfrom wagtail.models import Locale, Page\nfrom wagtail.search.backends import get_search_backend\nfrom wagtail.search.backends.base import FilterFieldError, OrderByFieldError\n\nfrom .utils import BadRequestError, parse_boolean\n\n\nclass FieldsFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n \"\"\"\n fields = set(view.get_available_fields(queryset.model, db_fields_only=True))\n\n # Locale is a database field, but we provide a separate filter for it\n if \"locale\" in fields:\n fields.remove(\"locale\")\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n try:\n field = queryset.model._meta.get_field(field_name)\n except LookupError:\n field = None\n\n # Convert value into python\n try:\n if isinstance(\n field, (models.BooleanField, models.NullBooleanField)\n ):\n value = parse_boolean(value)\n elif isinstance(field, (models.IntegerField, models.AutoField)):\n value = int(value)\n elif isinstance(field, models.ForeignKey):\n value = field.target_field.get_prep_value(value)\n except ValueError as e:\n raise BadRequestError(\n \"field filter error. '%s' is not a valid value for %s (%s)\"\n % (value, field_name, str(e))\n )\n\n if isinstance(field, TaggableManager):\n for tag in value.split(\",\"):\n queryset = queryset.filter(**{field_name + \"__name\": tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset\n\n\nclass OrderingFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n This applies ordering to the result set with support for multiple fields.\n Eg: ?order=title or ?order=title,created_at\n\n It also supports reverse ordering\n Eg: ?order=-title\n\n And random ordering\n Eg: ?order=random\n \"\"\"\n if \"order\" in request.GET:\n order_by_list = request.GET[\"order\"].split(\",\")\n\n # Random ordering\n if \"random\" in order_by_list:\n if len(order_by_list) > 1:\n raise BadRequestError(\n \"random ordering cannot be combined with other fields\"\n )\n # Prevent ordering by random with offset\n if \"offset\" in request.GET:\n raise BadRequestError(\n \"random ordering with offset is not supported\"\n )\n\n return queryset.order_by(\"?\")\n\n order_by_fields = []\n for order_by in order_by_list:\n # Check if reverse ordering is set\n if order_by.startswith(\"-\"):\n reverse_order = True\n order_by = order_by[1:]\n else:\n reverse_order = False\n\n # Add ordering\n if order_by in view.get_available_fields(queryset.model):\n order_by_fields.append(order_by)\n else:\n # Unknown field\n raise BadRequestError(\n \"cannot order by '%s' (unknown field)\" % order_by\n )\n\n # Apply ordering to the queryset\n queryset = queryset.order_by(*order_by_fields)\n\n # Reverse order if needed\n if reverse_order:\n queryset = queryset.reverse()\n\n return queryset\n\n\nclass SearchFilter(BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n This performs a full-text search on the result set\n Eg: ?search=James Joyce\n \"\"\"\n search_enabled = 
getattr(settings, \"WAGTAILAPI_SEARCH_ENABLED\", True)\n\n if \"search\" in request.GET:\n if not search_enabled:\n raise BadRequestError(\"search is disabled\")\n\n # Searching and filtering by tag at the same time is not supported\n if getattr(queryset, \"_filtered_by_tag\", False):\n raise BadRequestError(\n \"filtering by tag with a search query is not supported\"\n )\n\n search_query = request.GET[\"search\"]\n search_operator = request.GET.get(\"search_operator\", None)\n order_by_relevance = \"order\" not in request.GET\n\n sb = get_search_backend()\n try:\n queryset = sb.search(\n search_query,\n queryset,\n operator=search_operator,\n order_by_relevance=order_by_relevance,\n )\n except FilterFieldError as e:\n raise BadRequestError(\n \"cannot filter by '{}' while searching (field is not indexed)\".format(\n e.field_name\n )\n )\n except OrderByFieldError as e:\n raise BadRequestError(\n \"cannot order by '{}' while searching (field is not indexed)\".format(\n e.field_name\n )\n )\n\n return queryset\n\n\nclass ChildOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?child_of filter used to filter the results to only contain\n pages that are direct children of the specified page.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"child_of\" in request.GET:\n try:\n parent_page_id = int(request.GET[\"child_of\"])\n if parent_page_id < 0:\n raise ValueError()\n\n parent_page = view.get_base_queryset().get(id=parent_page_id)\n except ValueError:\n if request.GET[\"child_of\"] == \"root\":\n parent_page = view.get_root_page()\n else:\n raise BadRequestError(\"child_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"parent page doesn't exist\")\n\n queryset = queryset.child_of(parent_page)\n\n # Save the parent page on the queryset. 
This is required for the page\n # explorer, which needs to pass the parent page into\n # `construct_explorer_page_queryset` hook functions\n queryset._filtered_by_child_of = parent_page\n\n return queryset\n\n\nclass AncestorOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?ancestor filter which limits the set of pages to a\n particular branch of the page tree.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"ancestor_of\" in request.GET:\n try:\n descendant_page_id = int(request.GET[\"ancestor_of\"])\n if descendant_page_id < 0:\n raise ValueError()\n\n descendant_page = view.get_base_queryset().get(id=descendant_page_id)\n except ValueError:\n raise BadRequestError(\"ancestor_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"descendant page doesn't exist\")\n\n queryset = queryset.ancestor_of(descendant_page)\n\n return queryset\n\n\nclass DescendantOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?decendant_of filter which limits the set of pages to a\n particular branch of the page tree.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"descendant_of\" in request.GET:\n if hasattr(queryset, \"_filtered_by_child_of\"):\n raise BadRequestError(\n \"filtering by descendant_of with child_of is not supported\"\n )\n try:\n parent_page_id = int(request.GET[\"descendant_of\"])\n if parent_page_id < 0:\n raise ValueError()\n\n parent_page = view.get_base_queryset().get(id=parent_page_id)\n except ValueError:\n if request.GET[\"descendant_of\"] == \"root\":\n parent_page = view.get_root_page()\n else:\n raise BadRequestError(\"descendant_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"ancestor page doesn't exist\")\n\n queryset = queryset.descendant_of(parent_page)\n\n return queryset\n\n\nclass TranslationOfFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?translation_of filter which limits the set of pages to translations\n of a page.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"translation_of\" in request.GET:\n try:\n page_id = int(request.GET[\"translation_of\"])\n if page_id < 0:\n raise ValueError()\n\n page = view.get_base_queryset().get(id=page_id)\n except ValueError:\n if request.GET[\"translation_of\"] == \"root\":\n page = view.get_root_page()\n else:\n raise BadRequestError(\"translation_of must be a positive integer\")\n except Page.DoesNotExist:\n raise BadRequestError(\"translation_of page doesn't exist\")\n\n _filtered_by_child_of = getattr(queryset, \"_filtered_by_child_of\", None)\n\n queryset = queryset.translation_of(page)\n\n if _filtered_by_child_of:\n queryset._filtered_by_child_of = _filtered_by_child_of\n\n return queryset\n\n\nclass LocaleFilter(BaseFilterBackend):\n \"\"\"\n Implements the ?locale filter which limits the set of pages to a\n particular locale.\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n if \"locale\" in request.GET:\n _filtered_by_child_of = getattr(queryset, \"_filtered_by_child_of\", None)\n\n locale = get_object_or_404(Locale, language_code=request.GET[\"locale\"])\n queryset = queryset.filter(locale=locale)\n\n if _filtered_by_child_of:\n queryset._filtered_by_child_of = _filtered_by_child_of\n\n return queryset\n", "path": "wagtail/api/v2/filters.py"}]}
| 3,532 | 589 |
gh_patches_debug_12559
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-1598
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
garage_benchmark can not run without Google Cloud credential
By default, log files from benchmarks are not uploaded to GCP storage. However, garage_benchmark currently cannot run without GCP credentials, and will fail with errors like `google.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials.` @gitanshu finds this is because of [this line](https://github.com/rlworkgroup/garage/blob/36f8fbaea5ff304fa3bfea2951d4b9ad3637825f/benchmarks/src/garage_benchmarks/helper.py#L48). This line should be moved into the `_upload_to_gcp_storage` function.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benchmarks/src/garage_benchmarks/helper.py`
Content:
```
1 """Helper functions for benchmarks.
2
3 A decorator and a function are provided for benchmarks. They essentially
4 take care log directory configuration and result plotting.
5
6 The decorator can be with or without parameters to specify options for
7 plotting and JSON exporting used for automatic benchmarking.
8
9 If plot is enabled, iterate_experiments() can be called with different
10 experiments function, each of which can have its specified env_ids. In
11 the end, figures will be generated per env_id.
12
13 Example:
14 @benchmark
15 def ppo_benchmarks():
16 iterate_experiments(ppo_garage_tf)
17 iterate_experiments(ppo_garage_pytorch)
18
19 @benchmark(plot=False)
20 def my_benchmarks():
21 iterate_experiments(trpo_garage_pytorch, env_ids=env_ids)
22 iterate_experiments(vpg_garage_pytorch, seeds=seeds)
23
24 Attributes:
25 _plot (dict): A dictionary used for plotting to figures
26 grouped by tasks. If no need to plot, _plot will be None.
27 _log_dir (str): Log directory for running the benchmarking.
28 _auto (bool): Global variable used to control whether to export
29 JSON files for automatic benchmarking.
30
31 """
32 import csv
33 import functools
34 import json
35 import os
36 import pathlib
37 import random
38
39 from google.cloud import storage
40 import matplotlib.pyplot as plt
41 import numpy as np
42 import tensorflow as tf
43
44 _plot = None
45 _log_dir = None
46 _auto = False
47
48 _bucket = storage.Client().bucket('resl-garage-benchmarks')
49
50
51 def benchmark(exec_func=None, *, plot=True, auto=False):
52 """Decorator for benchmark function.
53
54 Args:
55 exec_func (func): The experiment function.
56 plot (bool): Whether the result of this run needs to be plotted.
57 PNG files will be generated in sub folder /plot.
58 auto (auto): Whether this is automatic benchmarking. JSON files
59 will be generated in sub folder /auto.
60
61 Returns:
62 func: The wrapper function.
63
64 """
65 if exec_func is None:
66 return functools.partial(benchmark, plot=plot, auto=auto)
67
68 @functools.wraps(exec_func)
69 def wrapper_func():
70 """The wrapper function."""
71 # pylint: disable=global-statement
72 global _plot, _log_dir, _auto
73 _plot = {} if plot else None
74 plt.close('all')
75
76 _log_dir = _get_log_dir(exec_func.__name__)
77
78 if os.path.exists(_log_dir):
79 count = 1
80 while os.path.exists(_log_dir + '_' + str(count)):
81 count += 1
82 _log_dir = _log_dir + '_' + str(count)
83
84 if auto:
85 _auto = auto
86 auto_dir = os.path.join(_log_dir, 'auto')
87 os.makedirs(auto_dir)
88
89 exec_func()
90
91 if plot:
92 plot_dir = os.path.join(_log_dir, 'plot')
93 os.makedirs(plot_dir)
94 for env_id in _plot:
95 plt.figure(env_id)
96 plt.legend()
97 plt.xlabel(_plot[env_id]['xlabel'])
98 plt.ylabel(_plot[env_id]['ylabel'])
99 plt.title(env_id)
100 plt.savefig(plot_dir + '/' + env_id)
101
102 if auto:
103 _upload_to_gcp_storage(_log_dir)
104
105 return wrapper_func
106
107
108 def iterate_experiments(func,
109 env_ids,
110 seeds=None,
111 xcolumn='TotalEnvSteps',
112 xlabel='Total Environment Steps',
113 ycolumn='Evaluation/AverageReturn',
114 ylabel='Average Return'):
115 """Iterate experiments for benchmarking over env_ids and seeds.
116
117 Args:
118 env_ids (list[str]): List of environment ids.
119 seeds (list[int]): List of seeds.
120 func (func): The experiment function.
121 xcolumn (str): Which column should be the JSON x axis.
122 xlabel (str): Label name for x axis.
123 ycolumn (str): Which column should be the JSON y axis.
124 ylabel (str): Label name for y axis.
125
126 """
127 func_name = func.__name__.replace('_', '-')
128
129 if seeds is None:
130 seeds = random.sample(range(100), 4)
131
132 for env_id in env_ids:
133 task_ys = []
134
135 if _plot is not None and env_id not in _plot:
136 _plot[env_id] = {'xlabel': xlabel, 'ylabel': ylabel}
137
138 for seed in seeds:
139 exp_name = func_name + '_' + env_id + '_' + str(seed)
140 sub_log_dir = os.path.join(_log_dir, exp_name)
141
142 tf.compat.v1.reset_default_graph()
143
144 func(dict(log_dir=sub_log_dir), env_id=env_id, seed=seed)
145
146 if _plot is not None or _auto:
147 xs, ys = _read_csv(sub_log_dir, xcolumn, ycolumn)
148 task_ys.append(ys)
149
150 if _plot is not None or _auto:
151 ys_mean = np.array(task_ys).mean(axis=0)
152 ys_std = np.array(task_ys).std(axis=0)
153
154 if _plot is not None:
155 plt.figure(env_id)
156
157 plt.plot(xs, ys_mean, label=func_name)
158 plt.fill_between(xs, (ys_mean - ys_std), (ys_mean + ys_std),
159 alpha=.1)
160 if _auto:
161 _export_to_json(env_id + '_' + func_name, xs, xlabel, ys_mean,
162 ylabel, ys_std)
163
164
165 def _get_log_dir(exec_func_name):
166 """Get the log directory given the experiment name.
167
168 Args:
169 exec_func_name (str): The function name which runs benchmarks.
170
171 Returns:
172 str: Log directory.
173
174 """
175 cwd = pathlib.Path.cwd()
176 return str(cwd.joinpath('data', 'local', 'benchmarks', exec_func_name))
177
178
179 def _read_csv(log_dir, xcolumn, ycolumn):
180 """Read csv files and return xs and ys.
181
182 Args:
183 log_dir (str): Log directory for csv file.
184 xcolumn (str): Which column should be the JSON x axis.
185 ycolumn (str): Which column should be the JSON y axis.
186
187 Returns:
188 list: List of x axis points.
189 list: List of y axis points.
190
191 """
192 xs, ys = [], []
193 with open(os.path.join(log_dir, 'progress.csv'), 'r') as csv_file:
194 for row in csv.DictReader(csv_file):
195 xs.append(float(row[xcolumn]))
196 ys.append(float(row[ycolumn]))
197
198 return xs, ys
199
200
201 def _export_to_json(json_name, xs, xlabel, ys, ylabel, ys_std):
202 """Save selected csv column to JSON preparing for automatic benchmarking.
203
204 Args:
205 json_name (str): The JSON file name.
206 xs (list): List of x axis points
207 xlabel (str): Label name for x axis.
208 ys (np.array): List of y axis points
209 ylabel (str): Label name for y axis.
210 ys_std (np.array): Standard deviation of y asis, used to calculate
211 upper and lower boundary for confidence interval.
212
213 """
214 json_path = os.path.join(_log_dir, 'auto', json_name + '.json')
215 with open(json_path, 'w') as json_file:
216 json.dump(
217 dict(x=xs,
218 y=ys.tolist(),
219 y_min=(ys - ys_std).tolist(),
220 y_max=(ys + ys_std).tolist(),
221 xlabel=xlabel,
222 ylabel=ylabel), json_file)
223
224
225 def _upload_to_gcp_storage(exec_dir):
226 """Upload all files to GCP storage under exec_dir folder.
227
228 Args:
229 exec_dir (str): The execution directory.
230
231 """
232 exec_name = os.path.basename(exec_dir)
233
234 for folder_name in os.listdir(exec_dir):
235 folder_path = os.path.join(exec_dir, folder_name)
236 if not os.path.isfile(folder_path):
237 remote_folder = os.path.join(exec_name, folder_name)
238
239 for file_name in os.listdir(folder_path):
240 file_path = os.path.join(folder_path, file_name)
241 if os.path.isfile(file_path):
242 blob = _bucket.blob(os.path.join(remote_folder, file_name))
243 blob.upload_from_filename(file_path)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benchmarks/src/garage_benchmarks/helper.py b/benchmarks/src/garage_benchmarks/helper.py
--- a/benchmarks/src/garage_benchmarks/helper.py
+++ b/benchmarks/src/garage_benchmarks/helper.py
@@ -45,8 +45,6 @@
_log_dir = None
_auto = False
-_bucket = storage.Client().bucket('resl-garage-benchmarks')
-
def benchmark(exec_func=None, *, plot=True, auto=False):
"""Decorator for benchmark function.
@@ -229,6 +227,7 @@
exec_dir (str): The execution directory.
"""
+ _bucket = storage.Client().bucket('resl-garage-benchmarks')
exec_name = os.path.basename(exec_dir)
for folder_name in os.listdir(exec_dir):
|
{"golden_diff": "diff --git a/benchmarks/src/garage_benchmarks/helper.py b/benchmarks/src/garage_benchmarks/helper.py\n--- a/benchmarks/src/garage_benchmarks/helper.py\n+++ b/benchmarks/src/garage_benchmarks/helper.py\n@@ -45,8 +45,6 @@\n _log_dir = None\n _auto = False\n \n-_bucket = storage.Client().bucket('resl-garage-benchmarks')\n-\n \n def benchmark(exec_func=None, *, plot=True, auto=False):\n \"\"\"Decorator for benchmark function.\n@@ -229,6 +227,7 @@\n exec_dir (str): The execution directory.\n \n \"\"\"\n+ _bucket = storage.Client().bucket('resl-garage-benchmarks')\n exec_name = os.path.basename(exec_dir)\n \n for folder_name in os.listdir(exec_dir):\n", "issue": "garage_benchmark can not run without Google Cloud credential\nBy default, log files in benchmark will not upload to GCP storage. However, now garage_benchmark cannot run without GCP credential, and will get errors like `google.auth.exceptions.DefaultCredentialsError: Could not automatically determine credentials.` @gitanshu finds this is because of [this line](https://github.com/rlworkgroup/garage/blob/36f8fbaea5ff304fa3bfea2951d4b9ad3637825f/benchmarks/src/garage_benchmarks/helper.py#L48). This line shoud be moved into `_upload_to_gcp_storage` function.\n", "before_files": [{"content": "\"\"\"Helper functions for benchmarks.\n\nA decorator and a function are provided for benchmarks. They essentially\ntake care log directory configuration and result plotting.\n\nThe decorator can be with or without parameters to specify options for\nplotting and JSON exporting used for automatic benchmarking.\n\nIf plot is enabled, iterate_experiments() can be called with different\nexperiments function, each of which can have its specified env_ids. In\nthe end, figures will be generated per env_id.\n\nExample:\n @benchmark\n def ppo_benchmarks():\n iterate_experiments(ppo_garage_tf)\n iterate_experiments(ppo_garage_pytorch)\n\n @benchmark(plot=False)\n def my_benchmarks():\n iterate_experiments(trpo_garage_pytorch, env_ids=env_ids)\n iterate_experiments(vpg_garage_pytorch, seeds=seeds)\n\nAttributes:\n _plot (dict): A dictionary used for plotting to figures\n grouped by tasks. If no need to plot, _plot will be None.\n _log_dir (str): Log directory for running the benchmarking.\n _auto (bool): Global variable used to control whether to export\n JSON files for automatic benchmarking.\n\n\"\"\"\nimport csv\nimport functools\nimport json\nimport os\nimport pathlib\nimport random\n\nfrom google.cloud import storage\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n_plot = None\n_log_dir = None\n_auto = False\n\n_bucket = storage.Client().bucket('resl-garage-benchmarks')\n\n\ndef benchmark(exec_func=None, *, plot=True, auto=False):\n \"\"\"Decorator for benchmark function.\n\n Args:\n exec_func (func): The experiment function.\n plot (bool): Whether the result of this run needs to be plotted.\n PNG files will be generated in sub folder /plot.\n auto (auto): Whether this is automatic benchmarking. 
JSON files\n will be generated in sub folder /auto.\n\n Returns:\n func: The wrapper function.\n\n \"\"\"\n if exec_func is None:\n return functools.partial(benchmark, plot=plot, auto=auto)\n\n @functools.wraps(exec_func)\n def wrapper_func():\n \"\"\"The wrapper function.\"\"\"\n # pylint: disable=global-statement\n global _plot, _log_dir, _auto\n _plot = {} if plot else None\n plt.close('all')\n\n _log_dir = _get_log_dir(exec_func.__name__)\n\n if os.path.exists(_log_dir):\n count = 1\n while os.path.exists(_log_dir + '_' + str(count)):\n count += 1\n _log_dir = _log_dir + '_' + str(count)\n\n if auto:\n _auto = auto\n auto_dir = os.path.join(_log_dir, 'auto')\n os.makedirs(auto_dir)\n\n exec_func()\n\n if plot:\n plot_dir = os.path.join(_log_dir, 'plot')\n os.makedirs(plot_dir)\n for env_id in _plot:\n plt.figure(env_id)\n plt.legend()\n plt.xlabel(_plot[env_id]['xlabel'])\n plt.ylabel(_plot[env_id]['ylabel'])\n plt.title(env_id)\n plt.savefig(plot_dir + '/' + env_id)\n\n if auto:\n _upload_to_gcp_storage(_log_dir)\n\n return wrapper_func\n\n\ndef iterate_experiments(func,\n env_ids,\n seeds=None,\n xcolumn='TotalEnvSteps',\n xlabel='Total Environment Steps',\n ycolumn='Evaluation/AverageReturn',\n ylabel='Average Return'):\n \"\"\"Iterate experiments for benchmarking over env_ids and seeds.\n\n Args:\n env_ids (list[str]): List of environment ids.\n seeds (list[int]): List of seeds.\n func (func): The experiment function.\n xcolumn (str): Which column should be the JSON x axis.\n xlabel (str): Label name for x axis.\n ycolumn (str): Which column should be the JSON y axis.\n ylabel (str): Label name for y axis.\n\n \"\"\"\n func_name = func.__name__.replace('_', '-')\n\n if seeds is None:\n seeds = random.sample(range(100), 4)\n\n for env_id in env_ids:\n task_ys = []\n\n if _plot is not None and env_id not in _plot:\n _plot[env_id] = {'xlabel': xlabel, 'ylabel': ylabel}\n\n for seed in seeds:\n exp_name = func_name + '_' + env_id + '_' + str(seed)\n sub_log_dir = os.path.join(_log_dir, exp_name)\n\n tf.compat.v1.reset_default_graph()\n\n func(dict(log_dir=sub_log_dir), env_id=env_id, seed=seed)\n\n if _plot is not None or _auto:\n xs, ys = _read_csv(sub_log_dir, xcolumn, ycolumn)\n task_ys.append(ys)\n\n if _plot is not None or _auto:\n ys_mean = np.array(task_ys).mean(axis=0)\n ys_std = np.array(task_ys).std(axis=0)\n\n if _plot is not None:\n plt.figure(env_id)\n\n plt.plot(xs, ys_mean, label=func_name)\n plt.fill_between(xs, (ys_mean - ys_std), (ys_mean + ys_std),\n alpha=.1)\n if _auto:\n _export_to_json(env_id + '_' + func_name, xs, xlabel, ys_mean,\n ylabel, ys_std)\n\n\ndef _get_log_dir(exec_func_name):\n \"\"\"Get the log directory given the experiment name.\n\n Args:\n exec_func_name (str): The function name which runs benchmarks.\n\n Returns:\n str: Log directory.\n\n \"\"\"\n cwd = pathlib.Path.cwd()\n return str(cwd.joinpath('data', 'local', 'benchmarks', exec_func_name))\n\n\ndef _read_csv(log_dir, xcolumn, ycolumn):\n \"\"\"Read csv files and return xs and ys.\n\n Args:\n log_dir (str): Log directory for csv file.\n xcolumn (str): Which column should be the JSON x axis.\n ycolumn (str): Which column should be the JSON y axis.\n\n Returns:\n list: List of x axis points.\n list: List of y axis points.\n\n \"\"\"\n xs, ys = [], []\n with open(os.path.join(log_dir, 'progress.csv'), 'r') as csv_file:\n for row in csv.DictReader(csv_file):\n xs.append(float(row[xcolumn]))\n ys.append(float(row[ycolumn]))\n\n return xs, ys\n\n\ndef _export_to_json(json_name, xs, xlabel, ys, 
ylabel, ys_std):\n \"\"\"Save selected csv column to JSON preparing for automatic benchmarking.\n\n Args:\n json_name (str): The JSON file name.\n xs (list): List of x axis points\n xlabel (str): Label name for x axis.\n ys (np.array): List of y axis points\n ylabel (str): Label name for y axis.\n ys_std (np.array): Standard deviation of y asis, used to calculate\n upper and lower boundary for confidence interval.\n\n \"\"\"\n json_path = os.path.join(_log_dir, 'auto', json_name + '.json')\n with open(json_path, 'w') as json_file:\n json.dump(\n dict(x=xs,\n y=ys.tolist(),\n y_min=(ys - ys_std).tolist(),\n y_max=(ys + ys_std).tolist(),\n xlabel=xlabel,\n ylabel=ylabel), json_file)\n\n\ndef _upload_to_gcp_storage(exec_dir):\n \"\"\"Upload all files to GCP storage under exec_dir folder.\n\n Args:\n exec_dir (str): The execution directory.\n\n \"\"\"\n exec_name = os.path.basename(exec_dir)\n\n for folder_name in os.listdir(exec_dir):\n folder_path = os.path.join(exec_dir, folder_name)\n if not os.path.isfile(folder_path):\n remote_folder = os.path.join(exec_name, folder_name)\n\n for file_name in os.listdir(folder_path):\n file_path = os.path.join(folder_path, file_name)\n if os.path.isfile(file_path):\n blob = _bucket.blob(os.path.join(remote_folder, file_name))\n blob.upload_from_filename(file_path)\n", "path": "benchmarks/src/garage_benchmarks/helper.py"}], "after_files": [{"content": "\"\"\"Helper functions for benchmarks.\n\nA decorator and a function are provided for benchmarks. They essentially\ntake care log directory configuration and result plotting.\n\nThe decorator can be with or without parameters to specify options for\nplotting and JSON exporting used for automatic benchmarking.\n\nIf plot is enabled, iterate_experiments() can be called with different\nexperiments function, each of which can have its specified env_ids. In\nthe end, figures will be generated per env_id.\n\nExample:\n @benchmark\n def ppo_benchmarks():\n iterate_experiments(ppo_garage_tf)\n iterate_experiments(ppo_garage_pytorch)\n\n @benchmark(plot=False)\n def my_benchmarks():\n iterate_experiments(trpo_garage_pytorch, env_ids=env_ids)\n iterate_experiments(vpg_garage_pytorch, seeds=seeds)\n\nAttributes:\n _plot (dict): A dictionary used for plotting to figures\n grouped by tasks. If no need to plot, _plot will be None.\n _log_dir (str): Log directory for running the benchmarking.\n _auto (bool): Global variable used to control whether to export\n JSON files for automatic benchmarking.\n\n\"\"\"\nimport csv\nimport functools\nimport json\nimport os\nimport pathlib\nimport random\n\nfrom google.cloud import storage\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\n\n_plot = None\n_log_dir = None\n_auto = False\n\n\ndef benchmark(exec_func=None, *, plot=True, auto=False):\n \"\"\"Decorator for benchmark function.\n\n Args:\n exec_func (func): The experiment function.\n plot (bool): Whether the result of this run needs to be plotted.\n PNG files will be generated in sub folder /plot.\n auto (auto): Whether this is automatic benchmarking. 
JSON files\n will be generated in sub folder /auto.\n\n Returns:\n func: The wrapper function.\n\n \"\"\"\n if exec_func is None:\n return functools.partial(benchmark, plot=plot, auto=auto)\n\n @functools.wraps(exec_func)\n def wrapper_func():\n \"\"\"The wrapper function.\"\"\"\n # pylint: disable=global-statement\n global _plot, _log_dir, _auto\n _plot = {} if plot else None\n plt.close('all')\n\n _log_dir = _get_log_dir(exec_func.__name__)\n\n if os.path.exists(_log_dir):\n count = 1\n while os.path.exists(_log_dir + '_' + str(count)):\n count += 1\n _log_dir = _log_dir + '_' + str(count)\n\n if auto:\n _auto = auto\n auto_dir = os.path.join(_log_dir, 'auto')\n os.makedirs(auto_dir)\n\n exec_func()\n\n if plot:\n plot_dir = os.path.join(_log_dir, 'plot')\n os.makedirs(plot_dir)\n for env_id in _plot:\n plt.figure(env_id)\n plt.legend()\n plt.xlabel(_plot[env_id]['xlabel'])\n plt.ylabel(_plot[env_id]['ylabel'])\n plt.title(env_id)\n plt.savefig(plot_dir + '/' + env_id)\n\n if auto:\n _upload_to_gcp_storage(_log_dir)\n\n return wrapper_func\n\n\ndef iterate_experiments(func,\n env_ids,\n seeds=None,\n xcolumn='TotalEnvSteps',\n xlabel='Total Environment Steps',\n ycolumn='Evaluation/AverageReturn',\n ylabel='Average Return'):\n \"\"\"Iterate experiments for benchmarking over env_ids and seeds.\n\n Args:\n env_ids (list[str]): List of environment ids.\n seeds (list[int]): List of seeds.\n func (func): The experiment function.\n xcolumn (str): Which column should be the JSON x axis.\n xlabel (str): Label name for x axis.\n ycolumn (str): Which column should be the JSON y axis.\n ylabel (str): Label name for y axis.\n\n \"\"\"\n func_name = func.__name__.replace('_', '-')\n\n if seeds is None:\n seeds = random.sample(range(100), 4)\n\n for env_id in env_ids:\n task_ys = []\n\n if _plot is not None and env_id not in _plot:\n _plot[env_id] = {'xlabel': xlabel, 'ylabel': ylabel}\n\n for seed in seeds:\n exp_name = func_name + '_' + env_id + '_' + str(seed)\n sub_log_dir = os.path.join(_log_dir, exp_name)\n\n tf.compat.v1.reset_default_graph()\n\n func(dict(log_dir=sub_log_dir), env_id=env_id, seed=seed)\n\n if _plot is not None or _auto:\n xs, ys = _read_csv(sub_log_dir, xcolumn, ycolumn)\n task_ys.append(ys)\n\n if _plot is not None or _auto:\n ys_mean = np.array(task_ys).mean(axis=0)\n ys_std = np.array(task_ys).std(axis=0)\n\n if _plot is not None:\n plt.figure(env_id)\n\n plt.plot(xs, ys_mean, label=func_name)\n plt.fill_between(xs, (ys_mean - ys_std), (ys_mean + ys_std),\n alpha=.1)\n if _auto:\n _export_to_json(env_id + '_' + func_name, xs, xlabel, ys_mean,\n ylabel, ys_std)\n\n\ndef _get_log_dir(exec_func_name):\n \"\"\"Get the log directory given the experiment name.\n\n Args:\n exec_func_name (str): The function name which runs benchmarks.\n\n Returns:\n str: Log directory.\n\n \"\"\"\n cwd = pathlib.Path.cwd()\n return str(cwd.joinpath('data', 'local', 'benchmarks', exec_func_name))\n\n\ndef _read_csv(log_dir, xcolumn, ycolumn):\n \"\"\"Read csv files and return xs and ys.\n\n Args:\n log_dir (str): Log directory for csv file.\n xcolumn (str): Which column should be the JSON x axis.\n ycolumn (str): Which column should be the JSON y axis.\n\n Returns:\n list: List of x axis points.\n list: List of y axis points.\n\n \"\"\"\n xs, ys = [], []\n with open(os.path.join(log_dir, 'progress.csv'), 'r') as csv_file:\n for row in csv.DictReader(csv_file):\n xs.append(float(row[xcolumn]))\n ys.append(float(row[ycolumn]))\n\n return xs, ys\n\n\ndef _export_to_json(json_name, xs, xlabel, ys, 
ylabel, ys_std):\n \"\"\"Save selected csv column to JSON preparing for automatic benchmarking.\n\n Args:\n json_name (str): The JSON file name.\n xs (list): List of x axis points\n xlabel (str): Label name for x axis.\n ys (np.array): List of y axis points\n ylabel (str): Label name for y axis.\n ys_std (np.array): Standard deviation of y asis, used to calculate\n upper and lower boundary for confidence interval.\n\n \"\"\"\n json_path = os.path.join(_log_dir, 'auto', json_name + '.json')\n with open(json_path, 'w') as json_file:\n json.dump(\n dict(x=xs,\n y=ys.tolist(),\n y_min=(ys - ys_std).tolist(),\n y_max=(ys + ys_std).tolist(),\n xlabel=xlabel,\n ylabel=ylabel), json_file)\n\n\ndef _upload_to_gcp_storage(exec_dir):\n \"\"\"Upload all files to GCP storage under exec_dir folder.\n\n Args:\n exec_dir (str): The execution directory.\n\n \"\"\"\n _bucket = storage.Client().bucket('resl-garage-benchmarks')\n exec_name = os.path.basename(exec_dir)\n\n for folder_name in os.listdir(exec_dir):\n folder_path = os.path.join(exec_dir, folder_name)\n if not os.path.isfile(folder_path):\n remote_folder = os.path.join(exec_name, folder_name)\n\n for file_name in os.listdir(folder_path):\n file_path = os.path.join(folder_path, file_name)\n if os.path.isfile(file_path):\n blob = _bucket.blob(os.path.join(remote_folder, file_name))\n blob.upload_from_filename(file_path)\n", "path": "benchmarks/src/garage_benchmarks/helper.py"}]}
| 2,829 | 189 |
gh_patches_debug_7797
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-4085
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CIME mappy scripts_regression_tests in bad shape
I'm seeing tons of fails:
```
FAILED (failures=48, errors=7, skipped=21)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/code_checker.py`
Content:
```
1 """
2 Libraries for checking python code with pylint
3 """
4
5 from CIME.XML.standard_module_setup import *
6
7 from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver
8
9 from multiprocessing.dummy import Pool as ThreadPool
10 #pylint: disable=import-error
11 from distutils.spawn import find_executable
12
13 logger = logging.getLogger(__name__)
14
15 ###############################################################################
16 def _run_pylint(on_file, interactive):
17 ###############################################################################
18 pylint = find_executable("pylint")
19
20 cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import"
21 cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
22 cmd_options += ",logging-format-interpolation,no-name-in-module"
23 cimeroot = get_cime_root()
24 srcroot = get_src_root()
25
26 if "scripts/Tools" in on_file:
27 cmd_options +=",relative-import"
28
29 # add init-hook option
30 cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\",\"%s\"))'"%\
31 (os.path.join(cimeroot,"scripts","lib"),
32 os.path.join(cimeroot,"scripts","Tools"),
33 os.path.join(cimeroot,"scripts","fortran_unit_testing","python"),
34 os.path.join(srcroot,"components","cmeps","cime_config","runseq"))
35
36 cmd = "%s %s %s" % (pylint, cmd_options, on_file)
37 logger.debug("pylint command is %s"%cmd)
38 stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)
39 if stat != 0:
40 if interactive:
41 logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd))
42 logger.info(out + "\n" + err)
43 return (on_file, out + "\n" + err)
44 else:
45 if interactive:
46 logger.info("File %s has no pylint problems" % on_file)
47 return (on_file, "")
48
49 ###############################################################################
50 def _matches(file_path, file_ends):
51 ###############################################################################
52 for file_end in file_ends:
53 if file_path.endswith(file_end):
54 return True
55
56 return False
57
58 ###############################################################################
59 def _should_pylint_skip(filepath):
60 ###############################################################################
61 # TODO - get rid of this
62 list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs", "doc")
63 for dir_to_skip in list_of_directories_to_ignore:
64 if dir_to_skip + "/" in filepath:
65 return True
66 if filepath == "scripts/lib/six.py":
67 return True
68 # intended to be temporary, file needs update
69 if filepath.endswith("archive_metadata") or filepath.endswith("pgn.py"):
70 return True
71
72 return False
73
74 ###############################################################################
75 def get_all_checkable_files():
76 ###############################################################################
77 cimeroot = get_cime_root()
78 all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines()
79 if get_cime_default_driver() == "nuopc":
80 srcroot = get_src_root()
81 nuopc_git_files = []
82 try:
83 nuopc_git_files = run_cmd_no_fail("git ls-files", from_dir=os.path.join(srcroot,"components","cmeps"), verbose=False).splitlines()
84 except:
85 logger.warning("No nuopc driver found in source")
86 all_git_files.extend([os.path.join(srcroot,"components","cmeps",_file) for _file in nuopc_git_files])
87 files_to_test = [item for item in all_git_files
88 if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]
89
90 return files_to_test
91
92 ###############################################################################
93 def check_code(files, num_procs=10, interactive=False):
94 ###############################################################################
95 """
96 Check all python files in the given directory
97
98 Returns True if all files had no problems
99 """
100 # Get list of files to check, we look to see if user-provided file argument
101 # is a valid file, if not, we search the repo for a file with similar name.
102 files_to_check = []
103 if files:
104 repo_files = get_all_checkable_files()
105 for filearg in files:
106 if os.path.exists(filearg):
107 files_to_check.append(os.path.abspath(filearg))
108 else:
109 found = False
110 for repo_file in repo_files:
111 if repo_file.endswith(filearg):
112 found = True
113 files_to_check.append(repo_file) # could have multiple matches
114
115 if not found:
116 logger.warning("Could not find file matching argument '%s'" % filearg)
117 else:
118 # Check every python file
119 files_to_check = get_all_checkable_files()
120
121 if "scripts/lib/six.py" in files_to_check:
122 files_to_check.remove("scripts/lib/six.py")
123 logger.info("Not checking contributed file six.py")
124
125 expect(len(files_to_check) > 0, "No matching files found")
126
127 # No point in using more threads than files
128 if len(files_to_check) < num_procs:
129 num_procs = len(files_to_check)
130
131 pool = ThreadPool(num_procs)
132 results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)
133 pool.close()
134 pool.join()
135 return dict(results)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py
--- a/scripts/lib/CIME/code_checker.py
+++ b/scripts/lib/CIME/code_checker.py
@@ -19,7 +19,7 @@
cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import"
cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
- cmd_options += ",logging-format-interpolation,no-name-in-module"
+ cmd_options += ",logging-format-interpolation,no-name-in-module,arguments-renamed"
cimeroot = get_cime_root()
srcroot = get_src_root()
|
{"golden_diff": "diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py\n--- a/scripts/lib/CIME/code_checker.py\n+++ b/scripts/lib/CIME/code_checker.py\n@@ -19,7 +19,7 @@\n \n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import\"\n cmd_options += \",fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n- cmd_options += \",logging-format-interpolation,no-name-in-module\"\n+ cmd_options += \",logging-format-interpolation,no-name-in-module,arguments-renamed\"\n cimeroot = get_cime_root()\n srcroot = get_src_root()\n", "issue": "CIME mappy scripts_regression_tests in bad shape\nI'm seeing tons of fails:\r\n\r\n```\r\nFAILED (failures=48, errors=7, skipped=21)\r\n```\n", "before_files": [{"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n#pylint: disable=import-error\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import\"\n cmd_options += \",fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n cmd_options += \",logging-format-interpolation,no-name-in-module\"\n cimeroot = get_cime_root()\n srcroot = get_src_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"),\n os.path.join(srcroot,\"components\",\"cmeps\",\"cime_config\",\"runseq\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\", \"doc\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip + \"/\" in filepath:\n return True\n if filepath == \"scripts/lib/six.py\":\n return True\n # intended to be temporary, file needs update\n if 
filepath.endswith(\"archive_metadata\") or filepath.endswith(\"pgn.py\"):\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n if get_cime_default_driver() == \"nuopc\":\n srcroot = get_src_root()\n nuopc_git_files = []\n try:\n nuopc_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=os.path.join(srcroot,\"components\",\"cmeps\"), verbose=False).splitlines()\n except:\n logger.warning(\"No nuopc driver found in source\")\n all_git_files.extend([os.path.join(srcroot,\"components\",\"cmeps\",_file) for _file in nuopc_git_files])\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n files_to_check = []\n if files:\n repo_files = get_all_checkable_files()\n for filearg in files:\n if os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n if \"scripts/lib/six.py\" in files_to_check:\n files_to_check.remove(\"scripts/lib/six.py\")\n logger.info(\"Not checking contributed file six.py\")\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}], "after_files": [{"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver\n\nfrom multiprocessing.dummy import Pool as ThreadPool\n#pylint: disable=import-error\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import\"\n cmd_options += \",fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n cmd_options += 
\",logging-format-interpolation,no-name-in-module,arguments-renamed\"\n cimeroot = get_cime_root()\n srcroot = get_src_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"),\n os.path.join(srcroot,\"components\",\"cmeps\",\"cime_config\",\"runseq\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\", \"doc\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip + \"/\" in filepath:\n return True\n if filepath == \"scripts/lib/six.py\":\n return True\n # intended to be temporary, file needs update\n if filepath.endswith(\"archive_metadata\") or filepath.endswith(\"pgn.py\"):\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n if get_cime_default_driver() == \"nuopc\":\n srcroot = get_src_root()\n nuopc_git_files = []\n try:\n nuopc_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=os.path.join(srcroot,\"components\",\"cmeps\"), verbose=False).splitlines()\n except:\n logger.warning(\"No nuopc driver found in source\")\n all_git_files.extend([os.path.join(\"components\",\"cmeps\",_file) for _file in nuopc_git_files])\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n files_to_check = []\n if files:\n repo_files = get_all_checkable_files()\n for filearg in files:\n if 
os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n if \"scripts/lib/six.py\" in files_to_check:\n files_to_check.remove(\"scripts/lib/six.py\")\n logger.info(\"Not checking contributed file six.py\")\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}]}
| 1,807 | 158 |
gh_patches_debug_61170
|
rasdani/github-patches
|
git_diff
|
ResonantGeoData__ResonantGeoData-470
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proper testing for rgd_client
We need to implement real tests for the Python client
The tests for this would require running RGD with prepopulated data in the background, and then executing the client tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rgd-client/rgd_client/rgdc.py`
Content:
```
1 from base64 import b64encode
2 from dataclasses import dataclass
3 import getpass
4 from pathlib import Path
5 import tempfile
6 from typing import Dict, Iterator, List, Optional, Tuple, Union
7
8 from tqdm import tqdm
9
10 from .session import RgdcSession
11 from .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
12 from .utils import (
13 DEFAULT_RGD_API,
14 download_checksum_file_to_path,
15 limit_offset_pager,
16 spatial_search_params,
17 spatial_subentry_id,
18 )
19
20
21 @dataclass
22 class RasterDownload:
23 path: Path
24 images: List[Path]
25 ancillary: List[Path]
26
27
28 class Rgdc:
29 def __init__(
30 self,
31 api_url: str = DEFAULT_RGD_API,
32 username: Optional[str] = None,
33 password: Optional[str] = None,
34 ):
35 """
36 Initialize a RGD Client.
37
38 Args:
39 api_url: The base url of the RGD API instance.
40 username: The username to authenticate to the instance with, if any.
41 password: The password associated with the provided username. If None, a prompt will be provided.
42
43 Returns:
44 A new Rgdc instance.
45 """
46 auth_header = None
47
48 # Prompt for password if not provided
49 if username is not None and password is None:
50 password = getpass.getpass()
51
52 if username and password:
53 encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()
54 auth_header = f'Basic {encoded_credentials}'
55
56 self.session = RgdcSession(base_url=api_url, auth_header=auth_header)
57
58 def list_image_tiles(self, image_id: Union[str, int]) -> Dict:
59 """List geodata imagery tiles."""
60 r = self.session.get(f'image_process/imagery/{image_id}/tiles')
61 return r.json()
62
63 def download_image_file(
64 self, image_id: Union[str, int], chunk_size: int = 1024 * 1024
65 ) -> Iterator[bytes]:
66 """
67 Download the associated ImageFile data for this ImageEntry directly from S3.
68
69 Args:
70 image_id: The ID of the ImageEntry to download.
71 chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).
72
73 Returns:
74 An iterator of byte chunks.
75 """
76 r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)
77 return r.iter_content(chunk_size=chunk_size)
78
79 def download_image_thumbnail(
80 self,
81 image_id: Union[str, int],
82 ) -> bytes:
83 """
84 Download the generated thumbnail for this ImageEntry.
85
86 Args:
87 image_id: The ID of the ImageEntry to download.
88
89 Returns:
90 Thumbnail bytes.
91 """
92 r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')
93 return r.content
94
95 def download_raster_thumbnail(
96 self,
97 raster_meta_id: Union[str, int, dict],
98 band: int = 0,
99 ) -> bytes:
100 """
101 Download the generated thumbnail for this ImageEntry.
102
103 Args:
104 raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.
105 band: The index of the image in the raster's image set to produce thumbnail from.
106
107 Returns:
108 Thumbnail bytes.
109 """
110 if isinstance(raster_meta_id, dict):
111 raster_meta_id = spatial_subentry_id(raster_meta_id)
112
113 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
114 parent_raster = r.json().get('parent_raster', {})
115 images = parent_raster.get('image_set', {}).get('images', [])
116 try:
117 return self.download_image_thumbnail(images[band]['id'])
118 except IndexError:
119 raise IndexError(f'Band index ({band}) out of range.')
120
121 def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:
122 """Get raster entry detail.
123
124 Args:
125 stac: Optionally return as STAC Item dictionary/JSON.
126
127 Returns:
128 Serialized object representation.
129 """
130 if isinstance(raster_meta_id, dict):
131 raster_meta_id = spatial_subentry_id(raster_meta_id)
132
133 if stac:
134 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')
135 else:
136 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
137 return r.json()
138
139 def download_raster(
140 self,
141 raster_meta_id: Union[str, int, dict],
142 pathname: Optional[str] = None,
143 nest_with_name: bool = False,
144 keep_existing: bool = True,
145 ) -> RasterDownload:
146 """
147 Download the image set associated with a raster entry to disk.
148
149 Args:
150 raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.
151 pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.
152 nest_with_name: If True, nests the download within an additional directory, using the raster entry name.
153 keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.
154
155 Returns:
156 A dictionary of the paths to all files downloaded under the directory.
157 """
158 if isinstance(raster_meta_id, dict):
159 raster_meta_id = spatial_subentry_id(raster_meta_id)
160
161 r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
162 parent_raster = r.json().get('parent_raster', {})
163
164 # Create dirs after request to avoid empty dirs if failed
165 if pathname is None:
166 pathname = tempfile.mkdtemp()
167
168 # Handle optional nesting with raster entry name
169 path = Path(pathname)
170 parent_raster_name: Optional[str] = parent_raster.get('name')
171
172 if nest_with_name and parent_raster_name:
173 path = path / parent_raster_name
174
175 # Ensure base download directory exists
176 if not path.exists():
177 path.mkdir()
178
179 # Initialize dataclass
180 raster_download = RasterDownload(path, [], [])
181
182 # Download images
183 images = parent_raster.get('image_set', {}).get('images', [])
184 for image in tqdm(images, desc='Downloading image files'):
185 file = image.get('file', {})
186 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
187 if file_path:
188 raster_download.images.append(file_path)
189
190 # Download ancillary files
191 ancillary = parent_raster.get('ancillary_files', [])
192 for file in tqdm(ancillary, desc='Downloading ancillary files'):
193 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
194 if file_path:
195 raster_download.ancillary.append(file_path)
196
197 return raster_download
198
199 def search(
200 self,
201 query: Optional[Union[Dict, str]] = None,
202 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
203 relates: Optional[str] = None,
204 distance: Optional[Tuple[float, float]] = None,
205 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
206 instrumentation: Optional[str] = None,
207 limit: Optional[int] = None,
208 offset: Optional[int] = None,
209 ) -> List[Dict]:
210 """
211 Search for geospatial entries based on various criteria.
212
213 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
214 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
215
216 Args:
217 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
218 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
219 be used to filter data such that predicate(a, b) where b is the queried geometry.
220 relates: Specify exactly how the queried geometry should relate to the data using a
221 DE-9IM string code.
222 distance: The min/max distance around the queried geometry in meters.
223 acquired: The min/max date and time (ISO 8601) when data was acquired.
224 instrumentation: The instrumentation used to acquire at least one of these data.
225 limit: The maximum number of results to return.
226 offset: The number of results to skip.
227
228 Returns:
229 A list of Spatial Entries.
230 """
231 params = spatial_search_params(
232 query=query,
233 predicate=predicate,
234 relates=relates,
235 distance=distance,
236 acquired=acquired,
237 instrumentation=instrumentation,
238 limit=limit,
239 offset=offset,
240 )
241 return list(limit_offset_pager(self.session, 'rgd/search', params=params))
242
243 def create_raster_stac(self, raster: Dict) -> Dict:
244 """Create a raster entry using STAC format."""
245 r = self.session.post('rgd_imagery/raster/stac', json=raster)
246 r.raise_for_status()
247
248 return r.json()
249
250 def search_raster_stac(
251 self,
252 query: Optional[Union[Dict, str]] = None,
253 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
254 relates: Optional[str] = None,
255 distance: Optional[Tuple[float, float]] = None,
256 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
257 instrumentation: Optional[str] = None,
258 num_bands: Optional[Tuple[int, int]] = None,
259 resolution: Optional[Tuple[int, int]] = None,
260 cloud_cover: Optional[Tuple[float, float]] = None,
261 limit: Optional[int] = None,
262 offset: Optional[int] = None,
263 ) -> List[Dict]:
264 """
265 Search for raster entries based on various criteria.
266
267 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
268 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
269
270 Args:
271 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
272 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
273 be used to filter data such that predicate(a, b) where b is the queried geometry.
274 relates: Specify exactly how the queried geometry should relate to the data using a
275 DE-9IM string code.
276 distance: The min/max distance around the queried geometry in meters.
277 acquired: The min/max date and time (ISO 8601) when data was acquired.
278 instrumentation: The instrumentation used to acquire at least one of these data.
279 num_bands: The min/max number of bands in the raster.
280 resolution: The min/max resolution of the raster.
281 cloud_cover: The min/max cloud coverage of the raster.
282 limit: The maximum number of results to return.
283 offset: The number of results to skip.
284
285 Returns:
286 A list of Spatial Entries in STAC Item format.
287 """
288 params = spatial_search_params(
289 query=query,
290 predicate=predicate,
291 relates=relates,
292 distance=distance,
293 acquired=acquired,
294 instrumentation=instrumentation,
295 limit=limit,
296 offset=offset,
297 )
298
299 if num_bands and len(num_bands) == 2:
300 nbmin, nbmax = num_bands
301 params['num_bands_min'] = nbmin
302 params['num_bands_max'] = nbmax
303
304 if resolution and len(resolution) == 2:
305 rmin, rmax = resolution
306 params['resolution_min'] = rmin
307 params['resolution_max'] = rmax
308
309 if cloud_cover and len(cloud_cover) == 2:
310 ccmin, ccmax = cloud_cover
311 params['cloud_cover_min'] = ccmin
312 params['cloud_cover_max'] = ccmax
313
314 return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rgd-client/rgd_client/rgdc.py b/rgd-client/rgd_client/rgdc.py
--- a/rgd-client/rgd_client/rgdc.py
+++ b/rgd-client/rgd_client/rgdc.py
@@ -238,7 +238,11 @@
limit=limit,
offset=offset,
)
- return list(limit_offset_pager(self.session, 'rgd/search', params=params))
+
+ r = self.session.get('rgd/search', params=params)
+ r.raise_for_status()
+
+ return r.json()
def create_raster_stac(self, raster: Dict) -> Dict:
"""Create a raster entry using STAC format."""
|
{"golden_diff": "diff --git a/rgd-client/rgd_client/rgdc.py b/rgd-client/rgd_client/rgdc.py\n--- a/rgd-client/rgd_client/rgdc.py\n+++ b/rgd-client/rgd_client/rgdc.py\n@@ -238,7 +238,11 @@\n limit=limit,\n offset=offset,\n )\n- return list(limit_offset_pager(self.session, 'rgd/search', params=params))\n+\n+ r = self.session.get('rgd/search', params=params)\n+ r.raise_for_status()\n+\n+ return r.json()\n \n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n", "issue": "Proper testing for rgd_client\nWe need to implement real tests for the Python client\r\n\r\nThe tests for this would require running RGD with data prepopulated in the background then executing the client tests\n", "before_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n band: The index of the 
image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> 
List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n return list(limit_offset_pager(self.session, 'rgd/search', params=params))\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('rgd_imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))\n", "path": "rgd-client/rgd_client/rgdc.py"}], "after_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. 
If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. 
Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n r = self.session.get('rgd/search', params=params)\n r.raise_for_status()\n\n return r.json()\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('rgd_imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))\n", "path": "rgd-client/rgd_client/rgdc.py"}]}
| 3,843 | 164 |
gh_patches_debug_11000
|
rasdani/github-patches
|
git_diff
|
sotetsuk__pgx-907
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `pgx.__version__`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2 from pathlib import Path
3
4 long_description = (Path(__file__).parent / "README.md").read_text()
5
6 setup(
7 name="pgx",
8 version="0.7.4",
9 description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)",
10 long_description=long_description,
11 long_description_content_type="text/markdown",
12 url="https://github.com/sotetsuk/pgx",
13 author="Sotetsu KOYAMADA",
14 author_email="[email protected]",
15 keywords="",
16 packages=find_packages(),
17 package_data={
18 "": ["LICENSE", "*.svg", "_src/assets/*.npy", "_mahjong/*.json"]
19 },
20 include_package_data=True,
21 install_requires=[
22 "jax>=0.3.25", # JAX version on Colab (TPU)
23 "svgwrite",
24 "typing_extensions",
25 ],
26 classifiers=[
27 "Programming Language :: Python :: 3.8",
28 "Programming Language :: Python :: 3.9",
29 "Programming Language :: Python :: 3.10",
30 ],
31 )
32
```
Path: `pgx/__init__.py`
Content:
```
1 from pgx._src.api_test import v1_api_test
2 from pgx._src.visualizer import (
3 save_svg,
4 save_svg_animation,
5 set_visualization_config,
6 )
7 from pgx.v1 import Env, EnvId, State, available_envs, make
8
9 __all__ = [
10 # v1 api components
11 "State",
12 "Env",
13 "EnvId",
14 "make",
15 "available_envs",
16 # visualization
17 "set_visualization_config",
18 "save_svg",
19 "save_svg_animation",
20 # api tests
21 "v1_api_test",
22 ]
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pgx/__init__.py b/pgx/__init__.py
--- a/pgx/__init__.py
+++ b/pgx/__init__.py
@@ -6,6 +6,8 @@
)
from pgx.v1 import Env, EnvId, State, available_envs, make
+__version__ = "0.8.0"
+
__all__ = [
# v1 api components
"State",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,6 @@
setup(
name="pgx",
- version="0.7.4",
description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)",
long_description=long_description,
long_description_content_type="text/markdown",
|
{"golden_diff": "diff --git a/pgx/__init__.py b/pgx/__init__.py\n--- a/pgx/__init__.py\n+++ b/pgx/__init__.py\n@@ -6,6 +6,8 @@\n )\n from pgx.v1 import Env, EnvId, State, available_envs, make\n \n+__version__ = \"0.8.0\"\n+\n __all__ = [\n # v1 api components\n \"State\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,6 @@\n \n setup(\n name=\"pgx\",\n- version=\"0.7.4\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n", "issue": "Add `pgx.__version__`\n\n", "before_files": [{"content": "from setuptools import find_packages, setup\nfrom pathlib import Path\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=\"pgx\",\n version=\"0.7.4\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sotetsuk/pgx\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\n \"\": [\"LICENSE\", \"*.svg\", \"_src/assets/*.npy\", \"_mahjong/*.json\"]\n },\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"svgwrite\",\n \"typing_extensions\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "from pgx._src.api_test import v1_api_test\nfrom pgx._src.visualizer import (\n save_svg,\n save_svg_animation,\n set_visualization_config,\n)\nfrom pgx.v1 import Env, EnvId, State, available_envs, make\n\n__all__ = [\n # v1 api components\n \"State\",\n \"Env\",\n \"EnvId\",\n \"make\",\n \"available_envs\",\n # visualization\n \"set_visualization_config\",\n \"save_svg\",\n \"save_svg_animation\",\n # api tests\n \"v1_api_test\",\n]\n", "path": "pgx/__init__.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\nfrom pathlib import Path\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=\"pgx\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sotetsuk/pgx\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\n \"\": [\"LICENSE\", \"*.svg\", \"_src/assets/*.npy\", \"_mahjong/*.json\"]\n },\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"svgwrite\",\n \"typing_extensions\",\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}, {"content": "from pgx._src.api_test import v1_api_test\nfrom pgx._src.visualizer import (\n save_svg,\n save_svg_animation,\n set_visualization_config,\n)\nfrom pgx.v1 import Env, EnvId, State, available_envs, make\n\n__version__ = \"0.8.0\"\n\n__all__ = [\n # v1 api components\n \"State\",\n \"Env\",\n \"EnvId\",\n \"make\",\n \"available_envs\",\n # visualization\n \"set_visualization_config\",\n \"save_svg\",\n \"save_svg_animation\",\n # api tests\n 
\"v1_api_test\",\n]\n", "path": "pgx/__init__.py"}]}
| 755 | 182 |
gh_patches_debug_20075
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1595
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python 3.6
We should now drop Python 3.6 as 0.19 is out.
_Originally posted by @Cadair in https://github.com/opsdroid/opsdroid/issues/1534#issuecomment-644825884_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/cli/utils.py`
Content:
```
1 """Utilities for the opsdroid CLI commands."""
2
3 import click
4 import contextlib
5 import gettext
6 import os
7 import logging
8 import subprocess
9 import sys
10 import time
11
12 from opsdroid.core import OpsDroid
13 from opsdroid.configuration import load_config_file
14 from opsdroid.const import (
15 LOCALE_DIR,
16 DEFAULT_LANGUAGE,
17 DEFAULT_CONFIG_PATH,
18 DEFAULT_CONFIG_LOCATIONS,
19 )
20 from opsdroid.helper import get_config_option
21 from opsdroid.loader import Loader
22 from opsdroid.logging import configure_logging
23
24
25 _LOGGER = logging.getLogger("opsdroid")
26
27 path_option = click.option(
28 "-f",
29 "path",
30 help="Load a configuration from a path instead of using the default location.",
31 type=click.Path(exists=True),
32 )
33
34
35 def edit_config(ctx, path):
36 """Open config/log file with favourite editor.
37
38 Args:
39 ctx (:obj:`click.Context`): The current click cli context.
40 path (str or None): the path passed to the config option.
41 value (string): the value of this parameter after invocation.
42
43 Returns:
44 int: the exit code. Always returns 0 in this case.
45
46 """
47 file = path or DEFAULT_CONFIG_PATH
48 editor = os.environ.get("EDITOR", "vi")
49
50 if editor == "vi":
51 click.echo(
52 "You are about to edit a file in vim. \n"
53 "Read the tutorial on vim at: https://bit.ly/2HRvvrB"
54 )
55 time.sleep(1.5)
56
57 subprocess.run([editor, file])
58 ctx.exit(0)
59
60
61 def validate_config(ctx, path):
62 """Validate opsdroid configuration.
63
64 We load the configuration and modules from it to run the validation on them.
65 Only modules that contain the constant variable `CONFIG_SCHEMA` will be validated
66 the ones without it will just be silent.
67
68 Note that if the path doesn't exist or is a bad one click will throw an error telling
69 you that the path doesn't exist. Also, the file needs to be either a json or a yaml file.
70
71
72 Args:
73 ctx (:obj:`click.Context`): The current click cli context.
74 path (string): a string representing the path to load the config,
75 obtained from `ctx.obj`.
76 value (string): the value of this parameter after invocation.
77 It is either "config" or "log" depending on the program
78 calling this function.
79
80 Returns:
81 int: the exit code. Always returns 0 in this case.
82
83 """
84 with OpsDroid() as opsdroid:
85 loader = Loader(opsdroid)
86
87 config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)
88
89 loader.load_modules_from_config(config)
90 click.echo("Configuration validated - No errors founds!")
91
92 ctx.exit(0)
93
94
95 def configure_lang(config):
96 """Configure app language based on user config.
97
98 Args:
99 config: Language Configuration and it uses ISO 639-1 code.
100 for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes
101
102
103 """
104 lang_code = config.get("lang", DEFAULT_LANGUAGE)
105 if lang_code != DEFAULT_LANGUAGE:
106 lang = gettext.translation("opsdroid", LOCALE_DIR, (lang_code,), fallback=True)
107 lang.install()
108
109
110 def check_dependencies():
111 """Check for system dependencies required by opsdroid.
112
113 Returns:
114 int: the exit code. Returns 1 if the Python version installed is
115 below 3.6.
116
117 """
118 if sys.version_info.major < 3 or sys.version_info.minor < 6:
119 logging.critical(_("Whoops! opsdroid requires python 3.6 or above."))
120 sys.exit(1)
121
122
123 def welcome_message(config):
124 """Add welcome message if set to true in configuration.
125
126 Args:
127 config: config loaded by Loader
128
129 Raises:
130 KeyError: If 'welcome-message' key is not found in configuration file
131
132 """
133 try:
134 if config["welcome-message"]:
135 _LOGGER.info("=" * 40)
136 _LOGGER.info(
137 _(
138 "You can customise your opsdroid by modifying your configuration.yaml."
139 )
140 )
141 _LOGGER.info(
142 _("Read more at: http://opsdroid.readthedocs.io/#configuration")
143 )
144 _LOGGER.info(_("Watch the Get Started Videos at: http://bit.ly/2fnC0Fh"))
145 _LOGGER.info(
146 _(
147 "Install Opsdroid Desktop at: \n"
148 "https://github.com/opsdroid/opsdroid-desktop/releases"
149 )
150 )
151 _LOGGER.info("=" * 40)
152 except KeyError:
153 _LOGGER.warning(
154 _("'welcome-message: true/false' is missing in configuration.yaml")
155 )
156
157
158 def list_all_modules(ctx, path):
159 """List the active modules from config.
160
161 This function will try to get information from the modules that are active in the
162 configuration file and print them as a table or will just print a sentence saying that
163 there are no active modules for that type.
164
165 Args:
166 ctx (:obj:`click.Context`): The current click cli context.
167 path (str): a str that contains a path passed.
168
169 Returns:
170 int: the exit code. Always returns 0 in this case.
171
172 """
173 config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)
174
175 click.echo(
176 click.style(
177 f"{'NAME':15} {'TYPE':15} {'MODE':15} {'CACHED':15} {'LOCATION':15}",
178 fg="blue",
179 bold=True,
180 )
181 )
182 for module_type, module in config.items():
183 if module_type in ("connectors", "databases", "parsers", "skills"):
184 for name, options in module.items():
185
186 mode = get_config_option(
187 ["repo", "path", "gist"], options, True, "module"
188 )
189 cache = get_config_option(["no-cache"], options, "no", "yes")
190 location = get_config_option(
191 ["repo", "path", "gist"],
192 options,
193 True,
194 f"opsdroid.{module_type}.{name}",
195 )
196
197 click.echo(
198 f"{name:15} {module_type:15} {mode[1]:15} {cache[0]:15} {location[2]:15}"
199 )
200
201 ctx.exit(0)
202
203
204 def build_config(ctx, params):
205 """Load configuration, load modules and install dependencies.
206
207 This function loads the configuration and install all necessary
208 dependencies defined on a `requirements.txt` file inside the module.
209 If the flag `--verbose` is passed the logging level will be set as debug and
210 all logs will be shown to the user.
211
212
213 Args:
214 ctx (:obj:`click.Context`): The current click cli context.
215 params (dict): a dictionary of all parameters pass to the click
216 context when invoking this function as a callback.
217
218 Returns:
219 int: the exit code. Always returns 0 in this case.
220
221 """
222 click.echo("Opsdroid will build modules from config.")
223 path = params.get("path")
224
225 with contextlib.suppress(Exception):
226 check_dependencies()
227
228 config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)
229
230 if params["verbose"]:
231 config["logging"] = {"level": "debug"}
232 configure_logging(config)
233
234 with OpsDroid(config=config) as opsdroid:
235
236 opsdroid.loader.load_modules_from_config(config)
237
238 click.echo(click.style("SUCCESS:", bg="green", bold=True), nl=False)
239 click.echo(" Opsdroid modules successfully built from config.")
240
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = "opsdroid"
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, "README.md"), encoding="utf8").read()
12
13 PACKAGES = find_packages(
14 exclude=["tests", "tests.*", "modules", "modules.*", "docs", "docs.*"]
15 )
16
17
18 # For now we simply define the install_requires based on the contents
19 # of requirements.txt. In the future, install_requires may become much
20 # looser than the (automatically) resolved requirements.txt.
21 with open(os.path.join(HERE, "requirements.txt"), "r") as fh:
22 REQUIRES = [line.strip() for line in fh]
23
24
25 class Develop(develop):
26 """Custom `develop` command to always build mo files on install -e."""
27
28 def run(self):
29 self.run_command("compile_catalog")
30 develop.run(self) # old style class
31
32
33 class BuildPy(build_py):
34 """Custom `build_py` command to always build mo files for wheels."""
35
36 def run(self):
37 self.run_command("compile_catalog")
38 build_py.run(self) # old style class
39
40
41 class Sdist(sdist):
42 """Custom `sdist` command to ensure that mo files are always created."""
43
44 def run(self):
45 self.run_command("compile_catalog")
46 sdist.run(self) # old style class
47
48
49 setup(
50 name=PACKAGE_NAME,
51 version=versioneer.get_version(),
52 license="Apache License 2.0",
53 url="https://opsdroid.github.io/",
54 download_url="https://github.com/opsdroid/opsdroid/releases",
55 author="Jacob Tomlinson",
56 author_email="[email protected]",
57 description="An open source ChatOps bot framework.",
58 long_description=README,
59 long_description_content_type="text/markdown",
60 packages=PACKAGES,
61 include_package_data=True,
62 zip_safe=False,
63 platforms="any",
64 classifiers=[
65 "Development Status :: 4 - Beta",
66 "Environment :: Console",
67 "Framework :: AsyncIO",
68 "Intended Audience :: Developers",
69 "Intended Audience :: System Administrators",
70 "Intended Audience :: Information Technology",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3 :: Only",
75 "Programming Language :: Python :: 3.6",
76 "Programming Language :: Python :: 3.7",
77 "Programming Language :: Python :: 3.8",
78 "Topic :: Communications :: Chat",
79 "Topic :: Scientific/Engineering :: Artificial Intelligence",
80 "Topic :: Software Development :: Libraries :: Python Modules",
81 ],
82 install_requires=REQUIRES,
83 test_suite="tests",
84 keywords=[
85 "bot",
86 "bot-framework",
87 "opsdroid",
88 "botkit",
89 "python3",
90 "asyncio",
91 "chatops",
92 "devops",
93 "nlu",
94 ],
95 setup_requires=["Babel"],
96 cmdclass=versioneer.get_cmdclass(
97 {"sdist": Sdist, "build_py": BuildPy, "develop": Develop}
98 ),
99 entry_points={"console_scripts": ["opsdroid = opsdroid.cli:cli"]},
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/cli/utils.py b/opsdroid/cli/utils.py
--- a/opsdroid/cli/utils.py
+++ b/opsdroid/cli/utils.py
@@ -112,11 +112,11 @@
Returns:
int: the exit code. Returns 1 if the Python version installed is
- below 3.6.
+ below 3.7.
"""
- if sys.version_info.major < 3 or sys.version_info.minor < 6:
- logging.critical(_("Whoops! opsdroid requires python 3.6 or above."))
+ if sys.version_info.major < 3 or sys.version_info.minor < 7:
+ logging.critical(_("Whoops! opsdroid requires python 3.7 or above."))
sys.exit(1)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,7 +72,6 @@
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
- "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Communications :: Chat",
|
{"golden_diff": "diff --git a/opsdroid/cli/utils.py b/opsdroid/cli/utils.py\n--- a/opsdroid/cli/utils.py\n+++ b/opsdroid/cli/utils.py\n@@ -112,11 +112,11 @@\n \n Returns:\n int: the exit code. Returns 1 if the Python version installed is\n- below 3.6.\n+ below 3.7.\n \n \"\"\"\n- if sys.version_info.major < 3 or sys.version_info.minor < 6:\n- logging.critical(_(\"Whoops! opsdroid requires python 3.6 or above.\"))\n+ if sys.version_info.major < 3 or sys.version_info.minor < 7:\n+ logging.critical(_(\"Whoops! opsdroid requires python 3.7 or above.\"))\n sys.exit(1)\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,7 +72,6 @@\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n- \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n", "issue": "Drop Python 3.6\nWe should now drop Python 3.6 as 0.19 is out.\r\n\r\n_Originally posted by @Cadair in https://github.com/opsdroid/opsdroid/issues/1534#issuecomment-644825884_\n", "before_files": [{"content": "\"\"\"Utilities for the opsdroid CLI commands.\"\"\"\n\nimport click\nimport contextlib\nimport gettext\nimport os\nimport logging\nimport subprocess\nimport sys\nimport time\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.configuration import load_config_file\nfrom opsdroid.const import (\n LOCALE_DIR,\n DEFAULT_LANGUAGE,\n DEFAULT_CONFIG_PATH,\n DEFAULT_CONFIG_LOCATIONS,\n)\nfrom opsdroid.helper import get_config_option\nfrom opsdroid.loader import Loader\nfrom opsdroid.logging import configure_logging\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\npath_option = click.option(\n \"-f\",\n \"path\",\n help=\"Load a configuration from a path instead of using the default location.\",\n type=click.Path(exists=True),\n)\n\n\ndef edit_config(ctx, path):\n \"\"\"Open config/log file with favourite editor.\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n path (str or None): the path passed to the config option.\n value (string): the value of this parameter after invocation.\n\n Returns:\n int: the exit code. Always returns 0 in this case.\n\n \"\"\"\n file = path or DEFAULT_CONFIG_PATH\n editor = os.environ.get(\"EDITOR\", \"vi\")\n\n if editor == \"vi\":\n click.echo(\n \"You are about to edit a file in vim. \\n\"\n \"Read the tutorial on vim at: https://bit.ly/2HRvvrB\"\n )\n time.sleep(1.5)\n\n subprocess.run([editor, file])\n ctx.exit(0)\n\n\ndef validate_config(ctx, path):\n \"\"\"Validate opsdroid configuration.\n\n We load the configuration and modules from it to run the validation on them.\n Only modules that contain the constant variable `CONFIG_SCHEMA` will be validated\n the ones without it will just be silent.\n\n Note that if the path doesn't exist or is a bad one click will throw an error telling\n you that the path doesn't exist. Also, the file needs to be either a json or a yaml file.\n\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n path (string): a string representing the path to load the config,\n obtained from `ctx.obj`.\n value (string): the value of this parameter after invocation.\n It is either \"config\" or \"log\" depending on the program\n calling this function.\n\n Returns:\n int: the exit code. 
Always returns 0 in this case.\n\n \"\"\"\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n\n config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)\n\n loader.load_modules_from_config(config)\n click.echo(\"Configuration validated - No errors founds!\")\n\n ctx.exit(0)\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\n\n Args:\n config: Language Configuration and it uses ISO 639-1 code.\n for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes\n\n\n \"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\"opsdroid\", LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\n\n Returns:\n int: the exit code. Returns 1 if the Python version installed is\n below 3.6.\n\n \"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 6:\n logging.critical(_(\"Whoops! opsdroid requires python 3.6 or above.\"))\n sys.exit(1)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\n\n Args:\n config: config loaded by Loader\n\n Raises:\n KeyError: If 'welcome-message' key is not found in configuration file\n\n \"\"\"\n try:\n if config[\"welcome-message\"]:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(\n _(\n \"You can customise your opsdroid by modifying your configuration.yaml.\"\n )\n )\n _LOGGER.info(\n _(\"Read more at: http://opsdroid.readthedocs.io/#configuration\")\n )\n _LOGGER.info(_(\"Watch the Get Started Videos at: http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(\n _(\n \"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/releases\"\n )\n )\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(\n _(\"'welcome-message: true/false' is missing in configuration.yaml\")\n )\n\n\ndef list_all_modules(ctx, path):\n \"\"\"List the active modules from config.\n\n This function will try to get information from the modules that are active in the\n configuration file and print them as a table or will just print a sentence saying that\n there are no active modules for that type.\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n path (str): a str that contains a path passed.\n\n Returns:\n int: the exit code. 
Always returns 0 in this case.\n\n \"\"\"\n config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)\n\n click.echo(\n click.style(\n f\"{'NAME':15} {'TYPE':15} {'MODE':15} {'CACHED':15} {'LOCATION':15}\",\n fg=\"blue\",\n bold=True,\n )\n )\n for module_type, module in config.items():\n if module_type in (\"connectors\", \"databases\", \"parsers\", \"skills\"):\n for name, options in module.items():\n\n mode = get_config_option(\n [\"repo\", \"path\", \"gist\"], options, True, \"module\"\n )\n cache = get_config_option([\"no-cache\"], options, \"no\", \"yes\")\n location = get_config_option(\n [\"repo\", \"path\", \"gist\"],\n options,\n True,\n f\"opsdroid.{module_type}.{name}\",\n )\n\n click.echo(\n f\"{name:15} {module_type:15} {mode[1]:15} {cache[0]:15} {location[2]:15}\"\n )\n\n ctx.exit(0)\n\n\ndef build_config(ctx, params):\n \"\"\"Load configuration, load modules and install dependencies.\n\n This function loads the configuration and install all necessary\n dependencies defined on a `requirements.txt` file inside the module.\n If the flag `--verbose` is passed the logging level will be set as debug and\n all logs will be shown to the user.\n\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n params (dict): a dictionary of all parameters pass to the click\n context when invoking this function as a callback.\n\n Returns:\n int: the exit code. Always returns 0 in this case.\n\n \"\"\"\n click.echo(\"Opsdroid will build modules from config.\")\n path = params.get(\"path\")\n\n with contextlib.suppress(Exception):\n check_dependencies()\n\n config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)\n\n if params[\"verbose\"]:\n config[\"logging\"] = {\"level\": \"debug\"}\n configure_logging(config)\n\n with OpsDroid(config=config) as opsdroid:\n\n opsdroid.loader.load_modules_from_config(config)\n\n click.echo(click.style(\"SUCCESS:\", bg=\"green\", bold=True), nl=False)\n click.echo(\" Opsdroid modules successfully built from config.\")\n", "path": "opsdroid/cli/utils.py"}, {"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. 
In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Utilities for the opsdroid CLI commands.\"\"\"\n\nimport click\nimport contextlib\nimport gettext\nimport os\nimport logging\nimport subprocess\nimport sys\nimport time\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.configuration import load_config_file\nfrom opsdroid.const import (\n LOCALE_DIR,\n DEFAULT_LANGUAGE,\n DEFAULT_CONFIG_PATH,\n DEFAULT_CONFIG_LOCATIONS,\n)\nfrom opsdroid.helper import get_config_option\nfrom opsdroid.loader import Loader\nfrom opsdroid.logging import configure_logging\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\npath_option = click.option(\n \"-f\",\n \"path\",\n help=\"Load a configuration from a path instead of using the default location.\",\n type=click.Path(exists=True),\n)\n\n\ndef edit_config(ctx, path):\n \"\"\"Open config/log file with favourite editor.\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n path (str or None): the path passed to the config option.\n value (string): the value of this parameter 
after invocation.\n\n Returns:\n int: the exit code. Always returns 0 in this case.\n\n \"\"\"\n file = path or DEFAULT_CONFIG_PATH\n editor = os.environ.get(\"EDITOR\", \"vi\")\n\n if editor == \"vi\":\n click.echo(\n \"You are about to edit a file in vim. \\n\"\n \"Read the tutorial on vim at: https://bit.ly/2HRvvrB\"\n )\n time.sleep(1.5)\n\n subprocess.run([editor, file])\n ctx.exit(0)\n\n\ndef validate_config(ctx, path):\n \"\"\"Validate opsdroid configuration.\n\n We load the configuration and modules from it to run the validation on them.\n Only modules that contain the constant variable `CONFIG_SCHEMA` will be validated\n the ones without it will just be silent.\n\n Note that if the path doesn't exist or is a bad one click will throw an error telling\n you that the path doesn't exist. Also, the file needs to be either a json or a yaml file.\n\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n path (string): a string representing the path to load the config,\n obtained from `ctx.obj`.\n value (string): the value of this parameter after invocation.\n It is either \"config\" or \"log\" depending on the program\n calling this function.\n\n Returns:\n int: the exit code. Always returns 0 in this case.\n\n \"\"\"\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n\n config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)\n\n loader.load_modules_from_config(config)\n click.echo(\"Configuration validated - No errors founds!\")\n\n ctx.exit(0)\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\n\n Args:\n config: Language Configuration and it uses ISO 639-1 code.\n for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes\n\n\n \"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\"opsdroid\", LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\n\n Returns:\n int: the exit code. Returns 1 if the Python version installed is\n below 3.7.\n\n \"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 7:\n logging.critical(_(\"Whoops! 
opsdroid requires python 3.7 or above.\"))\n sys.exit(1)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\n\n Args:\n config: config loaded by Loader\n\n Raises:\n KeyError: If 'welcome-message' key is not found in configuration file\n\n \"\"\"\n try:\n if config[\"welcome-message\"]:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(\n _(\n \"You can customise your opsdroid by modifying your configuration.yaml.\"\n )\n )\n _LOGGER.info(\n _(\"Read more at: http://opsdroid.readthedocs.io/#configuration\")\n )\n _LOGGER.info(_(\"Watch the Get Started Videos at: http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(\n _(\n \"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/releases\"\n )\n )\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(\n _(\"'welcome-message: true/false' is missing in configuration.yaml\")\n )\n\n\ndef list_all_modules(ctx, path):\n \"\"\"List the active modules from config.\n\n This function will try to get information from the modules that are active in the\n configuration file and print them as a table or will just print a sentence saying that\n there are no active modules for that type.\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n path (str): a str that contains a path passed.\n\n Returns:\n int: the exit code. Always returns 0 in this case.\n\n \"\"\"\n config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)\n\n click.echo(\n click.style(\n f\"{'NAME':15} {'TYPE':15} {'MODE':15} {'CACHED':15} {'LOCATION':15}\",\n fg=\"blue\",\n bold=True,\n )\n )\n for module_type, module in config.items():\n if module_type in (\"connectors\", \"databases\", \"parsers\", \"skills\"):\n for name, options in module.items():\n\n mode = get_config_option(\n [\"repo\", \"path\", \"gist\"], options, True, \"module\"\n )\n cache = get_config_option([\"no-cache\"], options, \"no\", \"yes\")\n location = get_config_option(\n [\"repo\", \"path\", \"gist\"],\n options,\n True,\n f\"opsdroid.{module_type}.{name}\",\n )\n\n click.echo(\n f\"{name:15} {module_type:15} {mode[1]:15} {cache[0]:15} {location[2]:15}\"\n )\n\n ctx.exit(0)\n\n\ndef build_config(ctx, params):\n \"\"\"Load configuration, load modules and install dependencies.\n\n This function loads the configuration and install all necessary\n dependencies defined on a `requirements.txt` file inside the module.\n If the flag `--verbose` is passed the logging level will be set as debug and\n all logs will be shown to the user.\n\n\n Args:\n ctx (:obj:`click.Context`): The current click cli context.\n params (dict): a dictionary of all parameters pass to the click\n context when invoking this function as a callback.\n\n Returns:\n int: the exit code. 
Always returns 0 in this case.\n\n \"\"\"\n click.echo(\"Opsdroid will build modules from config.\")\n path = params.get(\"path\")\n\n with contextlib.suppress(Exception):\n check_dependencies()\n\n config = load_config_file([path] if path else DEFAULT_CONFIG_LOCATIONS)\n\n if params[\"verbose\"]:\n config[\"logging\"] = {\"level\": \"debug\"}\n configure_logging(config)\n\n with OpsDroid(config=config) as opsdroid:\n\n opsdroid.loader.load_modules_from_config(config)\n\n click.echo(click.style(\"SUCCESS:\", bg=\"green\", bold=True), nl=False)\n click.echo(\" Opsdroid modules successfully built from config.\")\n", "path": "opsdroid/cli/utils.py"}, {"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n 
setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": "setup.py"}]}
| 3,593 | 293 |
gh_patches_debug_18676
|
rasdani/github-patches
|
git_diff
|
coala__coala-4183
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
can't build docs due to multiple targets for cross-reference
I can't successfully build the docs using `python setup.py docs` with Sphinx 1.5.5 due to the following error
```
Warning, treated as error:
/home/thomas/gitrepos/coala/coalib/core/DependencyTracker.py:docstring of coalib.core.DependencyTracker.DependencyTracker.check_circular_dependencies:None: WARNING: more than one target found for cross-reference 'CircularDependencyError': coalib.collecting.Dependencies.CircularDependencyError, coalib.core.CircularDependencyError.CircularDependencyError
```
I had a look at the implementation of the two CircularDependencyError classes and they are quite similar; can't they be merged?
--- END ISSUE ---
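As a hedged illustration (not part of the original report; module names here are invented), the ambiguity can be reproduced with a layout like the following, where two documented classes share the short name and a bare `:raises:` reference matches both of them:

```python
# Hypothetical two-module layout: both classes are picked up by autodoc, so a
# bare ``CircularDependencyError`` cross-reference has two possible targets.

# --- mypkg/collecting.py ---
class CircularDependencyError(Exception):
    """Raised when a cycle is found while collecting dependencies."""

# --- mypkg/core.py ---
class CircularDependencyError(Exception):
    """Raised when a dependency graph is cyclic."""

# --- mypkg/tracker.py ---
def check_circular_dependencies(tracker):
    """Check the tracked relations for cycles.

    :raises CircularDependencyError:
        Short name: Sphinx warns "more than one target found" and, with
        warnings treated as errors, the docs build aborts.  Spelling out the
        full path, e.g. ``:raises mypkg.core.CircularDependencyError:``,
        keeps the reference unambiguous without merging the classes.
    """
```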
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/core/Graphs.py`
Content:
```
1 from coalib.core.CircularDependencyError import CircularDependencyError
2
3
4 def traverse_graph(start_nodes, get_successive_nodes,
5 run_on_edge=lambda prev, nxt: None):
6 """
7 Traverses all edges of a directed, possibly disconnected graph once.
8 Detects cyclic graphs by raising a ``CircularDependencyError``.
9
10 >>> graph = {1: [2], 2: [3, 4], 5: [3], 3: [6]}
11 >>> def get_successive_nodes(node):
12 ... return graph.get(node, [])
13 >>> edges = set()
14 >>> def append_to_edges(prev, nxt):
15 ... edges.add((prev, nxt))
16 >>> traverse_graph([1, 5], get_successive_nodes, append_to_edges)
17 >>> sorted(edges)
18 [(1, 2), (2, 3), (2, 4), (3, 6), (5, 3)]
19
20 You can also use this function to detect cyclic graphs:
21
22 >>> graph = {1: [2], 2: [3], 3: [1]}
23 >>> traverse_graph([1], get_successive_nodes)
24 Traceback (most recent call last):
25 ...
26 coalib.core.CircularDependencyError.CircularDependencyError: ...
27
28 :param start_nodes:
29 The nodes where to start traversing the graph.
30 :param get_successive_nodes:
31 A callable that takes in a node and returns an iterable of nodes to
32 traverse next.
33 :param run_on_edge:
34 A callable that is run on each edge during traversing. Takes in two
35 parameters, the previous- and next-node which form an edge. The default
36 is an empty function.
37 :raises CircularDependencyError:
38 Raised when the graph is cyclic.
39 """
40 path = set()
41 visited_nodes = set()
42
43 def visit(node):
44 if node not in visited_nodes:
45 visited_nodes.add(node)
46 path.add(node)
47
48 for subnode in get_successive_nodes(node):
49 run_on_edge(node, subnode)
50
51 if subnode in path:
52 raise CircularDependencyError(subnode)
53
54 visit(subnode)
55
56 path.remove(node)
57
58 for node in start_nodes:
59 visit(node)
60
```
Path: `coalib/core/DependencyTracker.py`
Content:
```
1 from itertools import chain
2
3 from coalib.core.Graphs import traverse_graph
4
5
6 class DependencyTracker:
7 """
8 A ``DependencyTracker`` allows to register and manage dependencies between
9 objects.
10
11 This class uses a directed graph to track relations.
12
13 Add a dependency relation between two objects:
14
15 >>> object1 = object()
16 >>> object2 = object()
17 >>> tracker = DependencyTracker()
18 >>> tracker.add(object2, object1)
19
20 This would define that ``object1`` is dependent on ``object2``.
21
22 If you define that ``object2`` has its dependency duty fulfilled, you can
23 resolve it:
24
25 >>> resolved = tracker.resolve(object2)
26 >>> resolved
27 {<object object at ...>}
28 >>> resolved_object = resolved.pop()
29 >>> resolved_object is object1
30 True
31
32 This returns all objects that are now freed, meaning they have no
33 dependencies any more.
34
35 >>> object3 = object()
36 >>> tracker.add(object2, object1)
37 >>> tracker.add(object3, object1)
38 >>> tracker.resolve(object2)
39 set()
40 >>> tracker.resolve(object3)
41 {<object object at ...>}
42
43 The ones who instantiate a ``DependencyTracker`` are responsible for
44 resolving dependencies in the right order. Dependencies which are itself
45 dependent will be forcefully resolved and removed from their according
46 dependencies too.
47 """
48
49 def __init__(self):
50 self._dependency_dict = {}
51
52 def get_dependants(self, dependency):
53 """
54 Returns all immediate dependants for the given dependency.
55
56 >>> tracker = DependencyTracker()
57 >>> tracker.add(0, 1)
58 >>> tracker.add(0, 2)
59 >>> tracker.add(1, 3)
60 >>> tracker.get_dependants(0)
61 {1, 2}
62 >>> tracker.get_dependants(1)
63 {3}
64 >>> tracker.get_dependants(2)
65 set()
66
67 :param dependency:
68 The dependency to retrieve all dependants from.
69 :return:
70 A set of dependants.
71 """
72 try:
73 return set(self._dependency_dict[dependency])
74 except KeyError:
75 return set()
76
77 def get_dependencies(self, dependant):
78 """
79 Returns all immediate dependencies of a given dependant.
80
81 >>> tracker = DependencyTracker()
82 >>> tracker.add(0, 1)
83 >>> tracker.add(0, 2)
84 >>> tracker.add(1, 2)
85 >>> tracker.get_dependencies(0)
86 set()
87 >>> tracker.get_dependencies(1)
88 {0}
89 >>> tracker.get_dependencies(2)
90 {0, 1}
91
92 :param dependant:
93 The dependant to retrieve all dependencies from.
94 :return:
95 A set of dependencies.
96 """
97 return set(
98 dependency
99 for dependency, dependants in self._dependency_dict.items()
100 if dependant in dependants)
101
102 def get_all_dependants(self, dependency):
103 """
104 Returns a set of all dependants of the given dependency, even
105 indirectly related ones.
106
107 >>> tracker = DependencyTracker()
108 >>> tracker.add(0, 1)
109 >>> tracker.add(1, 2)
110 >>> tracker.get_all_dependants(0)
111 {1, 2}
112
113 :param dependency:
114 The dependency to get all dependants for.
115 :return:
116 A set of dependants.
117 """
118 dependants = set()
119
120 def append_to_dependants(prev, nxt):
121 dependants.add(nxt)
122
123 traverse_graph(
124 [dependency],
125 lambda node: self._dependency_dict.get(node, frozenset()),
126 append_to_dependants)
127
128 return dependants
129
130 def get_all_dependencies(self, dependant):
131 """
132 Returns a set of all dependencies of the given dependants, even
133 indirectly related ones.
134
135 >>> tracker = DependencyTracker()
136 >>> tracker.add(0, 1)
137 >>> tracker.add(1, 2)
138 >>> tracker.get_all_dependencies(2)
139 {0, 1}
140
141 :param dependant:
142 The dependant to get all dependencies for.
143 :return:
144 A set of dependencies.
145 """
146 dependencies = set()
147
148 def append_to_dependencies(prev, nxt):
149 dependencies.add(nxt)
150
151 traverse_graph(
152 [dependant],
153 lambda node:
154 {dependency
155 for dependency, dependants in self._dependency_dict.items()
156 if node in dependants},
157 append_to_dependencies)
158
159 return dependencies
160
161 @property
162 def dependants(self):
163 """
164 Returns a set of all registered dependants.
165
166 >>> tracker = DependencyTracker()
167 >>> tracker.add(0, 1)
168 >>> tracker.add(0, 2)
169 >>> tracker.add(1, 3)
170 >>> tracker.dependants
171 {1, 2, 3}
172 """
173 return set(chain.from_iterable(self._dependency_dict.values()))
174
175 @property
176 def dependencies(self):
177 """
178 Returns a set of all registered dependencies.
179
180 >>> tracker = DependencyTracker()
181 >>> tracker.add(0, 1)
182 >>> tracker.add(0, 2)
183 >>> tracker.add(1, 3)
184 >>> tracker.dependencies
185 {0, 1}
186 """
187 return set(self._dependency_dict.keys())
188
189 def __iter__(self):
190 """
191 Returns an iterator that iterates over all dependency relations.
192
193 >>> tracker = DependencyTracker()
194 >>> tracker.add(0, 1)
195 >>> tracker.add(0, 2)
196 >>> tracker.add(1, 2)
197 >>> for dependency, dependant in sorted(tracker):
198 ... print(dependency, '->', dependant)
199 0 -> 1
200 0 -> 2
201 1 -> 2
202 """
203 return ((dependency, dependant)
204 for dependency, dependants in self._dependency_dict.items()
205 for dependant in dependants)
206
207 def add(self, dependency, dependant):
208 """
209 Add a dependency relation.
210
211 This function does not check for circular dependencies.
212
213 >>> tracker = DependencyTracker()
214 >>> tracker.add(0, 1)
215 >>> tracker.add(0, 2)
216 >>> tracker.resolve(0)
217 {1, 2}
218
219 :param dependency:
220 The object that is the dependency.
221 :param dependant:
222 The object that is the dependant.
223 """
224 if dependency not in self._dependency_dict:
225 self._dependency_dict[dependency] = set()
226
227 self._dependency_dict[dependency].add(dependant)
228
229 def resolve(self, dependency):
230 """
231 Resolves all dependency-relations from the given dependency, and frees
232 and returns dependants with no more dependencies. If the given
233 dependency is itself a dependant, all those relations are also removed.
234
235 >>> tracker = DependencyTracker()
236 >>> tracker.add(0, 1)
237 >>> tracker.add(0, 2)
238 >>> tracker.add(2, 3)
239 >>> tracker.resolve(0)
240 {1, 2}
241 >>> tracker.resolve(2)
242 {3}
243 >>> tracker.resolve(2)
244 set()
245
246 :param dependency:
247 The dependency.
248 :return:
249 Returns a set of dependants whose dependencies were all resolved.
250 """
251 # Check if dependency has itself dependencies which aren't resolved,
252 # these need to be removed too. This operation does not free any
253 # dependencies.
254 dependencies_to_remove = []
255 for tracked_dependency, dependants in self._dependency_dict.items():
256 if dependency in dependants:
257 dependants.remove(dependency)
258
259 # If dependants set is now empty, schedule dependency for
260 # removal from dependency_dict.
261 if not dependants:
262 dependencies_to_remove.append(tracked_dependency)
263
264 for tracked_dependency in dependencies_to_remove:
265 del self._dependency_dict[tracked_dependency]
266
267 # Now free dependants which do depend on the given dependency.
268 possible_freed_dependants = self._dependency_dict.pop(
269 dependency, set())
270 non_free_dependants = set()
271
272 for possible_freed_dependant in possible_freed_dependants:
273 # Check if all dependencies of dependants from above are satisfied.
274 # If so, there are no more dependencies for dependant. Thus it's
275 # resolved.
276 for dependants in self._dependency_dict.values():
277 if possible_freed_dependant in dependants:
278 non_free_dependants.add(possible_freed_dependant)
279 break
280
281 # Remaining dependents are officially resolved.
282 return possible_freed_dependants - non_free_dependants
283
284 def check_circular_dependencies(self):
285 """
286 Checks whether there are circular dependency conflicts.
287
288 >>> tracker = DependencyTracker()
289 >>> tracker.add(0, 1)
290 >>> tracker.add(1, 0)
291 >>> tracker.check_circular_dependencies()
292 Traceback (most recent call last):
293 ...
294 coalib.core.CircularDependencyError.CircularDependencyError: ...
295
296 :raises CircularDependencyError:
297 Raised on circular dependency conflicts.
298 """
299 traverse_graph(
300 self._dependency_dict.keys(),
301 lambda node: self._dependency_dict.get(node, frozenset()))
302
303 @property
304 def are_dependencies_resolved(self):
305 """
306 Checks whether all dependencies in this ``DependencyTracker`` instance
307 are resolved.
308
309 >>> tracker = DependencyTracker()
310 >>> tracker.are_dependencies_resolved
311 True
312 >>> tracker.add(0, 1)
313 >>> tracker.are_dependencies_resolved
314 False
315 >>> tracker.resolve(0)
316 {1}
317 >>> tracker.are_dependencies_resolved
318 True
319
320 :return:
321 ``True`` when all dependencies resolved, ``False`` if not.
322 """
323 return not self._dependency_dict
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/coalib/core/DependencyTracker.py b/coalib/core/DependencyTracker.py
--- a/coalib/core/DependencyTracker.py
+++ b/coalib/core/DependencyTracker.py
@@ -293,7 +293,7 @@
...
coalib.core.CircularDependencyError.CircularDependencyError: ...
- :raises CircularDependencyError:
+ :raises coalib.core.CircularDependencyError.CircularDependencyError:
Raised on circular dependency conflicts.
"""
traverse_graph(
diff --git a/coalib/core/Graphs.py b/coalib/core/Graphs.py
--- a/coalib/core/Graphs.py
+++ b/coalib/core/Graphs.py
@@ -34,7 +34,7 @@
A callable that is run on each edge during traversing. Takes in two
parameters, the previous- and next-node which form an edge. The default
is an empty function.
- :raises CircularDependencyError:
+ :raises coalib.core.CircularDependencyError.CircularDependencyError:
Raised when the graph is cyclic.
"""
path = set()
|
{"golden_diff": "diff --git a/coalib/core/DependencyTracker.py b/coalib/core/DependencyTracker.py\n--- a/coalib/core/DependencyTracker.py\n+++ b/coalib/core/DependencyTracker.py\n@@ -293,7 +293,7 @@\n ...\n coalib.core.CircularDependencyError.CircularDependencyError: ...\n \n- :raises CircularDependencyError:\n+ :raises coalib.core.CircularDependencyError.CircularDependencyError:\n Raised on circular dependency conflicts.\n \"\"\"\n traverse_graph(\ndiff --git a/coalib/core/Graphs.py b/coalib/core/Graphs.py\n--- a/coalib/core/Graphs.py\n+++ b/coalib/core/Graphs.py\n@@ -34,7 +34,7 @@\n A callable that is run on each edge during traversing. Takes in two\n parameters, the previous- and next-node which form an edge. The default\n is an empty function.\n- :raises CircularDependencyError:\n+ :raises coalib.core.CircularDependencyError.CircularDependencyError:\n Raised when the graph is cyclic.\n \"\"\"\n path = set()\n", "issue": "can't build docs due to multiple targets for cross-reference\nI can't successfully build the docs using `python setup.py docs` with Sphinx 1.5.5 due to the following error\r\n\r\n```\r\nWarning, treated as error:\r\n/home/thomas/gitrepos/coala/coalib/core/DependencyTracker.py:docstring of coalib.core.DependencyTracker.DependencyTracker.check_circular_dependencies:None: WARNING: more than one target found for cross-reference 'CircularDependencyError': coalib.collecting.Dependencies.CircularDependencyError, coalib.core.CircularDependencyError.CircularDependencyError\r\n```\r\n\r\nI had a look at the implementation of the two CircularDependenyError classes and they are quite similar, can't they be merged?\n", "before_files": [{"content": "from coalib.core.CircularDependencyError import CircularDependencyError\n\n\ndef traverse_graph(start_nodes, get_successive_nodes,\n run_on_edge=lambda prev, nxt: None):\n \"\"\"\n Traverses all edges of a directed, possibly disconnected graph once.\n Detects cyclic graphs by raising a ``CircularDependencyError``.\n\n >>> graph = {1: [2], 2: [3, 4], 5: [3], 3: [6]}\n >>> def get_successive_nodes(node):\n ... return graph.get(node, [])\n >>> edges = set()\n >>> def append_to_edges(prev, nxt):\n ... edges.add((prev, nxt))\n >>> traverse_graph([1, 5], get_successive_nodes, append_to_edges)\n >>> sorted(edges)\n [(1, 2), (2, 3), (2, 4), (3, 6), (5, 3)]\n\n You can also use this function to detect cyclic graphs:\n\n >>> graph = {1: [2], 2: [3], 3: [1]}\n >>> traverse_graph([1], get_successive_nodes)\n Traceback (most recent call last):\n ...\n coalib.core.CircularDependencyError.CircularDependencyError: ...\n\n :param start_nodes:\n The nodes where to start traversing the graph.\n :param get_successive_nodes:\n A callable that takes in a node and returns an iterable of nodes to\n traverse next.\n :param run_on_edge:\n A callable that is run on each edge during traversing. Takes in two\n parameters, the previous- and next-node which form an edge. 
The default\n is an empty function.\n :raises CircularDependencyError:\n Raised when the graph is cyclic.\n \"\"\"\n path = set()\n visited_nodes = set()\n\n def visit(node):\n if node not in visited_nodes:\n visited_nodes.add(node)\n path.add(node)\n\n for subnode in get_successive_nodes(node):\n run_on_edge(node, subnode)\n\n if subnode in path:\n raise CircularDependencyError(subnode)\n\n visit(subnode)\n\n path.remove(node)\n\n for node in start_nodes:\n visit(node)\n", "path": "coalib/core/Graphs.py"}, {"content": "from itertools import chain\n\nfrom coalib.core.Graphs import traverse_graph\n\n\nclass DependencyTracker:\n \"\"\"\n A ``DependencyTracker`` allows to register and manage dependencies between\n objects.\n\n This class uses a directed graph to track relations.\n\n Add a dependency relation between two objects:\n\n >>> object1 = object()\n >>> object2 = object()\n >>> tracker = DependencyTracker()\n >>> tracker.add(object2, object1)\n\n This would define that ``object1`` is dependent on ``object2``.\n\n If you define that ``object2`` has its dependency duty fulfilled, you can\n resolve it:\n\n >>> resolved = tracker.resolve(object2)\n >>> resolved\n {<object object at ...>}\n >>> resolved_object = resolved.pop()\n >>> resolved_object is object1\n True\n\n This returns all objects that are now freed, meaning they have no\n dependencies any more.\n\n >>> object3 = object()\n >>> tracker.add(object2, object1)\n >>> tracker.add(object3, object1)\n >>> tracker.resolve(object2)\n set()\n >>> tracker.resolve(object3)\n {<object object at ...>}\n\n The ones who instantiate a ``DependencyTracker`` are responsible for\n resolving dependencies in the right order. Dependencies which are itself\n dependent will be forcefully resolved and removed from their according\n dependencies too.\n \"\"\"\n\n def __init__(self):\n self._dependency_dict = {}\n\n def get_dependants(self, dependency):\n \"\"\"\n Returns all immediate dependants for the given dependency.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 3)\n >>> tracker.get_dependants(0)\n {1, 2}\n >>> tracker.get_dependants(1)\n {3}\n >>> tracker.get_dependants(2)\n set()\n\n :param dependency:\n The dependency to retrieve all dependants from.\n :return:\n A set of dependants.\n \"\"\"\n try:\n return set(self._dependency_dict[dependency])\n except KeyError:\n return set()\n\n def get_dependencies(self, dependant):\n \"\"\"\n Returns all immediate dependencies of a given dependant.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 2)\n >>> tracker.get_dependencies(0)\n set()\n >>> tracker.get_dependencies(1)\n {0}\n >>> tracker.get_dependencies(2)\n {0, 1}\n\n :param dependant:\n The dependant to retrieve all dependencies from.\n :return:\n A set of dependencies.\n \"\"\"\n return set(\n dependency\n for dependency, dependants in self._dependency_dict.items()\n if dependant in dependants)\n\n def get_all_dependants(self, dependency):\n \"\"\"\n Returns a set of all dependants of the given dependency, even\n indirectly related ones.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(1, 2)\n >>> tracker.get_all_dependants(0)\n {1, 2}\n\n :param dependency:\n The dependency to get all dependants for.\n :return:\n A set of dependants.\n \"\"\"\n dependants = set()\n\n def append_to_dependants(prev, nxt):\n dependants.add(nxt)\n\n traverse_graph(\n [dependency],\n lambda node: 
self._dependency_dict.get(node, frozenset()),\n append_to_dependants)\n\n return dependants\n\n def get_all_dependencies(self, dependant):\n \"\"\"\n Returns a set of all dependencies of the given dependants, even\n indirectly related ones.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(1, 2)\n >>> tracker.get_all_dependencies(2)\n {0, 1}\n\n :param dependant:\n The dependant to get all dependencies for.\n :return:\n A set of dependencies.\n \"\"\"\n dependencies = set()\n\n def append_to_dependencies(prev, nxt):\n dependencies.add(nxt)\n\n traverse_graph(\n [dependant],\n lambda node:\n {dependency\n for dependency, dependants in self._dependency_dict.items()\n if node in dependants},\n append_to_dependencies)\n\n return dependencies\n\n @property\n def dependants(self):\n \"\"\"\n Returns a set of all registered dependants.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 3)\n >>> tracker.dependants\n {1, 2, 3}\n \"\"\"\n return set(chain.from_iterable(self._dependency_dict.values()))\n\n @property\n def dependencies(self):\n \"\"\"\n Returns a set of all registered dependencies.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 3)\n >>> tracker.dependencies\n {0, 1}\n \"\"\"\n return set(self._dependency_dict.keys())\n\n def __iter__(self):\n \"\"\"\n Returns an iterator that iterates over all dependency relations.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 2)\n >>> for dependency, dependant in sorted(tracker):\n ... print(dependency, '->', dependant)\n 0 -> 1\n 0 -> 2\n 1 -> 2\n \"\"\"\n return ((dependency, dependant)\n for dependency, dependants in self._dependency_dict.items()\n for dependant in dependants)\n\n def add(self, dependency, dependant):\n \"\"\"\n Add a dependency relation.\n\n This function does not check for circular dependencies.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.resolve(0)\n {1, 2}\n\n :param dependency:\n The object that is the dependency.\n :param dependant:\n The object that is the dependant.\n \"\"\"\n if dependency not in self._dependency_dict:\n self._dependency_dict[dependency] = set()\n\n self._dependency_dict[dependency].add(dependant)\n\n def resolve(self, dependency):\n \"\"\"\n Resolves all dependency-relations from the given dependency, and frees\n and returns dependants with no more dependencies. If the given\n dependency is itself a dependant, all those relations are also removed.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(2, 3)\n >>> tracker.resolve(0)\n {1, 2}\n >>> tracker.resolve(2)\n {3}\n >>> tracker.resolve(2)\n set()\n\n :param dependency:\n The dependency.\n :return:\n Returns a set of dependants whose dependencies were all resolved.\n \"\"\"\n # Check if dependency has itself dependencies which aren't resolved,\n # these need to be removed too. 
This operation does not free any\n # dependencies.\n dependencies_to_remove = []\n for tracked_dependency, dependants in self._dependency_dict.items():\n if dependency in dependants:\n dependants.remove(dependency)\n\n # If dependants set is now empty, schedule dependency for\n # removal from dependency_dict.\n if not dependants:\n dependencies_to_remove.append(tracked_dependency)\n\n for tracked_dependency in dependencies_to_remove:\n del self._dependency_dict[tracked_dependency]\n\n # Now free dependants which do depend on the given dependency.\n possible_freed_dependants = self._dependency_dict.pop(\n dependency, set())\n non_free_dependants = set()\n\n for possible_freed_dependant in possible_freed_dependants:\n # Check if all dependencies of dependants from above are satisfied.\n # If so, there are no more dependencies for dependant. Thus it's\n # resolved.\n for dependants in self._dependency_dict.values():\n if possible_freed_dependant in dependants:\n non_free_dependants.add(possible_freed_dependant)\n break\n\n # Remaining dependents are officially resolved.\n return possible_freed_dependants - non_free_dependants\n\n def check_circular_dependencies(self):\n \"\"\"\n Checks whether there are circular dependency conflicts.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(1, 0)\n >>> tracker.check_circular_dependencies()\n Traceback (most recent call last):\n ...\n coalib.core.CircularDependencyError.CircularDependencyError: ...\n\n :raises CircularDependencyError:\n Raised on circular dependency conflicts.\n \"\"\"\n traverse_graph(\n self._dependency_dict.keys(),\n lambda node: self._dependency_dict.get(node, frozenset()))\n\n @property\n def are_dependencies_resolved(self):\n \"\"\"\n Checks whether all dependencies in this ``DependencyTracker`` instance\n are resolved.\n\n >>> tracker = DependencyTracker()\n >>> tracker.are_dependencies_resolved\n True\n >>> tracker.add(0, 1)\n >>> tracker.are_dependencies_resolved\n False\n >>> tracker.resolve(0)\n {1}\n >>> tracker.are_dependencies_resolved\n True\n\n :return:\n ``True`` when all dependencies resolved, ``False`` if not.\n \"\"\"\n return not self._dependency_dict\n", "path": "coalib/core/DependencyTracker.py"}], "after_files": [{"content": "from coalib.core.CircularDependencyError import CircularDependencyError\n\n\ndef traverse_graph(start_nodes, get_successive_nodes,\n run_on_edge=lambda prev, nxt: None):\n \"\"\"\n Traverses all edges of a directed, possibly disconnected graph once.\n Detects cyclic graphs by raising a ``CircularDependencyError``.\n\n >>> graph = {1: [2], 2: [3, 4], 5: [3], 3: [6]}\n >>> def get_successive_nodes(node):\n ... return graph.get(node, [])\n >>> edges = set()\n >>> def append_to_edges(prev, nxt):\n ... edges.add((prev, nxt))\n >>> traverse_graph([1, 5], get_successive_nodes, append_to_edges)\n >>> sorted(edges)\n [(1, 2), (2, 3), (2, 4), (3, 6), (5, 3)]\n\n You can also use this function to detect cyclic graphs:\n\n >>> graph = {1: [2], 2: [3], 3: [1]}\n >>> traverse_graph([1], get_successive_nodes)\n Traceback (most recent call last):\n ...\n coalib.core.CircularDependencyError.CircularDependencyError: ...\n\n :param start_nodes:\n The nodes where to start traversing the graph.\n :param get_successive_nodes:\n A callable that takes in a node and returns an iterable of nodes to\n traverse next.\n :param run_on_edge:\n A callable that is run on each edge during traversing. Takes in two\n parameters, the previous- and next-node which form an edge. 
The default\n is an empty function.\n :raises coalib.core.CircularDependencyError.CircularDependencyError:\n Raised when the graph is cyclic.\n \"\"\"\n path = set()\n visited_nodes = set()\n\n def visit(node):\n if node not in visited_nodes:\n visited_nodes.add(node)\n path.add(node)\n\n for subnode in get_successive_nodes(node):\n run_on_edge(node, subnode)\n\n if subnode in path:\n raise CircularDependencyError(subnode)\n\n visit(subnode)\n\n path.remove(node)\n\n for node in start_nodes:\n visit(node)\n", "path": "coalib/core/Graphs.py"}, {"content": "from itertools import chain\n\nfrom coalib.core.Graphs import traverse_graph\n\n\nclass DependencyTracker:\n \"\"\"\n A ``DependencyTracker`` allows to register and manage dependencies between\n objects.\n\n This class uses a directed graph to track relations.\n\n Add a dependency relation between two objects:\n\n >>> object1 = object()\n >>> object2 = object()\n >>> tracker = DependencyTracker()\n >>> tracker.add(object2, object1)\n\n This would define that ``object1`` is dependent on ``object2``.\n\n If you define that ``object2`` has its dependency duty fulfilled, you can\n resolve it:\n\n >>> resolved = tracker.resolve(object2)\n >>> resolved\n {<object object at ...>}\n >>> resolved_object = resolved.pop()\n >>> resolved_object is object1\n True\n\n This returns all objects that are now freed, meaning they have no\n dependencies any more.\n\n >>> object3 = object()\n >>> tracker.add(object2, object1)\n >>> tracker.add(object3, object1)\n >>> tracker.resolve(object2)\n set()\n >>> tracker.resolve(object3)\n {<object object at ...>}\n\n The ones who instantiate a ``DependencyTracker`` are responsible for\n resolving dependencies in the right order. Dependencies which are itself\n dependent will be forcefully resolved and removed from their according\n dependencies too.\n \"\"\"\n\n def __init__(self):\n self._dependency_dict = {}\n\n def get_dependants(self, dependency):\n \"\"\"\n Returns all immediate dependants for the given dependency.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 3)\n >>> tracker.get_dependants(0)\n {1, 2}\n >>> tracker.get_dependants(1)\n {3}\n >>> tracker.get_dependants(2)\n set()\n\n :param dependency:\n The dependency to retrieve all dependants from.\n :return:\n A set of dependants.\n \"\"\"\n try:\n return set(self._dependency_dict[dependency])\n except KeyError:\n return set()\n\n def get_dependencies(self, dependant):\n \"\"\"\n Returns all immediate dependencies of a given dependant.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 2)\n >>> tracker.get_dependencies(0)\n set()\n >>> tracker.get_dependencies(1)\n {0}\n >>> tracker.get_dependencies(2)\n {0, 1}\n\n :param dependant:\n The dependant to retrieve all dependencies from.\n :return:\n A set of dependencies.\n \"\"\"\n return set(\n dependency\n for dependency, dependants in self._dependency_dict.items()\n if dependant in dependants)\n\n def get_all_dependants(self, dependency):\n \"\"\"\n Returns a set of all dependants of the given dependency, even\n indirectly related ones.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(1, 2)\n >>> tracker.get_all_dependants(0)\n {1, 2}\n\n :param dependency:\n The dependency to get all dependants for.\n :return:\n A set of dependants.\n \"\"\"\n dependants = set()\n\n def append_to_dependants(prev, nxt):\n dependants.add(nxt)\n\n traverse_graph(\n [dependency],\n 
lambda node: self._dependency_dict.get(node, frozenset()),\n append_to_dependants)\n\n return dependants\n\n def get_all_dependencies(self, dependant):\n \"\"\"\n Returns a set of all dependencies of the given dependants, even\n indirectly related ones.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(1, 2)\n >>> tracker.get_all_dependencies(2)\n {0, 1}\n\n :param dependant:\n The dependant to get all dependencies for.\n :return:\n A set of dependencies.\n \"\"\"\n dependencies = set()\n\n def append_to_dependencies(prev, nxt):\n dependencies.add(nxt)\n\n traverse_graph(\n [dependant],\n lambda node:\n {dependency\n for dependency, dependants in self._dependency_dict.items()\n if node in dependants},\n append_to_dependencies)\n\n return dependencies\n\n @property\n def dependants(self):\n \"\"\"\n Returns a set of all registered dependants.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 3)\n >>> tracker.dependants\n {1, 2, 3}\n \"\"\"\n return set(chain.from_iterable(self._dependency_dict.values()))\n\n @property\n def dependencies(self):\n \"\"\"\n Returns a set of all registered dependencies.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 3)\n >>> tracker.dependencies\n {0, 1}\n \"\"\"\n return set(self._dependency_dict.keys())\n\n def __iter__(self):\n \"\"\"\n Returns an iterator that iterates over all dependency relations.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(1, 2)\n >>> for dependency, dependant in sorted(tracker):\n ... print(dependency, '->', dependant)\n 0 -> 1\n 0 -> 2\n 1 -> 2\n \"\"\"\n return ((dependency, dependant)\n for dependency, dependants in self._dependency_dict.items()\n for dependant in dependants)\n\n def add(self, dependency, dependant):\n \"\"\"\n Add a dependency relation.\n\n This function does not check for circular dependencies.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.resolve(0)\n {1, 2}\n\n :param dependency:\n The object that is the dependency.\n :param dependant:\n The object that is the dependant.\n \"\"\"\n if dependency not in self._dependency_dict:\n self._dependency_dict[dependency] = set()\n\n self._dependency_dict[dependency].add(dependant)\n\n def resolve(self, dependency):\n \"\"\"\n Resolves all dependency-relations from the given dependency, and frees\n and returns dependants with no more dependencies. If the given\n dependency is itself a dependant, all those relations are also removed.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(0, 2)\n >>> tracker.add(2, 3)\n >>> tracker.resolve(0)\n {1, 2}\n >>> tracker.resolve(2)\n {3}\n >>> tracker.resolve(2)\n set()\n\n :param dependency:\n The dependency.\n :return:\n Returns a set of dependants whose dependencies were all resolved.\n \"\"\"\n # Check if dependency has itself dependencies which aren't resolved,\n # these need to be removed too. 
This operation does not free any\n # dependencies.\n dependencies_to_remove = []\n for tracked_dependency, dependants in self._dependency_dict.items():\n if dependency in dependants:\n dependants.remove(dependency)\n\n # If dependants set is now empty, schedule dependency for\n # removal from dependency_dict.\n if not dependants:\n dependencies_to_remove.append(tracked_dependency)\n\n for tracked_dependency in dependencies_to_remove:\n del self._dependency_dict[tracked_dependency]\n\n # Now free dependants which do depend on the given dependency.\n possible_freed_dependants = self._dependency_dict.pop(\n dependency, set())\n non_free_dependants = set()\n\n for possible_freed_dependant in possible_freed_dependants:\n # Check if all dependencies of dependants from above are satisfied.\n # If so, there are no more dependencies for dependant. Thus it's\n # resolved.\n for dependants in self._dependency_dict.values():\n if possible_freed_dependant in dependants:\n non_free_dependants.add(possible_freed_dependant)\n break\n\n # Remaining dependents are officially resolved.\n return possible_freed_dependants - non_free_dependants\n\n def check_circular_dependencies(self):\n \"\"\"\n Checks whether there are circular dependency conflicts.\n\n >>> tracker = DependencyTracker()\n >>> tracker.add(0, 1)\n >>> tracker.add(1, 0)\n >>> tracker.check_circular_dependencies()\n Traceback (most recent call last):\n ...\n coalib.core.CircularDependencyError.CircularDependencyError: ...\n\n :raises coalib.core.CircularDependencyError.CircularDependencyError:\n Raised on circular dependency conflicts.\n \"\"\"\n traverse_graph(\n self._dependency_dict.keys(),\n lambda node: self._dependency_dict.get(node, frozenset()))\n\n @property\n def are_dependencies_resolved(self):\n \"\"\"\n Checks whether all dependencies in this ``DependencyTracker`` instance\n are resolved.\n\n >>> tracker = DependencyTracker()\n >>> tracker.are_dependencies_resolved\n True\n >>> tracker.add(0, 1)\n >>> tracker.are_dependencies_resolved\n False\n >>> tracker.resolve(0)\n {1}\n >>> tracker.are_dependencies_resolved\n True\n\n :return:\n ``True`` when all dependencies resolved, ``False`` if not.\n \"\"\"\n return not self._dependency_dict\n", "path": "coalib/core/DependencyTracker.py"}]}
| 3,975 | 247 |
gh_patches_debug_30479
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-914
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove PESetupHist from cime.setup
When running cime.setup --clean, we don't need to save files to PESetupHist
--- END ISSUE ---
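As a hedged sanity check (not part of the original issue; the script name and flag below are assumed from the report's `cime.setup --clean`), one could verify after the change that cleaning a case no longer writes backup files:

```python
# Hypothetical check: after removing the backup logic, running the clean step
# in a case directory should leave no PESetupHist directory behind.
import os
import subprocess

def clean_leaves_no_backup(caseroot):
    # "./case.setup --clean" is an assumption based on the issue text.
    subprocess.check_call(["./case.setup", "--clean"], cwd=caseroot)
    leftover = os.path.join(caseroot, "PESetupHist")
    return not os.path.isdir(leftover)
```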
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/case_setup.py`
Content:
```
1 """
2 Library for case.setup.
3 """
4
5 from CIME.XML.standard_module_setup import *
6
7 from CIME.check_lockedfiles import check_lockedfiles
8 from CIME.preview_namelists import create_dirs, create_namelists
9 from CIME.XML.env_mach_pes import EnvMachPes
10 from CIME.XML.machines import Machines
11 from CIME.BuildTools.configure import configure
12 from CIME.utils import append_status, get_cime_root
13 from CIME.test_status import *
14
15 import shutil, time, glob
16
17 logger = logging.getLogger(__name__)
18
19 ###############################################################################
20 def _check_pelayouts_require_rebuild(case, models):
21 ###############################################################################
22 """
23 Create if we require a rebuild, expects cwd is caseroot
24 """
25 locked_pes = "LockedFiles/env_mach_pes.xml"
26 if os.path.exists(locked_pes):
27 # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined
28 # for any component
29 env_mach_pes_locked = EnvMachPes(infile=locked_pes)
30 for comp in models:
31 if case.get_value("%s_PE_CHANGE_REQUIRES_REBUILD" % comp):
32 # Changing these values in env_mach_pes.xml will force
33 # you to clean the corresponding component
34 old_tasks = env_mach_pes_locked.get_value("NTASKS_%s" % comp)
35 old_threads = env_mach_pes_locked.get_value("NTHRDS_%s" % comp)
36 old_inst = env_mach_pes_locked.get_value("NINST_%s" % comp)
37
38 new_tasks = case.get_value("NTASKS_%s" % comp)
39 new_threads = case.get_value("NTHRDS_%s" % comp)
40 new_inst = case.get_value("NINST_%s" % comp)
41
42 if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:
43 logger.warn("%s pe change requires clean build" % comp)
44 cleanflag = comp.lower()
45 run_cmd_no_fail("./case.build --clean %s" % cleanflag)
46
47 os.remove(locked_pes)
48
49 ###############################################################################
50 def _build_usernl_files(case, model, comp):
51 ###############################################################################
52 """
53 Create user_nl_xxx files, expects cwd is caseroot
54 """
55 model = model.upper()
56 model_file = case.get_value("CONFIG_%s_FILE" % model)
57 model_dir = os.path.dirname(model_file)
58
59 expect(os.path.isdir(model_dir),
60 "cannot find cime_config directory %s for component %s" % (model_dir, comp))
61
62 if comp == "cpl":
63 if not os.path.exists("user_nl_cpl"):
64 shutil.copy(os.path.join(model_dir, "user_nl_cpl"), ".")
65 else:
66 ninst = case.get_value("NINST_%s" % model)
67 nlfile = "user_nl_%s" % comp
68 model_nl = os.path.join(model_dir, nlfile)
69 if ninst > 1:
70 for inst_counter in xrange(1, ninst+1):
71 inst_nlfile = "%s_%04d" % (nlfile, inst_counter)
72 if not os.path.exists(inst_nlfile):
73 # If there is a user_nl_foo in the case directory, copy it
74 # to user_nl_foo_INST; otherwise, copy the original
75 # user_nl_foo from model_dir
76 if os.path.exists(nlfile):
77 shutil.copy(nlfile, inst_nlfile)
78 elif os.path.exists(model_nl):
79 shutil.copy(model_nl, inst_nlfile)
80 else:
81 # ninst = 1
82 if not os.path.exists(nlfile):
83 if os.path.exists(model_nl):
84 shutil.copy(model_nl, nlfile)
85
86 ###############################################################################
87 def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False):
88 ###############################################################################
89 os.chdir(caseroot)
90 msg = "case.setup starting"
91 append_status(msg, caseroot=caseroot, sfile="CaseStatus")
92
93 cimeroot = get_cime_root(case)
94
95 # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests
96 din_loc_root = case.get_value("DIN_LOC_ROOT")
97 testcase = case.get_value("TESTCASE")
98 expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"),
99 "inputdata root is not a directory: \"$din_loc_root\" ")
100
101 # Check that userdefine settings are specified before expanding variable
102 for vid, value in case:
103 expect(not (type(value) is str and "USERDEFINED_required_build" in value),
104 "Parameter '%s' must be defined" % vid)
105
106 # Create batch script
107 if reset or clean:
108 # Clean batch script
109
110 backup_dir = "PESetupHist/b.%s" % time.strftime("%y%m%d-%H%M%S")
111 if not os.path.isdir(backup_dir):
112 os.makedirs(backup_dir)
113
114 # back up relevant files
115 for fileglob in ["case.run", "env_build.xml", "env_mach_pes.xml", "Macros*"]:
116 for filename in glob.glob(fileglob):
117 shutil.copy(filename, backup_dir)
118 if os.path.exists("case.run"):
119 os.remove("case.run")
120
121 # only do the following if are NOT in testmode
122 if not test_mode:
123 # rebuild the models (even on restart)
124 case.set_value("BUILD_COMPLETE", False)
125
126 # backup and then clean test script
127 if os.path.exists("case.test"):
128 shutil.copy("case.test", backup_dir)
129 os.remove("case.test")
130 logger.info("Successfully cleaned test script case.test")
131
132 if os.path.exists("case.testdriver"):
133 shutil.copy("case.testdriver", backup_dir)
134 os.remove("case.testdriver")
135 logger.info("Successfully cleaned test script case.testdriver")
136
137 logger.info("Successfully cleaned batch script case.run")
138
139 logger.info("Successfully cleaned batch script case.run")
140 logger.info("Some files have been saved to %s" % backup_dir)
141
142 msg = "case.setup clean complete"
143 append_status(msg, caseroot=caseroot, sfile="CaseStatus")
144
145 if not clean:
146 case.load_env()
147
148 models = case.get_values("COMP_CLASSES")
149 mach = case.get_value("MACH")
150 compiler = case.get_value("COMPILER")
151 debug = case.get_value("DEBUG")
152 mpilib = case.get_value("MPILIB")
153 sysos = case.get_value("OS")
154 expect(mach is not None, "xml variable MACH is not set")
155
156 # creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler
157 # and env_mach_specific.xml if they don't already exist.
158 if not os.path.isfile("Macros.make") or not os.path.isfile("env_mach_specific.xml"):
159 configure(Machines(machine=mach), caseroot, ["Makefile"], compiler, mpilib, debug, sysos)
160
161 # Set tasks to 1 if mpi-serial library
162 if mpilib == "mpi-serial":
163 for vid, value in case:
164 if vid.startswith("NTASKS_") and value != 1:
165 case.set_value(vid, 1)
166
167 # Check ninst.
168 # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.
169 for comp in models:
170 if comp == "DRV":
171 continue
172 ninst = case.get_value("NINST_%s" % comp)
173 ntasks = case.get_value("NTASKS_%s" % comp)
174 if ninst > ntasks:
175 if ntasks == 1:
176 case.set_value("NTASKS_%s" % comp, ninst)
177 else:
178 expect(False, "NINST_%s value %d greater than NTASKS_%s %d" % (comp, ninst, comp, ntasks))
179
180 if os.path.exists("case.run"):
181 logger.info("Machine/Decomp/Pes configuration has already been done ...skipping")
182 else:
183 _check_pelayouts_require_rebuild(case, models)
184
185 if os.path.exists("LockedFiles/env_build.xml"):
186 os.remove("LockedFiles/env_build.xml")
187
188 case.flush()
189 check_lockedfiles()
190 env_mach_pes = case.get_env("mach_pes")
191 pestot = env_mach_pes.get_total_tasks(models)
192 logger.debug("at update TOTALPES = %s"%pestot)
193 case.set_value("TOTALPES", pestot)
194 thread_count = env_mach_pes.get_max_thread_count(models)
195 if thread_count > 1:
196 case.set_value("BUILD_THREADED", True)
197
198 expect(not (case.get_value("BUILD_THREADED") and compiler == "nag"),
199 "it is not possible to run with OpenMP if using the NAG Fortran compiler")
200
201
202 cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value("MACH"))
203 case.set_value("COST_PES", cost_pes)
204
205 # create batch file
206 logger.info("Creating batch script case.run")
207
208 # Use BatchFactory to get the appropriate instance of a BatchMaker,
209 # use it to create our batch scripts
210 env_batch = case.get_env("batch")
211 num_nodes = env_mach_pes.get_total_nodes(pestot, thread_count)
212 tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count)
213 for job in env_batch.get_jobs():
214 input_batch_script = os.path.join(case.get_value("MACHDIR"), env_batch.get_value('template', subgroup=job))
215 if job == "case.test" and testcase is not None and not test_mode:
216 logger.info("Writing %s script" % job)
217 testscript = os.path.join(cimeroot, "scripts", "Testing", "Testcases", "%s_script" % testcase)
218 # Short term fix to be removed when csh tests are removed
219 if not os.path.exists(testscript):
220 env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)
221 elif job != "case.test":
222 logger.info("Writing %s script from input template %s" % (job, input_batch_script))
223 env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)
224
225 # Make a copy of env_mach_pes.xml in order to be able
226 # to check that it does not change once case.setup is invoked
227 logger.info("Locking file env_mach_pes.xml")
228 case.flush()
229 logger.debug("at copy TOTALPES = %s"%case.get_value("TOTALPES"))
230 shutil.copy("env_mach_pes.xml", "LockedFiles")
231
232 # Create user_nl files for the required number of instances
233 if not os.path.exists("user_nl_cpl"):
234 logger.info("Creating user_nl_xxx files for components and cpl")
235 # loop over models
236 for model in models:
237 comp = case.get_value("COMP_%s" % model)
238 logger.info("Building %s usernl files"%model)
239 _build_usernl_files(case, model, comp)
240 if comp == "cism":
241 run_cmd_no_fail("%s/../components/cism/cime_config/cism.template %s" % (cimeroot, caseroot))
242
243 _build_usernl_files(case, "drv", "cpl")
244
245 # Create needed directories for case
246 create_dirs(case)
247
248 logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building")
249
250 # Create test script if appropriate
251 # Short term fix to be removed when csh tests are removed
252 if os.path.exists("env_test.xml"):
253 if not os.path.exists("case.test"):
254 logger.info("Starting testcase.setup")
255 run_cmd_no_fail("./testcase.setup -caseroot %s" % caseroot)
256 logger.info("Finished testcase.setup")
257
258 # some tests need namelists created here (ERP)
259 if test_mode:
260 create_namelists(case)
261
262 msg = "case.setup complete"
263 append_status(msg, caseroot=caseroot, sfile="CaseStatus")
264
265 # Record env information
266 env_module = case.get_env("mach_specific")
267 env_module.make_env_mach_specific_file(compiler, debug, mpilib, "sh")
268 env_module.make_env_mach_specific_file(compiler, debug, mpilib, "csh")
269 with open("software_environment.txt", "w") as f:
270 f.write(env_module.list_modules())
271 run_cmd_no_fail("echo -e '\n' >> software_environment.txt && \
272 env >> software_environment.txt")
273
274 ###############################################################################
275 def case_setup(case, clean=False, test_mode=False, reset=False):
276 ###############################################################################
277 caseroot, casebaseid = case.get_value("CASEROOT"), case.get_value("CASEBASEID")
278 if case.get_value("TEST"):
279 test_name = casebaseid if casebaseid is not None else case.get_value("CASE")
280 with TestStatus(test_dir=caseroot, test_name=test_name) as ts:
281 try:
282 _case_setup_impl(case, caseroot, clean=clean, test_mode=test_mode, reset=reset)
283 except:
284 ts.set_status(SETUP_PHASE, TEST_FAIL_STATUS)
285 raise
286 else:
287 ts.set_status(SETUP_PHASE, TEST_PASS_STATUS)
288 else:
289 _case_setup_impl(case, caseroot, clean=clean, test_mode=test_mode, reset=reset)
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/utils/python/CIME/case_setup.py b/utils/python/CIME/case_setup.py
--- a/utils/python/CIME/case_setup.py
+++ b/utils/python/CIME/case_setup.py
@@ -105,16 +105,7 @@
# Create batch script
if reset or clean:
- # Clean batch script
-
- backup_dir = "PESetupHist/b.%s" % time.strftime("%y%m%d-%H%M%S")
- if not os.path.isdir(backup_dir):
- os.makedirs(backup_dir)
-
# back up relevant files
- for fileglob in ["case.run", "env_build.xml", "env_mach_pes.xml", "Macros*"]:
- for filename in glob.glob(fileglob):
- shutil.copy(filename, backup_dir)
if os.path.exists("case.run"):
os.remove("case.run")
@@ -125,20 +116,15 @@
# backup and then clean test script
if os.path.exists("case.test"):
- shutil.copy("case.test", backup_dir)
os.remove("case.test")
logger.info("Successfully cleaned test script case.test")
if os.path.exists("case.testdriver"):
- shutil.copy("case.testdriver", backup_dir)
os.remove("case.testdriver")
logger.info("Successfully cleaned test script case.testdriver")
logger.info("Successfully cleaned batch script case.run")
- logger.info("Successfully cleaned batch script case.run")
- logger.info("Some files have been saved to %s" % backup_dir)
-
msg = "case.setup clean complete"
append_status(msg, caseroot=caseroot, sfile="CaseStatus")
|
{"golden_diff": "diff --git a/utils/python/CIME/case_setup.py b/utils/python/CIME/case_setup.py\n--- a/utils/python/CIME/case_setup.py\n+++ b/utils/python/CIME/case_setup.py\n@@ -105,16 +105,7 @@\n \n # Create batch script\n if reset or clean:\n- # Clean batch script\n-\n- backup_dir = \"PESetupHist/b.%s\" % time.strftime(\"%y%m%d-%H%M%S\")\n- if not os.path.isdir(backup_dir):\n- os.makedirs(backup_dir)\n-\n # back up relevant files\n- for fileglob in [\"case.run\", \"env_build.xml\", \"env_mach_pes.xml\", \"Macros*\"]:\n- for filename in glob.glob(fileglob):\n- shutil.copy(filename, backup_dir)\n if os.path.exists(\"case.run\"):\n os.remove(\"case.run\")\n \n@@ -125,20 +116,15 @@\n \n # backup and then clean test script\n if os.path.exists(\"case.test\"):\n- shutil.copy(\"case.test\", backup_dir)\n os.remove(\"case.test\")\n logger.info(\"Successfully cleaned test script case.test\")\n \n if os.path.exists(\"case.testdriver\"):\n- shutil.copy(\"case.testdriver\", backup_dir)\n os.remove(\"case.testdriver\")\n logger.info(\"Successfully cleaned test script case.testdriver\")\n \n logger.info(\"Successfully cleaned batch script case.run\")\n \n- logger.info(\"Successfully cleaned batch script case.run\")\n- logger.info(\"Some files have been saved to %s\" % backup_dir)\n-\n msg = \"case.setup clean complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n", "issue": "Remove PESetupHist from cime.setup\nWhen running cime.setup --clean, we don't need to save files to PESetupHist\n\n", "before_files": [{"content": "\"\"\"\nLibrary for case.setup.\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.preview_namelists import create_dirs, create_namelists\nfrom CIME.XML.env_mach_pes import EnvMachPes\nfrom CIME.XML.machines import Machines\nfrom CIME.BuildTools.configure import configure\nfrom CIME.utils import append_status, get_cime_root\nfrom CIME.test_status import *\n\nimport shutil, time, glob\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _check_pelayouts_require_rebuild(case, models):\n###############################################################################\n \"\"\"\n Create if we require a rebuild, expects cwd is caseroot\n \"\"\"\n locked_pes = \"LockedFiles/env_mach_pes.xml\"\n if os.path.exists(locked_pes):\n # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined\n # for any component\n env_mach_pes_locked = EnvMachPes(infile=locked_pes)\n for comp in models:\n if case.get_value(\"%s_PE_CHANGE_REQUIRES_REBUILD\" % comp):\n # Changing these values in env_mach_pes.xml will force\n # you to clean the corresponding component\n old_tasks = env_mach_pes_locked.get_value(\"NTASKS_%s\" % comp)\n old_threads = env_mach_pes_locked.get_value(\"NTHRDS_%s\" % comp)\n old_inst = env_mach_pes_locked.get_value(\"NINST_%s\" % comp)\n\n new_tasks = case.get_value(\"NTASKS_%s\" % comp)\n new_threads = case.get_value(\"NTHRDS_%s\" % comp)\n new_inst = case.get_value(\"NINST_%s\" % comp)\n\n if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:\n logger.warn(\"%s pe change requires clean build\" % comp)\n cleanflag = comp.lower()\n run_cmd_no_fail(\"./case.build --clean %s\" % cleanflag)\n\n os.remove(locked_pes)\n\n###############################################################################\ndef _build_usernl_files(case, model, 
comp):\n###############################################################################\n \"\"\"\n Create user_nl_xxx files, expects cwd is caseroot\n \"\"\"\n model = model.upper()\n model_file = case.get_value(\"CONFIG_%s_FILE\" % model)\n model_dir = os.path.dirname(model_file)\n\n expect(os.path.isdir(model_dir),\n \"cannot find cime_config directory %s for component %s\" % (model_dir, comp))\n\n if comp == \"cpl\":\n if not os.path.exists(\"user_nl_cpl\"):\n shutil.copy(os.path.join(model_dir, \"user_nl_cpl\"), \".\")\n else:\n ninst = case.get_value(\"NINST_%s\" % model)\n nlfile = \"user_nl_%s\" % comp\n model_nl = os.path.join(model_dir, nlfile)\n if ninst > 1:\n for inst_counter in xrange(1, ninst+1):\n inst_nlfile = \"%s_%04d\" % (nlfile, inst_counter)\n if not os.path.exists(inst_nlfile):\n # If there is a user_nl_foo in the case directory, copy it\n # to user_nl_foo_INST; otherwise, copy the original\n # user_nl_foo from model_dir\n if os.path.exists(nlfile):\n shutil.copy(nlfile, inst_nlfile)\n elif os.path.exists(model_nl):\n shutil.copy(model_nl, inst_nlfile)\n else:\n # ninst = 1\n if not os.path.exists(nlfile):\n if os.path.exists(model_nl):\n shutil.copy(model_nl, nlfile)\n\n###############################################################################\ndef _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False):\n###############################################################################\n os.chdir(caseroot)\n msg = \"case.setup starting\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n cimeroot = get_cime_root(case)\n\n # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests\n din_loc_root = case.get_value(\"DIN_LOC_ROOT\")\n testcase = case.get_value(\"TESTCASE\")\n expect(not (not os.path.isdir(din_loc_root) and testcase != \"SBN\"),\n \"inputdata root is not a directory: \\\"$din_loc_root\\\" \")\n\n # Check that userdefine settings are specified before expanding variable\n for vid, value in case:\n expect(not (type(value) is str and \"USERDEFINED_required_build\" in value),\n \"Parameter '%s' must be defined\" % vid)\n\n # Create batch script\n if reset or clean:\n # Clean batch script\n\n backup_dir = \"PESetupHist/b.%s\" % time.strftime(\"%y%m%d-%H%M%S\")\n if not os.path.isdir(backup_dir):\n os.makedirs(backup_dir)\n\n # back up relevant files\n for fileglob in [\"case.run\", \"env_build.xml\", \"env_mach_pes.xml\", \"Macros*\"]:\n for filename in glob.glob(fileglob):\n shutil.copy(filename, backup_dir)\n if os.path.exists(\"case.run\"):\n os.remove(\"case.run\")\n\n # only do the following if are NOT in testmode\n if not test_mode:\n # rebuild the models (even on restart)\n case.set_value(\"BUILD_COMPLETE\", False)\n\n # backup and then clean test script\n if os.path.exists(\"case.test\"):\n shutil.copy(\"case.test\", backup_dir)\n os.remove(\"case.test\")\n logger.info(\"Successfully cleaned test script case.test\")\n\n if os.path.exists(\"case.testdriver\"):\n shutil.copy(\"case.testdriver\", backup_dir)\n os.remove(\"case.testdriver\")\n logger.info(\"Successfully cleaned test script case.testdriver\")\n\n logger.info(\"Successfully cleaned batch script case.run\")\n\n logger.info(\"Successfully cleaned batch script case.run\")\n logger.info(\"Some files have been saved to %s\" % backup_dir)\n\n msg = \"case.setup clean complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n if not clean:\n case.load_env()\n\n models = case.get_values(\"COMP_CLASSES\")\n mach = 
case.get_value(\"MACH\")\n compiler = case.get_value(\"COMPILER\")\n debug = case.get_value(\"DEBUG\")\n mpilib = case.get_value(\"MPILIB\")\n sysos = case.get_value(\"OS\")\n expect(mach is not None, \"xml variable MACH is not set\")\n\n # creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler\n # and env_mach_specific.xml if they don't already exist.\n if not os.path.isfile(\"Macros.make\") or not os.path.isfile(\"env_mach_specific.xml\"):\n configure(Machines(machine=mach), caseroot, [\"Makefile\"], compiler, mpilib, debug, sysos)\n\n # Set tasks to 1 if mpi-serial library\n if mpilib == \"mpi-serial\":\n for vid, value in case:\n if vid.startswith(\"NTASKS_\") and value != 1:\n case.set_value(vid, 1)\n\n # Check ninst.\n # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.\n for comp in models:\n if comp == \"DRV\":\n continue\n ninst = case.get_value(\"NINST_%s\" % comp)\n ntasks = case.get_value(\"NTASKS_%s\" % comp)\n if ninst > ntasks:\n if ntasks == 1:\n case.set_value(\"NTASKS_%s\" % comp, ninst)\n else:\n expect(False, \"NINST_%s value %d greater than NTASKS_%s %d\" % (comp, ninst, comp, ntasks))\n\n if os.path.exists(\"case.run\"):\n logger.info(\"Machine/Decomp/Pes configuration has already been done ...skipping\")\n else:\n _check_pelayouts_require_rebuild(case, models)\n\n if os.path.exists(\"LockedFiles/env_build.xml\"):\n os.remove(\"LockedFiles/env_build.xml\")\n\n case.flush()\n check_lockedfiles()\n env_mach_pes = case.get_env(\"mach_pes\")\n pestot = env_mach_pes.get_total_tasks(models)\n logger.debug(\"at update TOTALPES = %s\"%pestot)\n case.set_value(\"TOTALPES\", pestot)\n thread_count = env_mach_pes.get_max_thread_count(models)\n if thread_count > 1:\n case.set_value(\"BUILD_THREADED\", True)\n\n expect(not (case.get_value(\"BUILD_THREADED\") and compiler == \"nag\"),\n \"it is not possible to run with OpenMP if using the NAG Fortran compiler\")\n\n\n cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value(\"MACH\"))\n case.set_value(\"COST_PES\", cost_pes)\n\n # create batch file\n logger.info(\"Creating batch script case.run\")\n\n # Use BatchFactory to get the appropriate instance of a BatchMaker,\n # use it to create our batch scripts\n env_batch = case.get_env(\"batch\")\n num_nodes = env_mach_pes.get_total_nodes(pestot, thread_count)\n tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count)\n for job in env_batch.get_jobs():\n input_batch_script = os.path.join(case.get_value(\"MACHDIR\"), env_batch.get_value('template', subgroup=job))\n if job == \"case.test\" and testcase is not None and not test_mode:\n logger.info(\"Writing %s script\" % job)\n testscript = os.path.join(cimeroot, \"scripts\", \"Testing\", \"Testcases\", \"%s_script\" % testcase)\n # Short term fix to be removed when csh tests are removed\n if not os.path.exists(testscript):\n env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)\n elif job != \"case.test\":\n logger.info(\"Writing %s script from input template %s\" % (job, input_batch_script))\n env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)\n\n # Make a copy of env_mach_pes.xml in order to be able\n # to check that it does not change once case.setup is invoked\n logger.info(\"Locking file env_mach_pes.xml\")\n case.flush()\n logger.debug(\"at copy TOTALPES = 
%s\"%case.get_value(\"TOTALPES\"))\n shutil.copy(\"env_mach_pes.xml\", \"LockedFiles\")\n\n # Create user_nl files for the required number of instances\n if not os.path.exists(\"user_nl_cpl\"):\n logger.info(\"Creating user_nl_xxx files for components and cpl\")\n # loop over models\n for model in models:\n comp = case.get_value(\"COMP_%s\" % model)\n logger.info(\"Building %s usernl files\"%model)\n _build_usernl_files(case, model, comp)\n if comp == \"cism\":\n run_cmd_no_fail(\"%s/../components/cism/cime_config/cism.template %s\" % (cimeroot, caseroot))\n\n _build_usernl_files(case, \"drv\", \"cpl\")\n\n # Create needed directories for case\n create_dirs(case)\n\n logger.info(\"If an old case build already exists, might want to run \\'case.build --clean\\' before building\")\n\n # Create test script if appropriate\n # Short term fix to be removed when csh tests are removed\n if os.path.exists(\"env_test.xml\"):\n if not os.path.exists(\"case.test\"):\n logger.info(\"Starting testcase.setup\")\n run_cmd_no_fail(\"./testcase.setup -caseroot %s\" % caseroot)\n logger.info(\"Finished testcase.setup\")\n\n # some tests need namelists created here (ERP)\n if test_mode:\n create_namelists(case)\n\n msg = \"case.setup complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n # Record env information\n env_module = case.get_env(\"mach_specific\")\n env_module.make_env_mach_specific_file(compiler, debug, mpilib, \"sh\")\n env_module.make_env_mach_specific_file(compiler, debug, mpilib, \"csh\")\n with open(\"software_environment.txt\", \"w\") as f:\n f.write(env_module.list_modules())\n run_cmd_no_fail(\"echo -e '\\n' >> software_environment.txt && \\\n env >> software_environment.txt\")\n\n###############################################################################\ndef case_setup(case, clean=False, test_mode=False, reset=False):\n###############################################################################\n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n if case.get_value(\"TEST\"):\n test_name = casebaseid if casebaseid is not None else case.get_value(\"CASE\")\n with TestStatus(test_dir=caseroot, test_name=test_name) as ts:\n try:\n _case_setup_impl(case, caseroot, clean=clean, test_mode=test_mode, reset=reset)\n except:\n ts.set_status(SETUP_PHASE, TEST_FAIL_STATUS)\n raise\n else:\n ts.set_status(SETUP_PHASE, TEST_PASS_STATUS)\n else:\n _case_setup_impl(case, caseroot, clean=clean, test_mode=test_mode, reset=reset)\n", "path": "utils/python/CIME/case_setup.py"}], "after_files": [{"content": "\"\"\"\nLibrary for case.setup.\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.check_lockedfiles import check_lockedfiles\nfrom CIME.preview_namelists import create_dirs, create_namelists\nfrom CIME.XML.env_mach_pes import EnvMachPes\nfrom CIME.XML.machines import Machines\nfrom CIME.BuildTools.configure import configure\nfrom CIME.utils import append_status, get_cime_root\nfrom CIME.test_status import *\n\nimport shutil, time, glob\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _check_pelayouts_require_rebuild(case, models):\n###############################################################################\n \"\"\"\n Create if we require a rebuild, expects cwd is caseroot\n \"\"\"\n locked_pes = \"LockedFiles/env_mach_pes.xml\"\n if os.path.exists(locked_pes):\n # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined\n # for any 
component\n env_mach_pes_locked = EnvMachPes(infile=locked_pes)\n for comp in models:\n if case.get_value(\"%s_PE_CHANGE_REQUIRES_REBUILD\" % comp):\n # Changing these values in env_mach_pes.xml will force\n # you to clean the corresponding component\n old_tasks = env_mach_pes_locked.get_value(\"NTASKS_%s\" % comp)\n old_threads = env_mach_pes_locked.get_value(\"NTHRDS_%s\" % comp)\n old_inst = env_mach_pes_locked.get_value(\"NINST_%s\" % comp)\n\n new_tasks = case.get_value(\"NTASKS_%s\" % comp)\n new_threads = case.get_value(\"NTHRDS_%s\" % comp)\n new_inst = case.get_value(\"NINST_%s\" % comp)\n\n if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst:\n logger.warn(\"%s pe change requires clean build\" % comp)\n cleanflag = comp.lower()\n run_cmd_no_fail(\"./case.build --clean %s\" % cleanflag)\n\n os.remove(locked_pes)\n\n###############################################################################\ndef _build_usernl_files(case, model, comp):\n###############################################################################\n \"\"\"\n Create user_nl_xxx files, expects cwd is caseroot\n \"\"\"\n model = model.upper()\n model_file = case.get_value(\"CONFIG_%s_FILE\" % model)\n model_dir = os.path.dirname(model_file)\n\n expect(os.path.isdir(model_dir),\n \"cannot find cime_config directory %s for component %s\" % (model_dir, comp))\n\n if comp == \"cpl\":\n if not os.path.exists(\"user_nl_cpl\"):\n shutil.copy(os.path.join(model_dir, \"user_nl_cpl\"), \".\")\n else:\n ninst = case.get_value(\"NINST_%s\" % model)\n nlfile = \"user_nl_%s\" % comp\n model_nl = os.path.join(model_dir, nlfile)\n if ninst > 1:\n for inst_counter in xrange(1, ninst+1):\n inst_nlfile = \"%s_%04d\" % (nlfile, inst_counter)\n if not os.path.exists(inst_nlfile):\n # If there is a user_nl_foo in the case directory, copy it\n # to user_nl_foo_INST; otherwise, copy the original\n # user_nl_foo from model_dir\n if os.path.exists(nlfile):\n shutil.copy(nlfile, inst_nlfile)\n elif os.path.exists(model_nl):\n shutil.copy(model_nl, inst_nlfile)\n else:\n # ninst = 1\n if not os.path.exists(nlfile):\n if os.path.exists(model_nl):\n shutil.copy(model_nl, nlfile)\n\n###############################################################################\ndef _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False):\n###############################################################################\n os.chdir(caseroot)\n msg = \"case.setup starting\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n cimeroot = get_cime_root(case)\n\n # Check that $DIN_LOC_ROOT exists - and abort if not a namelist compare tests\n din_loc_root = case.get_value(\"DIN_LOC_ROOT\")\n testcase = case.get_value(\"TESTCASE\")\n expect(not (not os.path.isdir(din_loc_root) and testcase != \"SBN\"),\n \"inputdata root is not a directory: \\\"$din_loc_root\\\" \")\n\n # Check that userdefine settings are specified before expanding variable\n for vid, value in case:\n expect(not (type(value) is str and \"USERDEFINED_required_build\" in value),\n \"Parameter '%s' must be defined\" % vid)\n\n # Create batch script\n if reset or clean:\n # back up relevant files\n if os.path.exists(\"case.run\"):\n os.remove(\"case.run\")\n\n # only do the following if are NOT in testmode\n if not test_mode:\n # rebuild the models (even on restart)\n case.set_value(\"BUILD_COMPLETE\", False)\n\n # backup and then clean test script\n if os.path.exists(\"case.test\"):\n os.remove(\"case.test\")\n logger.info(\"Successfully cleaned 
test script case.test\")\n\n if os.path.exists(\"case.testdriver\"):\n os.remove(\"case.testdriver\")\n logger.info(\"Successfully cleaned test script case.testdriver\")\n\n logger.info(\"Successfully cleaned batch script case.run\")\n\n msg = \"case.setup clean complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n if not clean:\n case.load_env()\n\n models = case.get_values(\"COMP_CLASSES\")\n mach = case.get_value(\"MACH\")\n compiler = case.get_value(\"COMPILER\")\n debug = case.get_value(\"DEBUG\")\n mpilib = case.get_value(\"MPILIB\")\n sysos = case.get_value(\"OS\")\n expect(mach is not None, \"xml variable MACH is not set\")\n\n # creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler\n # and env_mach_specific.xml if they don't already exist.\n if not os.path.isfile(\"Macros.make\") or not os.path.isfile(\"env_mach_specific.xml\"):\n configure(Machines(machine=mach), caseroot, [\"Makefile\"], compiler, mpilib, debug, sysos)\n\n # Set tasks to 1 if mpi-serial library\n if mpilib == \"mpi-serial\":\n for vid, value in case:\n if vid.startswith(\"NTASKS_\") and value != 1:\n case.set_value(vid, 1)\n\n # Check ninst.\n # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component.\n for comp in models:\n if comp == \"DRV\":\n continue\n ninst = case.get_value(\"NINST_%s\" % comp)\n ntasks = case.get_value(\"NTASKS_%s\" % comp)\n if ninst > ntasks:\n if ntasks == 1:\n case.set_value(\"NTASKS_%s\" % comp, ninst)\n else:\n expect(False, \"NINST_%s value %d greater than NTASKS_%s %d\" % (comp, ninst, comp, ntasks))\n\n if os.path.exists(\"case.run\"):\n logger.info(\"Machine/Decomp/Pes configuration has already been done ...skipping\")\n else:\n _check_pelayouts_require_rebuild(case, models)\n\n if os.path.exists(\"LockedFiles/env_build.xml\"):\n os.remove(\"LockedFiles/env_build.xml\")\n\n case.flush()\n check_lockedfiles()\n env_mach_pes = case.get_env(\"mach_pes\")\n pestot = env_mach_pes.get_total_tasks(models)\n logger.debug(\"at update TOTALPES = %s\"%pestot)\n case.set_value(\"TOTALPES\", pestot)\n thread_count = env_mach_pes.get_max_thread_count(models)\n if thread_count > 1:\n case.set_value(\"BUILD_THREADED\", True)\n\n expect(not (case.get_value(\"BUILD_THREADED\") and compiler == \"nag\"),\n \"it is not possible to run with OpenMP if using the NAG Fortran compiler\")\n\n\n cost_pes = env_mach_pes.get_cost_pes(pestot, thread_count, machine=case.get_value(\"MACH\"))\n case.set_value(\"COST_PES\", cost_pes)\n\n # create batch file\n logger.info(\"Creating batch script case.run\")\n\n # Use BatchFactory to get the appropriate instance of a BatchMaker,\n # use it to create our batch scripts\n env_batch = case.get_env(\"batch\")\n num_nodes = env_mach_pes.get_total_nodes(pestot, thread_count)\n tasks_per_node = env_mach_pes.get_tasks_per_node(pestot, thread_count)\n for job in env_batch.get_jobs():\n input_batch_script = os.path.join(case.get_value(\"MACHDIR\"), env_batch.get_value('template', subgroup=job))\n if job == \"case.test\" and testcase is not None and not test_mode:\n logger.info(\"Writing %s script\" % job)\n testscript = os.path.join(cimeroot, \"scripts\", \"Testing\", \"Testcases\", \"%s_script\" % testcase)\n # Short term fix to be removed when csh tests are removed\n if not os.path.exists(testscript):\n env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)\n elif job != \"case.test\":\n 
logger.info(\"Writing %s script from input template %s\" % (job, input_batch_script))\n env_batch.make_batch_script(input_batch_script, job, case, pestot, tasks_per_node, num_nodes, thread_count)\n\n # Make a copy of env_mach_pes.xml in order to be able\n # to check that it does not change once case.setup is invoked\n logger.info(\"Locking file env_mach_pes.xml\")\n case.flush()\n logger.debug(\"at copy TOTALPES = %s\"%case.get_value(\"TOTALPES\"))\n shutil.copy(\"env_mach_pes.xml\", \"LockedFiles\")\n\n # Create user_nl files for the required number of instances\n if not os.path.exists(\"user_nl_cpl\"):\n logger.info(\"Creating user_nl_xxx files for components and cpl\")\n # loop over models\n for model in models:\n comp = case.get_value(\"COMP_%s\" % model)\n logger.info(\"Building %s usernl files\"%model)\n _build_usernl_files(case, model, comp)\n if comp == \"cism\":\n run_cmd_no_fail(\"%s/../components/cism/cime_config/cism.template %s\" % (cimeroot, caseroot))\n\n _build_usernl_files(case, \"drv\", \"cpl\")\n\n # Create needed directories for case\n create_dirs(case)\n\n logger.info(\"If an old case build already exists, might want to run \\'case.build --clean\\' before building\")\n\n # Create test script if appropriate\n # Short term fix to be removed when csh tests are removed\n if os.path.exists(\"env_test.xml\"):\n if not os.path.exists(\"case.test\"):\n logger.info(\"Starting testcase.setup\")\n run_cmd_no_fail(\"./testcase.setup -caseroot %s\" % caseroot)\n logger.info(\"Finished testcase.setup\")\n\n # some tests need namelists created here (ERP)\n if test_mode:\n create_namelists(case)\n\n msg = \"case.setup complete\"\n append_status(msg, caseroot=caseroot, sfile=\"CaseStatus\")\n\n # Record env information\n env_module = case.get_env(\"mach_specific\")\n env_module.make_env_mach_specific_file(compiler, debug, mpilib, \"sh\")\n env_module.make_env_mach_specific_file(compiler, debug, mpilib, \"csh\")\n with open(\"software_environment.txt\", \"w\") as f:\n f.write(env_module.list_modules())\n run_cmd_no_fail(\"echo -e '\\n' >> software_environment.txt && \\\n env >> software_environment.txt\")\n\n###############################################################################\ndef case_setup(case, clean=False, test_mode=False, reset=False):\n###############################################################################\n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n if case.get_value(\"TEST\"):\n test_name = casebaseid if casebaseid is not None else case.get_value(\"CASE\")\n with TestStatus(test_dir=caseroot, test_name=test_name) as ts:\n try:\n _case_setup_impl(case, caseroot, clean=clean, test_mode=test_mode, reset=reset)\n except:\n ts.set_status(SETUP_PHASE, TEST_FAIL_STATUS)\n raise\n else:\n ts.set_status(SETUP_PHASE, TEST_PASS_STATUS)\n else:\n _case_setup_impl(case, caseroot, clean=clean, test_mode=test_mode, reset=reset)\n", "path": "utils/python/CIME/case_setup.py"}]}
| 4,012 | 375 |
gh_patches_debug_16343
|
rasdani/github-patches
|
git_diff
|
pallets__werkzeug-1989
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Werkzeug useragent parsing is off for Motorola One Macro Android phone
I noticed this in a Flask app that uses Werkzeug to parse user agent info, and saw the same behavior in both the Flask request object and the standalone Werkzeug user agent parser.
It looks like the presence of the substring "mac" in "macro" (the device model) is throwing it off. However, I feel like that should be overridden by the clear "Linux" and/or "Android 9".
```
>>> from werkzeug.useragents import UserAgent
>>> UserAgent("Mozilla/5.0 (Linux; Android 9; motorola one macro) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36").platform
'macos'
```
I would expect the `platform` for this useragent string to be "android".
Environment: Linux
- Python version: 3.7.3
- Werkzeug version: 1.01
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/werkzeug/useragents.py`
Content:
```
1 import re
2 import typing as t
3
4 if t.TYPE_CHECKING:
5 from wsgiref.types import WSGIEnvironment
6
7
8 class UserAgentParser:
9 """A simple user agent parser. Used by the `UserAgent`."""
10
11 platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
12 (" cros ", "chromeos"),
13 ("iphone|ios", "iphone"),
14 ("ipad", "ipad"),
15 (r"darwin|mac|os\s*x", "macos"),
16 ("win", "windows"),
17 (r"android", "android"),
18 ("netbsd", "netbsd"),
19 ("openbsd", "openbsd"),
20 ("freebsd", "freebsd"),
21 ("dragonfly", "dragonflybsd"),
22 ("(sun|i86)os", "solaris"),
23 (r"x11|lin(\b|ux)?", "linux"),
24 (r"nintendo\s+wii", "wii"),
25 ("irix", "irix"),
26 ("hp-?ux", "hpux"),
27 ("aix", "aix"),
28 ("sco|unix_sv", "sco"),
29 ("bsd", "bsd"),
30 ("amiga", "amiga"),
31 ("blackberry|playbook", "blackberry"),
32 ("symbian", "symbian"),
33 )
34 browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
35 ("googlebot", "google"),
36 ("msnbot", "msn"),
37 ("yahoo", "yahoo"),
38 ("ask jeeves", "ask"),
39 (r"aol|america\s+online\s+browser", "aol"),
40 (r"opera|opr", "opera"),
41 ("edge|edg", "edge"),
42 ("chrome|crios", "chrome"),
43 ("seamonkey", "seamonkey"),
44 ("firefox|firebird|phoenix|iceweasel", "firefox"),
45 ("galeon", "galeon"),
46 ("safari|version", "safari"),
47 ("webkit", "webkit"),
48 ("camino", "camino"),
49 ("konqueror", "konqueror"),
50 ("k-meleon", "kmeleon"),
51 ("netscape", "netscape"),
52 (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
53 ("lynx", "lynx"),
54 ("links", "links"),
55 ("Baiduspider", "baidu"),
56 ("bingbot", "bing"),
57 ("mozilla", "mozilla"),
58 )
59
60 _browser_version_re = r"(?:{pattern})[/\sa-z(]*(\d+[.\da-z]+)?"
61 _language_re = re.compile(
62 r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
63 r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
64 )
65
66 def __init__(self) -> None:
67 self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]
68 self.browsers = [
69 (b, re.compile(self._browser_version_re.format(pattern=a), re.I))
70 for a, b in self.browser_rules
71 ]
72
73 def __call__(
74 self, user_agent: str
75 ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:
76 platform: t.Optional[str]
77 browser: t.Optional[str]
78 version: t.Optional[str]
79 language: t.Optional[str]
80
81 for platform, regex in self.platforms: # noqa: B007
82 match = regex.search(user_agent)
83 if match is not None:
84 break
85 else:
86 platform = None
87 for browser, regex in self.browsers: # noqa: B007
88 match = regex.search(user_agent)
89 if match is not None:
90 version = match.group(1)
91 break
92 else:
93 browser = version = None
94 match = self._language_re.search(user_agent)
95 if match is not None:
96 language = match.group(1) or match.group(2)
97 else:
98 language = None
99 return platform, browser, version, language
100
101
102 class UserAgent:
103 """Represents a user agent. Pass it a WSGI environment or a user agent
104 string and you can inspect some of the details from the user agent
105 string via the attributes. The following attributes exist:
106
107 .. attribute:: string
108
109 the raw user agent string
110
111 .. attribute:: platform
112
113 the browser platform. ``None`` if not recognized.
114 The following platforms are currently recognized:
115
116 - `aix`
117 - `amiga`
118 - `android`
119 - `blackberry`
120 - `bsd`
121 - `chromeos`
122 - `dragonflybsd`
123 - `freebsd`
124 - `hpux`
125 - `ipad`
126 - `iphone`
127 - `irix`
128 - `linux`
129 - `macos`
130 - `netbsd`
131 - `openbsd`
132 - `sco`
133 - `solaris`
134 - `symbian`
135 - `wii`
136 - `windows`
137
138 .. attribute:: browser
139
140 the name of the browser. ``None`` if not recognized.
141 The following browsers are currently recognized:
142
143 - `aol` *
144 - `ask` *
145 - `baidu` *
146 - `bing` *
147 - `camino`
148 - `chrome`
149 - `edge`
150 - `firefox`
151 - `galeon`
152 - `google` *
153 - `kmeleon`
154 - `konqueror`
155 - `links`
156 - `lynx`
157 - `mozilla`
158 - `msie`
159 - `msn`
160 - `netscape`
161 - `opera`
162 - `safari`
163 - `seamonkey`
164 - `webkit`
165 - `yahoo` *
166
167 (Browsers marked with a star (``*``) are crawlers.)
168
169 .. attribute:: version
170
171 the version of the browser. ``None`` if not recognized.
172
173 .. attribute:: language
174
175 the language of the browser. ``None`` if not recognized.
176 """
177
178 _parser = UserAgentParser()
179
180 def __init__(self, environ_or_string: t.Union["WSGIEnvironment", str]) -> None:
181 if isinstance(environ_or_string, dict):
182 self.string = environ_or_string.get("HTTP_USER_AGENT", "")
183 else:
184 self.string = environ_or_string
185
186 self.platform, self.browser, self.version, self.language = self._parser(
187 self.string
188 )
189
190 def to_header(self) -> str:
191 return self.string
192
193 def __str__(self) -> str:
194 return self.string
195
196 def __bool__(self) -> bool:
197 return bool(self.browser)
198
199 def __repr__(self):
200 return f"<{type(self).__name__} {self.browser!r}/{self.version}>"
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -12,7 +12,7 @@
(" cros ", "chromeos"),
("iphone|ios", "iphone"),
("ipad", "ipad"),
- (r"darwin|mac|os\s*x", "macos"),
+ (r"darwin\b|mac\b|os\s*x", "macos"),
("win", "windows"),
(r"android", "android"),
("netbsd", "netbsd"),
@@ -20,7 +20,7 @@
("freebsd", "freebsd"),
("dragonfly", "dragonflybsd"),
("(sun|i86)os", "solaris"),
- (r"x11|lin(\b|ux)?", "linux"),
+ (r"x11\b|lin(\b|ux)?", "linux"),
(r"nintendo\s+wii", "wii"),
("irix", "irix"),
("hp-?ux", "hpux"),
|
{"golden_diff": "diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py\n--- a/src/werkzeug/useragents.py\n+++ b/src/werkzeug/useragents.py\n@@ -12,7 +12,7 @@\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n- (r\"darwin|mac|os\\s*x\", \"macos\"),\n+ (r\"darwin\\b|mac\\b|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n@@ -20,7 +20,7 @@\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n- (r\"x11|lin(\\b|ux)?\", \"linux\"),\n+ (r\"x11\\b|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n", "issue": "Werkzeug useragent parsing is off for Motorola One Macro Android phone\nI noticed this in a flask app but that uses werkzeug to parse useragent info and saw the same behavior in both the flask request object and standalone werkzeug useragent parser.\r\n\r\nIt looks like the presence of the substring \"mac\" in \"macro\" (the device model) is throwing it off. However, I feel like that should be overridden by the clear \"Linux\" and/or \"Android 9\".\r\n\r\n```\r\n>>> from werkzeug.useragents import UserAgent\r\n>>> UserAgent(\"Mozilla/5.0 (Linux; Android 9; motorola one macro) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36\").platform\r\n'macos'\r\n```\r\n\r\nI would expect the `platform` for this useragent string to be \"android\".\r\n\r\nEnvironment: Linux\r\n\r\n- Python version: 3.7.3\r\n- Werkzeug version: 1.01\r\n\n", "before_files": [{"content": "import re\nimport typing as t\n\nif t.TYPE_CHECKING:\n from wsgiref.types import WSGIEnvironment\n\n\nclass UserAgentParser:\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? 
rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:{pattern})[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self) -> None:\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]\n self.browsers = [\n (b, re.compile(self._browser_version_re.format(pattern=a), re.I))\n for a, b in self.browser_rules\n ]\n\n def __call__(\n self, user_agent: str\n ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:\n platform: t.Optional[str]\n browser: t.Optional[str]\n version: t.Optional[str]\n language: t.Optional[str]\n\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent:\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. 
``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string: t.Union[\"WSGIEnvironment\", str]) -> None:\n if isinstance(environ_or_string, dict):\n self.string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n else:\n self.string = environ_or_string\n\n self.platform, self.browser, self.version, self.language = self._parser(\n self.string\n )\n\n def to_header(self) -> str:\n return self.string\n\n def __str__(self) -> str:\n return self.string\n\n def __bool__(self) -> bool:\n return bool(self.browser)\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.browser!r}/{self.version}>\"\n", "path": "src/werkzeug/useragents.py"}], "after_files": [{"content": "import re\nimport typing as t\n\nif t.TYPE_CHECKING:\n from wsgiref.types import WSGIEnvironment\n\n\nclass UserAgentParser:\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin\\b|mac\\b|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11\\b|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? 
rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:{pattern})[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self) -> None:\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]\n self.browsers = [\n (b, re.compile(self._browser_version_re.format(pattern=a), re.I))\n for a, b in self.browser_rules\n ]\n\n def __call__(\n self, user_agent: str\n ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:\n platform: t.Optional[str]\n browser: t.Optional[str]\n version: t.Optional[str]\n language: t.Optional[str]\n\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent:\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. ``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string: t.Union[\"WSGIEnvironment\", str]) -> None:\n if isinstance(environ_or_string, dict):\n self.string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n else:\n self.string = environ_or_string\n\n self.platform, self.browser, self.version, self.language = self._parser(\n self.string\n )\n\n def to_header(self) -> str:\n return self.string\n\n def __str__(self) -> str:\n return self.string\n\n def __bool__(self) -> bool:\n return bool(self.browser)\n\n def __repr__(self):\n return f\"<{type(self).__name__} {self.browser!r}/{self.version}>\"\n", "path": "src/werkzeug/useragents.py"}]}
| 2,622 | 257 |
gh_patches_debug_36196
|
rasdani/github-patches
|
git_diff
|
conda__conda-3969
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid requirement while trying to use pip options
Hi!
In the pip section of my environment.yaml file I have this line
```- rep --install-option='--no-deps'```
While I am trying to update my environment, I get this error
```Invalid requirement: 'rep --install-option='--no-deps''```
If I do `pip -r requirements.txt` and have that same line in requirements.txt, it works.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_env/installers/pip.py`
Content:
```
1 from __future__ import absolute_import
2 import subprocess
3 from conda_env.pip_util import pip_args
4 from conda.exceptions import CondaValueError
5
6
7 def install(prefix, specs, args, env, prune=False):
8 pip_cmd = pip_args(prefix) + ['install', ] + specs
9 process = subprocess.Popen(pip_cmd, universal_newlines=True)
10 process.communicate()
11
12 if process.returncode != 0:
13 raise CondaValueError("pip returned an error.")
14
```
Path: `conda/egg_info.py`
Content:
```
1 """
2 Functions related to core conda functionality that relates to manually
3 installed Python packages, e.g. using "python setup.py install", or "pip".
4 """
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 from io import open
8 import os
9 from os.path import isdir, isfile, join
10 import re
11 import sys
12
13 from .common.compat import itervalues, on_win
14 from .core.linked_data import linked_data
15 from .misc import rel_path
16 from .models.dist import Dist
17
18
19 def get_site_packages_dir(installed_pkgs):
20 for info in itervalues(installed_pkgs):
21 if info['name'] == 'python':
22 if on_win:
23 stdlib_dir = 'Lib'
24 else:
25 py_ver = info['version'][:3]
26 stdlib_dir = 'lib/python%s' % py_ver
27 return join(stdlib_dir, 'site-packages')
28 return None
29
30
31 def get_egg_info_files(sp_dir):
32 for fn in os.listdir(sp_dir):
33 if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
34 continue
35 path = join(sp_dir, fn)
36 if isfile(path):
37 yield path
38 elif isdir(path):
39 for path2 in [join(path, 'PKG-INFO'),
40 join(path, 'EGG-INFO', 'PKG-INFO'),
41 join(path, 'METADATA')]:
42 if isfile(path2):
43 yield path2
44
45
46 pat = re.compile(r'(\w+):\s*(\S+)', re.I)
47 def parse_egg_info(path):
48 """
49 Parse an .egg-info file and return its canonical distribution name
50 """
51 info = {}
52 for line in open(path, encoding='utf-8'):
53 line = line.strip()
54 m = pat.match(line)
55 if m:
56 key = m.group(1).lower()
57 info[key] = m.group(2)
58 try:
59 return '%(name)s-%(version)s-<pip>' % info
60 except KeyError:
61 pass
62 return None
63
64
65 def get_egg_info(prefix, all_pkgs=False):
66 """
67 Return a set of canonical names of all Python packages (in `prefix`),
68 by inspecting the .egg-info files inside site-packages.
69 By default, only untracked (not conda installed) .egg-info files are
70 considered. Setting `all_pkgs` to True changes this.
71 """
72 installed_pkgs = linked_data(prefix)
73 sp_dir = get_site_packages_dir(installed_pkgs)
74 if sp_dir is None:
75 return set()
76
77 conda_files = set()
78 for info in itervalues(installed_pkgs):
79 conda_files.update(info.get('files', []))
80
81 res = set()
82 for path in get_egg_info_files(join(prefix, sp_dir)):
83 f = rel_path(prefix, path)
84 if all_pkgs or f not in conda_files:
85 try:
86 dist = parse_egg_info(path)
87 except UnicodeDecodeError:
88 dist = None
89 if dist:
90 res.add(Dist(dist))
91 return res
92
93
94 if __name__ == '__main__':
95 from pprint import pprint
96 pprint(get_egg_info(sys.prefix))
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/egg_info.py b/conda/egg_info.py
--- a/conda/egg_info.py
+++ b/conda/egg_info.py
@@ -30,6 +30,10 @@
def get_egg_info_files(sp_dir):
for fn in os.listdir(sp_dir):
+ if fn.endswith('.egg-link'):
+ with open(join(sp_dir, fn), 'r') as reader:
+ for egg in get_egg_info_files(reader.readline().strip()):
+ yield egg
if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
continue
path = join(sp_dir, fn)
diff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py
--- a/conda_env/installers/pip.py
+++ b/conda_env/installers/pip.py
@@ -1,13 +1,56 @@
from __future__ import absolute_import
+
+import os
+import os.path as op
import subprocess
+import tempfile
from conda_env.pip_util import pip_args
from conda.exceptions import CondaValueError
-def install(prefix, specs, args, env, prune=False):
- pip_cmd = pip_args(prefix) + ['install', ] + specs
- process = subprocess.Popen(pip_cmd, universal_newlines=True)
- process.communicate()
+def _pip_install_via_requirements(prefix, specs, args, *_):
+ """
+ Installs the pip dependencies in specs using a temporary pip requirements file.
+
+ Args
+ ----
+ prefix: string
+ The path to the python and pip executables.
+
+ specs: iterable of strings
+ Each element should be a valid pip dependency.
+ See: https://pip.pypa.io/en/stable/user_guide/#requirements-files
+ https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format
+ """
+ try:
+ pip_workdir = op.dirname(op.abspath(args.file))
+ except AttributeError:
+ pip_workdir = None
+ requirements = None
+ try:
+ # Generate the temporary requirements file
+ requirements = tempfile.NamedTemporaryFile(mode='w',
+ prefix='condaenv.',
+ suffix='.requirements.txt',
+ dir=pip_workdir,
+ delete=False)
+ requirements.write('\n'.join(specs))
+ requirements.close()
+ # pip command line...
+ pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]
+ # ...run it
+ process = subprocess.Popen(pip_cmd,
+ cwd=pip_workdir,
+ universal_newlines=True)
+ process.communicate()
+ if process.returncode != 0:
+ raise CondaValueError("pip returned an error")
+ finally:
+ # Win/Appveyor does not like it if we use context manager + delete=True.
+ # So we delete the temporary file in a finally block.
+ if requirements is not None and op.isfile(requirements.name):
+ os.remove(requirements.name)
+
- if process.returncode != 0:
- raise CondaValueError("pip returned an error.")
+# Conform to Installers API
+install = _pip_install_via_requirements
|
{"golden_diff": "diff --git a/conda/egg_info.py b/conda/egg_info.py\n--- a/conda/egg_info.py\n+++ b/conda/egg_info.py\n@@ -30,6 +30,10 @@\n \n def get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n+ if fn.endswith('.egg-link'):\n+ with open(join(sp_dir, fn), 'r') as reader:\n+ for egg in get_egg_info_files(reader.readline().strip()):\n+ yield egg\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\ndiff --git a/conda_env/installers/pip.py b/conda_env/installers/pip.py\n--- a/conda_env/installers/pip.py\n+++ b/conda_env/installers/pip.py\n@@ -1,13 +1,56 @@\n from __future__ import absolute_import\n+\n+import os\n+import os.path as op\n import subprocess\n+import tempfile\n from conda_env.pip_util import pip_args\n from conda.exceptions import CondaValueError\n \n \n-def install(prefix, specs, args, env, prune=False):\n- pip_cmd = pip_args(prefix) + ['install', ] + specs\n- process = subprocess.Popen(pip_cmd, universal_newlines=True)\n- process.communicate()\n+def _pip_install_via_requirements(prefix, specs, args, *_):\n+ \"\"\"\n+ Installs the pip dependencies in specs using a temporary pip requirements file.\n+\n+ Args\n+ ----\n+ prefix: string\n+ The path to the python and pip executables.\n+\n+ specs: iterable of strings\n+ Each element should be a valid pip dependency.\n+ See: https://pip.pypa.io/en/stable/user_guide/#requirements-files\n+ https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format\n+ \"\"\"\n+ try:\n+ pip_workdir = op.dirname(op.abspath(args.file))\n+ except AttributeError:\n+ pip_workdir = None\n+ requirements = None\n+ try:\n+ # Generate the temporary requirements file\n+ requirements = tempfile.NamedTemporaryFile(mode='w',\n+ prefix='condaenv.',\n+ suffix='.requirements.txt',\n+ dir=pip_workdir,\n+ delete=False)\n+ requirements.write('\\n'.join(specs))\n+ requirements.close()\n+ # pip command line...\n+ pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]\n+ # ...run it\n+ process = subprocess.Popen(pip_cmd,\n+ cwd=pip_workdir,\n+ universal_newlines=True)\n+ process.communicate()\n+ if process.returncode != 0:\n+ raise CondaValueError(\"pip returned an error\")\n+ finally:\n+ # Win/Appveyor does not like it if we use context manager + delete=True.\n+ # So we delete the temporary file in a finally block.\n+ if requirements is not None and op.isfile(requirements.name):\n+ os.remove(requirements.name)\n+\n \n- if process.returncode != 0:\n- raise CondaValueError(\"pip returned an error.\")\n+# Conform to Installers API\n+install = _pip_install_via_requirements\n", "issue": "Invalid requirement while trying to use pip options\nHi!\r\n\r\nI have in my pip section inside envrionment.yaml file this line\r\n```- rep --install-option='--no-deps'```\r\n\r\nwhile I am trying to update my environment I am getting this error\r\n\r\n```Invalid requirement: 'rep --install-option='--no-deps''```\r\n\r\nif I do pip -r requirements.txt and I have that line as it is in requirements.txt it works.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport subprocess\nfrom conda_env.pip_util import pip_args\nfrom conda.exceptions import CondaValueError\n\n\ndef install(prefix, specs, args, env, prune=False):\n pip_cmd = pip_args(prefix) + ['install', ] + specs\n process = subprocess.Popen(pip_cmd, universal_newlines=True)\n process.communicate()\n\n if process.returncode != 0:\n raise CondaValueError(\"pip returned an error.\")\n", "path": "conda_env/installers/pip.py"}, 
{"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom io import open\nimport os\nfrom os.path import isdir, isfile, join\nimport re\nimport sys\n\nfrom .common.compat import itervalues, on_win\nfrom .core.linked_data import linked_data\nfrom .misc import rel_path\nfrom .models.dist import Dist\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if on_win:\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. 
Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(Dist(dist))\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport os\nimport os.path as op\nimport subprocess\nimport tempfile\nfrom conda_env.pip_util import pip_args\nfrom conda.exceptions import CondaValueError\n\n\ndef _pip_install_via_requirements(prefix, specs, args, *_):\n \"\"\"\n Installs the pip dependencies in specs using a temporary pip requirements file.\n\n Args\n ----\n prefix: string\n The path to the python and pip executables.\n\n specs: iterable of strings\n Each element should be a valid pip dependency.\n See: https://pip.pypa.io/en/stable/user_guide/#requirements-files\n https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format\n \"\"\"\n try:\n pip_workdir = op.dirname(op.abspath(args.file))\n except AttributeError:\n pip_workdir = None\n requirements = None\n try:\n # Generate the temporary requirements file\n requirements = tempfile.NamedTemporaryFile(mode='w',\n prefix='condaenv.',\n suffix='.requirements.txt',\n dir=pip_workdir,\n delete=False)\n requirements.write('\\n'.join(specs))\n requirements.close()\n # pip command line...\n pip_cmd = pip_args(prefix) + ['install', '-r', requirements.name]\n # ...run it\n process = subprocess.Popen(pip_cmd,\n cwd=pip_workdir,\n universal_newlines=True)\n process.communicate()\n if process.returncode != 0:\n raise CondaValueError(\"pip returned an error\")\n finally:\n # Win/Appveyor does not like it if we use context manager + delete=True.\n # So we delete the temporary file in a finally block.\n if requirements is not None and op.isfile(requirements.name):\n os.remove(requirements.name)\n\n\n# Conform to Installers API\ninstall = _pip_install_via_requirements\n", "path": "conda_env/installers/pip.py"}, {"content": "\"\"\"\nFunctions related to core conda functionality that relates to manually\ninstalled Python packages, e.g. 
using \"python setup.py install\", or \"pip\".\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom io import open\nimport os\nfrom os.path import isdir, isfile, join\nimport re\nimport sys\n\nfrom .common.compat import itervalues, on_win\nfrom .core.linked_data import linked_data\nfrom .misc import rel_path\nfrom .models.dist import Dist\n\n\ndef get_site_packages_dir(installed_pkgs):\n for info in itervalues(installed_pkgs):\n if info['name'] == 'python':\n if on_win:\n stdlib_dir = 'Lib'\n else:\n py_ver = info['version'][:3]\n stdlib_dir = 'lib/python%s' % py_ver\n return join(stdlib_dir, 'site-packages')\n return None\n\n\ndef get_egg_info_files(sp_dir):\n for fn in os.listdir(sp_dir):\n if fn.endswith('.egg-link'):\n with open(join(sp_dir, fn), 'r') as reader:\n for egg in get_egg_info_files(reader.readline().strip()):\n yield egg\n if not fn.endswith(('.egg', '.egg-info', '.dist-info')):\n continue\n path = join(sp_dir, fn)\n if isfile(path):\n yield path\n elif isdir(path):\n for path2 in [join(path, 'PKG-INFO'),\n join(path, 'EGG-INFO', 'PKG-INFO'),\n join(path, 'METADATA')]:\n if isfile(path2):\n yield path2\n\n\npat = re.compile(r'(\\w+):\\s*(\\S+)', re.I)\ndef parse_egg_info(path):\n \"\"\"\n Parse an .egg-info file and return its canonical distribution name\n \"\"\"\n info = {}\n for line in open(path, encoding='utf-8'):\n line = line.strip()\n m = pat.match(line)\n if m:\n key = m.group(1).lower()\n info[key] = m.group(2)\n try:\n return '%(name)s-%(version)s-<pip>' % info\n except KeyError:\n pass\n return None\n\n\ndef get_egg_info(prefix, all_pkgs=False):\n \"\"\"\n Return a set of canonical names of all Python packages (in `prefix`),\n by inspecting the .egg-info files inside site-packages.\n By default, only untracked (not conda installed) .egg-info files are\n considered. Setting `all_pkgs` to True changes this.\n \"\"\"\n installed_pkgs = linked_data(prefix)\n sp_dir = get_site_packages_dir(installed_pkgs)\n if sp_dir is None:\n return set()\n\n conda_files = set()\n for info in itervalues(installed_pkgs):\n conda_files.update(info.get('files', []))\n\n res = set()\n for path in get_egg_info_files(join(prefix, sp_dir)):\n f = rel_path(prefix, path)\n if all_pkgs or f not in conda_files:\n try:\n dist = parse_egg_info(path)\n except UnicodeDecodeError:\n dist = None\n if dist:\n res.add(Dist(dist))\n return res\n\n\nif __name__ == '__main__':\n from pprint import pprint\n pprint(get_egg_info(sys.prefix))\n", "path": "conda/egg_info.py"}]}
| 1,382 | 715 |
gh_patches_debug_52514
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-2079
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade of dependencies for 1.19.0
Modification
------------
# pip-requires
SQLAlchemy 1.2.7 --> 1.2.16
alembic 0.9.9 --> 1.0.6
python-dateutil 2.7.2 --> 2.7.5
s3cmd 2.0.1 --> 2.0.2
stomp.py 4.1.20 --> 4.1.21
dnspython 1.15.0 --> 1.16.0
geoip2 2.8.0 --> 2.9.0
maxminddb 1.3.0 --> 1.4.1
pycparser 2.18 --> 2.19
googleapis-common-protos 1.5.3 --> 1.5.6
httplib2 0.11.3 --> 0.12.0
pyOpenSSL 18.0.0 --> 19.0.0
cryptography 2.3.1 --> 2.4.2
oauth2client 4.1.2 --> 4.1.3
protobuf 3.5.2.post1 --> 3.6.1
grpcio 1.11.0 --> 1.18.0
pyasn1 0.4.2 --> 0.4.5
pyasn1-modules 0.2.1 --> 0.2.3
rsa 3.4.2 --> 4.0
redis 2.10.6 --> 3.0.1
numpy 1.14.2 --> 1.16.0
paramiko 2.4.1 --> 2.4.2
Flask 0.12.4 --> 1.0.2
idna 2.6 --> 2.8
MyProxyClient 2.0.1 --> 2.1.0
# pip-requires-test
Paste 2.0.3 --> 3.0.6
coverage 4.4.2 --> 4.5.2
Sphinx 1.6.5 --> 1.8.3
sphinx-rtd-theme 0.2.4 --> 0.4.2
sphinxcontrib-httpdomain 1.6.0 --> 1.7.0
Pygments 2.2.0 --> 2.3.1
pyflakes 1.6.0 --> 2.0.0
flake8 3.5.0 --> 3.6.0
pylint 1.7.4 --> 2.2.2
virtualenv 15.1.0 --> 16.2.0
tox 2.9.1 --> 3.7.0
pytest 3.2.5 --> 4.1.1
pytest-xdist 1.20.1 --> 1.26.0
pytz 2017.3 --> 2018.9
Babel 2.5.1 --> 2.6.0
subprocess32 3.2.7 --> 3.5.3
pycodestyle 2.3.1 --> 2.4.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py`
Content:
```
1 # Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 # Authors:
16 # - Martin Barisits <[email protected]>, 2019
17 #
18 # Topic: Replicas table PK definition is in wrong order
19 # Revision ID: 3345511706b8
20 # Revises: 9eb936a81eb1
21 # Creation Date: 2019-01-30 14:20:35.058889
22
23 from alembic.op import (create_primary_key, drop_constraint)
24
25 from alembic import context
26
27
28 # revision identifiers used by alembic
29 revision = '3345511706b8' # pylint: disable=invalid-name
30 down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name
31
32
33 def upgrade():
34 '''
35 Upgrade the database to this revision
36 '''
37 if context.get_context().dialect.name != 'sqlite':
38 drop_constraint('REPLICAS_PK', 'replicas')
39 create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])
40
41
42 def downgrade():
43 '''
44 Downgrade the database to the previous revision
45 '''
46 if context.get_context().dialect.name != 'sqlite':
47 drop_constraint('REPLICAS_PK', 'replicas')
48 create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py
--- a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py
+++ b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py
@@ -27,7 +27,7 @@
# revision identifiers used by alembic
revision = '3345511706b8' # pylint: disable=invalid-name
-down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name
+down_revision = '01eaf73ab656' # pylint: disable=invalid-name
def upgrade():
|
{"golden_diff": "diff --git a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n--- a/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n+++ b/lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py\n@@ -27,7 +27,7 @@\n \n # revision identifiers used by alembic\n revision = '3345511706b8' # pylint: disable=invalid-name\n-down_revision = 'bf3baa1c1474' # pylint: disable=invalid-name\n+down_revision = '01eaf73ab656' # pylint: disable=invalid-name\n \n \n def upgrade():\n", "issue": "Upgrade of dependencies for 1.19.0\nModification\r\n------------\r\n# pip-requires\r\nSQLAlchemy 1.2.7 --> 1.2.16\r\nalembic 0.9.9 --> 1.0.6\r\npython-dateutil 2.7.2 --> 2.7.5\r\ns3cmd 2.0.1 --> 2.0.2\r\nstomp.py 4.1.20 --> 4.1.21\r\ndnspython 1.15.0 --> 1.16.0\r\ngeoip2 2.8.0 --> 2.9.0\r\nmaxminddb 1.3.0 --> 1.4.1\r\npycparser 2.18 --> 2.19\r\ngoogleapis-common-protos 1.5.3 --> 1.5.6\r\nhttplib2 0.11.3 --> 0.12.0\r\npyOpenSSL 18.0.0 --> 19.0.0\r\ncryptography 2.3.1 --> 2.4.2\r\noauth2client 4.1.2 --> 4.1.3\r\nprotobuf 3.5.2.post1 --> 3.6.1\r\ngrpcio 1.11.0 --> 1.18.0\r\npyasn1 0.4.2 --> 0.4.5\r\npyasn1-modules 0.2.1 --> 0.2.3\r\nrsa 3.4.2 --> 4.0\r\nredis 2.10.6 --> 3.0.1\r\nnumpy 1.14.2 --> 1.16.0\r\nparamiko 2.4.1 --> 2.4.2\r\nFlask 0.12.4 --> 1.0.2\r\nidna 2.6 --> 2.8\r\nMyProxyClient 2.0.1 --> 2.1.0\r\n\r\n# pip-requires-test\r\nPaste 2.0.3 --> 3.0.6\r\ncoverage 4.4.2 --> 4.5.2\r\nSphinx 1.6.5 --> 1.8.3\r\nsphinx-rtd-theme 0.2.4 --> 0.4.2\r\nsphinxcontrib-httpdomain 1.6.0 --> 1.7.0\r\nPygments 2.2.0 --> 2.3.1\r\npyflakes 1.6.0 --> 2.0.0\r\nflake8 3.5.0 --> 3.6.0\r\npylint 1.7.4 --> 2.2.2\r\nvirtualenv 15.1.0 --> 16.2.0\r\ntox 2.9.1 --> 3.7.0\r\npytest 3.2.5 --> 4.1.1\r\npytest-xdist 1.20.1 --> 1.26.0\r\npytz 2017.3 --> 2018.9\r\nBabel 2.5.1 --> 2.6.0\r\nsubprocess32 3.2.7 --> 3.5.3\r\npycodestyle 2.3.1 --> 2.4.0\n", "before_files": [{"content": "# Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <[email protected]>, 2019\n#\n# Topic: Replicas table PK definition is in wrong order\n# Revision ID: 3345511706b8\n# Revises: 9eb936a81eb1\n# Creation Date: 2019-01-30 14:20:35.058889\n\nfrom alembic.op import (create_primary_key, drop_constraint)\n\nfrom alembic import context\n\n\n# revision identifiers used by alembic\nrevision = '3345511706b8' # pylint: disable=invalid-name\ndown_revision = 'bf3baa1c1474' # pylint: disable=invalid-name\n\n\ndef upgrade():\n '''\n Upgrade the database to this revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])\n\n\ndef downgrade():\n '''\n Downgrade the database to the previous revision\n '''\n if context.get_context().dialect.name 
!= 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])\n", "path": "lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py"}], "after_files": [{"content": "# Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Martin Barisits <[email protected]>, 2019\n#\n# Topic: Replicas table PK definition is in wrong order\n# Revision ID: 3345511706b8\n# Revises: 9eb936a81eb1\n# Creation Date: 2019-01-30 14:20:35.058889\n\nfrom alembic.op import (create_primary_key, drop_constraint)\n\nfrom alembic import context\n\n\n# revision identifiers used by alembic\nrevision = '3345511706b8' # pylint: disable=invalid-name\ndown_revision = '01eaf73ab656' # pylint: disable=invalid-name\n\n\ndef upgrade():\n '''\n Upgrade the database to this revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['scope', 'name', 'rse_id'])\n\n\ndef downgrade():\n '''\n Downgrade the database to the previous revision\n '''\n if context.get_context().dialect.name != 'sqlite':\n drop_constraint('REPLICAS_PK', 'replicas')\n create_primary_key('REPLICAS_PK', 'replicas', ['rse_id', 'scope', 'name'])\n", "path": "lib/rucio/db/sqla/migrate_repo/versions/3345511706b8_replicas_table_pk_definition_is_in_.py"}]}
| 1,585 | 248 |
gh_patches_debug_28042
|
rasdani/github-patches
|
git_diff
|
ros__ros_comm-502
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
traffic of statistics is wrong
The entry traffic in statistics seems to get calculated wrong.
**Example setup:**
```
$ rosparam set enable_statistics true
```
```
$ rostopic pub /table std_msgs/String "data: '1234' " -r 100
```
```
$ rostopic bw /table
subscribed to [/table]
average: 805.20B/s
mean: 8.00B min: 8.00B max: 8.00B window: 99
```
```
$ rostopic echo /statistics
topic: /table
node_pub: /rostopic_3963_1411225760462
node_sub: /rostopic_4239_1411226004615
window_start:
secs: 1411226011
nsecs: 883635044
window_stop:
secs: 1411226019
nsecs: 886126995
delivered_msgs: 800
dropped_msgs: 0
traffic: 12
[...]
```
Whereas traffic in this example should be ~ 8\* 800Byte = **1600 Byte** but is **12 Byte**.
Changing the publish frequency does not lead to a change in traffic. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `clients/rospy/src/rospy/impl/statistics.py`
Content:
```
1 # Software License Agreement (BSD License)
2 #
3 # Copyright (c) 2013-2014 Dariush Forouher
4 # All rights reserved.
5 #
6 # Based on code adapted from diagnostics_updater by Blaise Gassend
7 #
8 # Redistribution and use in source and binary forms, with or without
9 # modification, are permitted provided that the following conditions
10 # are met:
11 #
12 # * Redistributions of source code must retain the above copyright
13 # notice, this list of conditions and the following disclaimer.
14 # * Redistributions in binary form must reproduce the above
15 # copyright notice, this list of conditions and the following
16 # disclaimer in the documentation and/or other materials provided
17 # with the distribution.
18 # * Neither the name of Willow Garage, Inc. nor the names of its
19 # contributors may be used to endorse or promote products derived
20 # from this software without specific prior written permission.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 # POSSIBILITY OF SUCH DAMAGE.
34
35 from math import sqrt
36 import logging
37 import sys
38
39 from rosgraph_msgs.msg import TopicStatistics
40 import rospy
41
42 _logger = logging.getLogger('rospy.impl.statistics')
43
44
45 class SubscriberStatisticsLogger():
46 """
47 Class that monitors each subscriber.
48
49 this class basically just keeps a collection of ConnectionStatisticsLogger.
50 """
51
52 def __init__(self, subscriber):
53 self.subscriber = subscriber
54 self.connections = dict()
55 self.read_parameters()
56
57 def read_parameters(self):
58 """
59 Fetch window parameters from parameter server
60 """
61
62 # disable statistics if node can't talk to parameter server which is the case in unit tests
63 try:
64 self.enabled = rospy.get_param("/enable_statistics", False)
65 except:
66 self.enabled = False
67 return
68
69 # Range of window length, in seconds
70 self.min_elements = rospy.get_param("/statistics_window_min_elements", 10)
71 self.max_elements = rospy.get_param("/statistics_window_max_elements", 100)
72
73 # Range of acceptable messages in window.
74 # Window size will be adjusted if number of observed is
75 # outside this range.
76 self.max_window = rospy.get_param("/statistics_window_max_size", 64)
77 self.min_window = rospy.get_param("/statistics_window_min_size", 4)
78
79 def is_enable_statistics(self):
80 return self.enabled
81
82 def callback(self, msg, publisher, stat_bytes):
83 """
84 This method is called for every message that has been received.
85
86 @param msg: The message received.
87 @param publisher: The name of the publisher node that sent the msg
88 @param stat_bytes: A counter, how many bytes have been moved across
89 this connection since it exists.
90
91 This method just looks up the ConnectionStatisticsLogger for the specific connection
92 between publisher and subscriber and delegates to statistics logging to that
93 instance.
94 """
95
96 if not self.enabled:
97 return
98
99 # /clock is special, as it is subscribed very early
100 # also exclude /statistics to reduce noise.
101 if self.subscriber.name == "/clock" or self.subscriber.name == "/statistics":
102 return
103
104 try:
105 # create ConnectionStatisticsLogger for new connections
106 logger = self.connections.get(publisher)
107 if logger is None:
108 logger = ConnectionStatisticsLogger(self.subscriber.name, rospy.get_name(), publisher)
109 self.connections[publisher] = logger
110
111 # delegate stuff to that instance
112 logger.callback(self, msg, stat_bytes)
113 except Exception as e:
114 rospy.logerr("Unexpected error during statistics measurement: %s", str(e))
115
116
117 class ConnectionStatisticsLogger():
118 """
119 Class that monitors lots of stuff for each connection.
120
121 is created whenever a subscriber is created.
122 is destroyed whenever its parent subscriber is destroyed.
123 its lifecycle is therefore bound to its parent subscriber.
124 """
125
126 def __init__(self, topic, subscriber, publisher):
127 """
128 Constructor.
129
130 @param topic: Name of the topic
131 @param subscriber: Name of the subscriber
132 @param publisher: Name of the publisher
133
134 These three should uniquely identify the connection.
135 """
136
137 self.topic = topic
138 self.subscriber = subscriber
139 self.publisher = publisher
140
141 self.pub = rospy.Publisher("/statistics", TopicStatistics, queue_size=10)
142
143 # reset window
144 self.last_pub_time = rospy.Time(0)
145 self.pub_frequency = rospy.Duration(1.0)
146
147 # timestamp age
148 self.age_list_ = []
149
150 # period calculations
151 self.arrival_time_list_ = []
152
153 self.last_seq_ = 0
154 self.dropped_msgs_ = 0
155 self.window_start = rospy.Time.now()
156
157 # temporary variables
158 self.stat_bytes_last_ = 0
159 self.stat_bytes_window_ = 0
160
161 def sendStatistics(self, subscriber_statistics_logger):
162 """
163 Send out statistics. Aggregate collected stats information.
164
165 Currently done blocking. Might be moved to own thread later. But at the moment
166 any computation done here should be rather quick.
167 """
168 curtime = rospy.Time.now()
169
170 msg = TopicStatistics()
171 msg.topic = self.topic
172 msg.node_sub = self.subscriber
173 msg.node_pub = self.publisher
174
175 msg.window_start = self.window_start
176 msg.window_stop = curtime
177
178 msg.traffic = self.stat_bytes_window_
179
180 msg.delivered_msgs = len(self.arrival_time_list_)
181 msg.dropped_msgs = self.dropped_msgs_
182
183 # we can only calculate message age if the messages did contain Header fields.
184 if len(self.age_list_) > 0:
185 msg.stamp_age_mean = rospy.Duration(sum(self.age_list_, rospy.Duration(0)).to_sec() / len(self.age_list_))
186 variance = sum((rospy.Duration((msg.stamp_age_mean - value).to_sec() ** 2) for value in self.age_list_), rospy.Duration(0)) / len(self.age_list_)
187 msg.stamp_age_stddev = rospy.Duration(sqrt(variance.to_sec()))
188 msg.stamp_age_max = max(self.age_list_)
189 else:
190 msg.stamp_age_mean = rospy.Duration(0)
191 msg.stamp_age_stddev = rospy.Duration(0)
192 msg.stamp_age_max = rospy.Duration(0)
193
194 # computer period/frequency. we need at least two messages within the window to do this.
195 if len(self.arrival_time_list_) > 1:
196 periods = [j - i for i, j in zip(self.arrival_time_list_[:-1], self.arrival_time_list_[1:])]
197 msg.period_mean = rospy.Duration(sum(periods, rospy.Duration(0)).to_sec() / len(periods))
198 variance = sum((rospy.Duration((msg.period_mean - value).to_sec() ** 2) for value in periods), rospy.Duration(0)) / len(periods)
199 msg.period_stddev = rospy.Duration(sqrt(variance.to_sec()))
200 msg.period_max = max(periods)
201 else:
202 msg.period_mean = rospy.Duration(0)
203 msg.period_stddev = rospy.Duration(0)
204 msg.period_max = rospy.Duration(0)
205
206 self.pub.publish(msg)
207
208 # adjust window, if message count is not appropriate.
209 if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:
210 self.pub_frequency *= 2
211 if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:
212 self.pub_frequency /= 2
213
214 # clear collected stats, start new window.
215 self.age_list_ = []
216 self.arrival_time_list_ = []
217 self.dropped_msgs_ = 0
218
219 self.window_start = curtime
220
221 def callback(self, subscriber_statistics_logger, msg, stat_bytes):
222 """
223 This method is called for every message, that is received on this
224 subscriber.
225
226 this callback will keep some statistics and publish the results
227 periodically on a topic. the publishing should probably be done
228 asynchronically in another thread.
229
230 @param msg: The message, that has been received. The message has usually
231 been already deserialized. However this is not always the case. (AnyMsg)
232 @param stat_bytes: A counter, how many bytes have been moved across
233 this connection since it exists.
234
235 Any computing-heavy stuff should be done somewhere else, as this
236 callback has to return before the message is delivered to the user.
237 """
238
239 arrival_time = rospy.Time.now()
240
241 self.arrival_time_list_.append(arrival_time)
242
243 # Calculate how many bytes of traffic did this message need?
244 self.stat_bytes_window_ = stat_bytes - self.stat_bytes_last_
245 self.stat_bytes_last_ = stat_bytes
246
247 # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.
248 # Those subscribers won't have a header. But as these subscribers are rather rare
249 # ("rostopic hz" is the only one I know of), I'm gonna ignore them.
250 if msg._has_header:
251 self.age_list_.append(arrival_time - msg.header.stamp)
252
253 if self.last_seq_ + 1 != msg.header.seq:
254 self.dropped_msgs_ = self.dropped_msgs_ + 1
255 self.last_seq_ = msg.header.seq
256
257 # send out statistics with a certain frequency
258 if self.last_pub_time + self.pub_frequency < arrival_time:
259 self.last_pub_time = arrival_time
260 self.sendStatistics(subscriber_statistics_logger)
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/clients/rospy/src/rospy/impl/statistics.py b/clients/rospy/src/rospy/impl/statistics.py
--- a/clients/rospy/src/rospy/impl/statistics.py
+++ b/clients/rospy/src/rospy/impl/statistics.py
@@ -175,7 +175,8 @@
msg.window_start = self.window_start
msg.window_stop = curtime
- msg.traffic = self.stat_bytes_window_
+ # Calculate bytes since last message
+ msg.traffic = self.stat_bytes_window_ - self.stat_bytes_last_
msg.delivered_msgs = len(self.arrival_time_list_)
msg.dropped_msgs = self.dropped_msgs_
@@ -218,6 +219,8 @@
self.window_start = curtime
+ self.stat_bytes_last_ = self.stat_bytes_window_
+
def callback(self, subscriber_statistics_logger, msg, stat_bytes):
"""
This method is called for every message, that is received on this
@@ -240,9 +243,7 @@
self.arrival_time_list_.append(arrival_time)
- # Calculate how many bytes of traffic did this message need?
- self.stat_bytes_window_ = stat_bytes - self.stat_bytes_last_
- self.stat_bytes_last_ = stat_bytes
+ self.stat_bytes_window_ = stat_bytes
# rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.
# Those subscribers won't have a header. But as these subscribers are rather rare
|
{"golden_diff": "diff --git a/clients/rospy/src/rospy/impl/statistics.py b/clients/rospy/src/rospy/impl/statistics.py\n--- a/clients/rospy/src/rospy/impl/statistics.py\n+++ b/clients/rospy/src/rospy/impl/statistics.py\n@@ -175,7 +175,8 @@\n msg.window_start = self.window_start\n msg.window_stop = curtime\n \n- msg.traffic = self.stat_bytes_window_\n+ # Calculate bytes since last message\n+ msg.traffic = self.stat_bytes_window_ - self.stat_bytes_last_\n \n msg.delivered_msgs = len(self.arrival_time_list_)\n msg.dropped_msgs = self.dropped_msgs_\n@@ -218,6 +219,8 @@\n \n self.window_start = curtime\n \n+ self.stat_bytes_last_ = self.stat_bytes_window_\n+\n def callback(self, subscriber_statistics_logger, msg, stat_bytes):\n \"\"\"\n This method is called for every message, that is received on this\n@@ -240,9 +243,7 @@\n \n self.arrival_time_list_.append(arrival_time)\n \n- # Calculate how many bytes of traffic did this message need?\n- self.stat_bytes_window_ = stat_bytes - self.stat_bytes_last_\n- self.stat_bytes_last_ = stat_bytes\n+ self.stat_bytes_window_ = stat_bytes\n \n # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.\n # Those subscribers won't have a header. But as these subscribers are rather rare\n", "issue": "traffic of statistics is wrong\nThe entry traffic in statistics seems to get calculated wrong.\n\n**Example setup:**\n\n```\n$ rosparam set enable_statistics true\n```\n\n```\n$ rostopic pub /table std_msgs/String \"data: '1234' \" -r 100\n```\n\n```\n$ rostopic bw /table\nsubscribed to [/table]\naverage: 805.20B/s\n mean: 8.00B min: 8.00B max: 8.00B window: 99\n```\n\n```\n$ rostopic echo /statistics\ntopic: /table\nnode_pub: /rostopic_3963_1411225760462\nnode_sub: /rostopic_4239_1411226004615\nwindow_start: \n secs: 1411226011\n nsecs: 883635044\nwindow_stop: \n secs: 1411226019\n nsecs: 886126995\ndelivered_msgs: 800\ndropped_msgs: 0\ntraffic: 12\n[...]\n```\n\nWhereas traffic in this example should be ~ 8\\* 800Byte = **1600 Byte** but is **12 Byte**.\n\nChanging the publish frequency does not lead in a change in traffic. \n\n", "before_files": [{"content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2013-2014 Dariush Forouher\n# All rights reserved.\n#\n# Based on code adapted from diagnostics_updater by Blaise Gassend\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom math import sqrt\nimport logging\nimport sys\n\nfrom rosgraph_msgs.msg import TopicStatistics\nimport rospy\n\n_logger = logging.getLogger('rospy.impl.statistics')\n\n\nclass SubscriberStatisticsLogger():\n \"\"\"\n Class that monitors each subscriber.\n\n this class basically just keeps a collection of ConnectionStatisticsLogger.\n \"\"\"\n\n def __init__(self, subscriber):\n self.subscriber = subscriber\n self.connections = dict()\n self.read_parameters()\n\n def read_parameters(self):\n \"\"\"\n Fetch window parameters from parameter server\n \"\"\"\n\n # disable statistics if node can't talk to parameter server which is the case in unit tests\n try:\n self.enabled = rospy.get_param(\"/enable_statistics\", False)\n except:\n self.enabled = False\n return\n\n # Range of window length, in seconds\n self.min_elements = rospy.get_param(\"/statistics_window_min_elements\", 10)\n self.max_elements = rospy.get_param(\"/statistics_window_max_elements\", 100)\n\n # Range of acceptable messages in window.\n # Window size will be adjusted if number of observed is\n # outside this range.\n self.max_window = rospy.get_param(\"/statistics_window_max_size\", 64)\n self.min_window = rospy.get_param(\"/statistics_window_min_size\", 4)\n\n def is_enable_statistics(self):\n return self.enabled\n\n def callback(self, msg, publisher, stat_bytes):\n \"\"\"\n This method is called for every message that has been received.\n\n @param msg: The message received.\n @param publisher: The name of the publisher node that sent the msg\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n This method just looks up the ConnectionStatisticsLogger for the specific connection\n between publisher and subscriber and delegates to statistics logging to that\n instance.\n \"\"\"\n\n if not self.enabled:\n return\n\n # /clock is special, as it is subscribed very early\n # also exclude /statistics to reduce noise.\n if self.subscriber.name == \"/clock\" or self.subscriber.name == \"/statistics\":\n return\n\n try:\n # create ConnectionStatisticsLogger for new connections\n logger = self.connections.get(publisher)\n if logger is None:\n logger = ConnectionStatisticsLogger(self.subscriber.name, rospy.get_name(), publisher)\n self.connections[publisher] = logger\n\n # delegate stuff to that instance\n logger.callback(self, msg, stat_bytes)\n except Exception as e:\n rospy.logerr(\"Unexpected error during statistics measurement: %s\", str(e))\n\n\nclass ConnectionStatisticsLogger():\n \"\"\"\n Class that monitors lots of stuff for each connection.\n\n is created whenever a subscriber is created.\n is destroyed whenever its parent subscriber is destroyed.\n its lifecycle is therefore bound to its parent subscriber.\n \"\"\"\n\n def __init__(self, topic, subscriber, publisher):\n \"\"\"\n Constructor.\n\n @param topic: Name of the topic\n @param subscriber: Name of the subscriber\n @param publisher: Name of the publisher\n\n These three should 
uniquely identify the connection.\n \"\"\"\n\n self.topic = topic\n self.subscriber = subscriber\n self.publisher = publisher\n\n self.pub = rospy.Publisher(\"/statistics\", TopicStatistics, queue_size=10)\n\n # reset window\n self.last_pub_time = rospy.Time(0)\n self.pub_frequency = rospy.Duration(1.0)\n\n # timestamp age\n self.age_list_ = []\n\n # period calculations\n self.arrival_time_list_ = []\n\n self.last_seq_ = 0\n self.dropped_msgs_ = 0\n self.window_start = rospy.Time.now()\n\n # temporary variables\n self.stat_bytes_last_ = 0\n self.stat_bytes_window_ = 0\n\n def sendStatistics(self, subscriber_statistics_logger):\n \"\"\"\n Send out statistics. Aggregate collected stats information.\n\n Currently done blocking. Might be moved to own thread later. But at the moment\n any computation done here should be rather quick.\n \"\"\"\n curtime = rospy.Time.now()\n\n msg = TopicStatistics()\n msg.topic = self.topic\n msg.node_sub = self.subscriber\n msg.node_pub = self.publisher\n\n msg.window_start = self.window_start\n msg.window_stop = curtime\n\n msg.traffic = self.stat_bytes_window_\n\n msg.delivered_msgs = len(self.arrival_time_list_)\n msg.dropped_msgs = self.dropped_msgs_\n\n # we can only calculate message age if the messages did contain Header fields.\n if len(self.age_list_) > 0:\n msg.stamp_age_mean = rospy.Duration(sum(self.age_list_, rospy.Duration(0)).to_sec() / len(self.age_list_))\n variance = sum((rospy.Duration((msg.stamp_age_mean - value).to_sec() ** 2) for value in self.age_list_), rospy.Duration(0)) / len(self.age_list_)\n msg.stamp_age_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.stamp_age_max = max(self.age_list_)\n else:\n msg.stamp_age_mean = rospy.Duration(0)\n msg.stamp_age_stddev = rospy.Duration(0)\n msg.stamp_age_max = rospy.Duration(0)\n\n # computer period/frequency. we need at least two messages within the window to do this.\n if len(self.arrival_time_list_) > 1:\n periods = [j - i for i, j in zip(self.arrival_time_list_[:-1], self.arrival_time_list_[1:])]\n msg.period_mean = rospy.Duration(sum(periods, rospy.Duration(0)).to_sec() / len(periods))\n variance = sum((rospy.Duration((msg.period_mean - value).to_sec() ** 2) for value in periods), rospy.Duration(0)) / len(periods)\n msg.period_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.period_max = max(periods)\n else:\n msg.period_mean = rospy.Duration(0)\n msg.period_stddev = rospy.Duration(0)\n msg.period_max = rospy.Duration(0)\n\n self.pub.publish(msg)\n\n # adjust window, if message count is not appropriate.\n if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:\n self.pub_frequency *= 2\n if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:\n self.pub_frequency /= 2\n\n # clear collected stats, start new window.\n self.age_list_ = []\n self.arrival_time_list_ = []\n self.dropped_msgs_ = 0\n\n self.window_start = curtime\n\n def callback(self, subscriber_statistics_logger, msg, stat_bytes):\n \"\"\"\n This method is called for every message, that is received on this\n subscriber.\n\n this callback will keep some statistics and publish the results\n periodically on a topic. the publishing should probably be done\n asynchronically in another thread.\n\n @param msg: The message, that has been received. The message has usually\n been already deserialized. 
However this is not always the case. (AnyMsg)\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n Any computing-heavy stuff should be done somewhere else, as this\n callback has to return before the message is delivered to the user.\n \"\"\"\n\n arrival_time = rospy.Time.now()\n\n self.arrival_time_list_.append(arrival_time)\n\n # Calculate how many bytes of traffic did this message need?\n self.stat_bytes_window_ = stat_bytes - self.stat_bytes_last_\n self.stat_bytes_last_ = stat_bytes\n\n # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.\n # Those subscribers won't have a header. But as these subscribers are rather rare\n # (\"rostopic hz\" is the only one I know of), I'm gonna ignore them.\n if msg._has_header:\n self.age_list_.append(arrival_time - msg.header.stamp)\n\n if self.last_seq_ + 1 != msg.header.seq:\n self.dropped_msgs_ = self.dropped_msgs_ + 1\n self.last_seq_ = msg.header.seq\n\n # send out statistics with a certain frequency\n if self.last_pub_time + self.pub_frequency < arrival_time:\n self.last_pub_time = arrival_time\n self.sendStatistics(subscriber_statistics_logger)\n", "path": "clients/rospy/src/rospy/impl/statistics.py"}], "after_files": [{"content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2013-2014 Dariush Forouher\n# All rights reserved.\n#\n# Based on code adapted from diagnostics_updater by Blaise Gassend\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom math import sqrt\nimport logging\nimport sys\n\nfrom rosgraph_msgs.msg import TopicStatistics\nimport rospy\n\n_logger = logging.getLogger('rospy.impl.statistics')\n\n\nclass SubscriberStatisticsLogger():\n \"\"\"\n Class that monitors each subscriber.\n\n this class basically just keeps a collection of ConnectionStatisticsLogger.\n \"\"\"\n\n def __init__(self, subscriber):\n self.subscriber = subscriber\n self.connections = dict()\n self.read_parameters()\n\n def read_parameters(self):\n \"\"\"\n Fetch window parameters from parameter server\n \"\"\"\n\n # disable statistics if node can't talk to parameter server which is the case in unit tests\n try:\n self.enabled = rospy.get_param(\"/enable_statistics\", False)\n except:\n self.enabled = False\n return\n\n # Range of window length, in seconds\n self.min_elements = rospy.get_param(\"/statistics_window_min_elements\", 10)\n self.max_elements = rospy.get_param(\"/statistics_window_max_elements\", 100)\n\n # Range of acceptable messages in window.\n # Window size will be adjusted if number of observed is\n # outside this range.\n self.max_window = rospy.get_param(\"/statistics_window_max_size\", 64)\n self.min_window = rospy.get_param(\"/statistics_window_min_size\", 4)\n\n def is_enable_statistics(self):\n return self.enabled\n\n def callback(self, msg, publisher, stat_bytes):\n \"\"\"\n This method is called for every message that has been received.\n\n @param msg: The message received.\n @param publisher: The name of the publisher node that sent the msg\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n This method just looks up the ConnectionStatisticsLogger for the specific connection\n between publisher and subscriber and delegates to statistics logging to that\n instance.\n \"\"\"\n\n if not self.enabled:\n return\n\n # /clock is special, as it is subscribed very early\n # also exclude /statistics to reduce noise.\n if self.subscriber.name == \"/clock\" or self.subscriber.name == \"/statistics\":\n return\n\n try:\n # create ConnectionStatisticsLogger for new connections\n logger = self.connections.get(publisher)\n if logger is None:\n logger = ConnectionStatisticsLogger(self.subscriber.name, rospy.get_name(), publisher)\n self.connections[publisher] = logger\n\n # delegate stuff to that instance\n logger.callback(self, msg, stat_bytes)\n except Exception as e:\n rospy.logerr(\"Unexpected error during statistics measurement: %s\", str(e))\n\n\nclass ConnectionStatisticsLogger():\n \"\"\"\n Class that monitors lots of stuff for each connection.\n\n is created whenever a subscriber is created.\n is destroyed whenever its parent subscriber is destroyed.\n its lifecycle is therefore bound to its parent subscriber.\n \"\"\"\n\n def __init__(self, topic, subscriber, publisher):\n \"\"\"\n Constructor.\n\n @param topic: Name of the topic\n @param subscriber: Name of the subscriber\n @param publisher: Name of the publisher\n\n These three should 
uniquely identify the connection.\n \"\"\"\n\n self.topic = topic\n self.subscriber = subscriber\n self.publisher = publisher\n\n self.pub = rospy.Publisher(\"/statistics\", TopicStatistics, queue_size=10)\n\n # reset window\n self.last_pub_time = rospy.Time(0)\n self.pub_frequency = rospy.Duration(1.0)\n\n # timestamp age\n self.age_list_ = []\n\n # period calculations\n self.arrival_time_list_ = []\n\n self.last_seq_ = 0\n self.dropped_msgs_ = 0\n self.window_start = rospy.Time.now()\n\n # temporary variables\n self.stat_bytes_last_ = 0\n self.stat_bytes_window_ = 0\n\n def sendStatistics(self, subscriber_statistics_logger):\n \"\"\"\n Send out statistics. Aggregate collected stats information.\n\n Currently done blocking. Might be moved to own thread later. But at the moment\n any computation done here should be rather quick.\n \"\"\"\n curtime = rospy.Time.now()\n\n msg = TopicStatistics()\n msg.topic = self.topic\n msg.node_sub = self.subscriber\n msg.node_pub = self.publisher\n\n msg.window_start = self.window_start\n msg.window_stop = curtime\n\n # Calculate bytes since last message\n msg.traffic = self.stat_bytes_window_ - self.stat_bytes_last_\n\n msg.delivered_msgs = len(self.arrival_time_list_)\n msg.dropped_msgs = self.dropped_msgs_\n\n # we can only calculate message age if the messages did contain Header fields.\n if len(self.age_list_) > 0:\n msg.stamp_age_mean = rospy.Duration(sum(self.age_list_, rospy.Duration(0)).to_sec() / len(self.age_list_))\n variance = sum((rospy.Duration((msg.stamp_age_mean - value).to_sec() ** 2) for value in self.age_list_), rospy.Duration(0)) / len(self.age_list_)\n msg.stamp_age_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.stamp_age_max = max(self.age_list_)\n else:\n msg.stamp_age_mean = rospy.Duration(0)\n msg.stamp_age_stddev = rospy.Duration(0)\n msg.stamp_age_max = rospy.Duration(0)\n\n # computer period/frequency. we need at least two messages within the window to do this.\n if len(self.arrival_time_list_) > 1:\n periods = [j - i for i, j in zip(self.arrival_time_list_[:-1], self.arrival_time_list_[1:])]\n msg.period_mean = rospy.Duration(sum(periods, rospy.Duration(0)).to_sec() / len(periods))\n variance = sum((rospy.Duration((msg.period_mean - value).to_sec() ** 2) for value in periods), rospy.Duration(0)) / len(periods)\n msg.period_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.period_max = max(periods)\n else:\n msg.period_mean = rospy.Duration(0)\n msg.period_stddev = rospy.Duration(0)\n msg.period_max = rospy.Duration(0)\n\n self.pub.publish(msg)\n\n # adjust window, if message count is not appropriate.\n if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:\n self.pub_frequency *= 2\n if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:\n self.pub_frequency /= 2\n\n # clear collected stats, start new window.\n self.age_list_ = []\n self.arrival_time_list_ = []\n self.dropped_msgs_ = 0\n\n self.window_start = curtime\n\n self.stat_bytes_last_ = self.stat_bytes_window_\n\n def callback(self, subscriber_statistics_logger, msg, stat_bytes):\n \"\"\"\n This method is called for every message, that is received on this\n subscriber.\n\n this callback will keep some statistics and publish the results\n periodically on a topic. 
the publishing should probably be done\n asynchronically in another thread.\n\n @param msg: The message, that has been received. The message has usually\n been already deserialized. However this is not always the case. (AnyMsg)\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n Any computing-heavy stuff should be done somewhere else, as this\n callback has to return before the message is delivered to the user.\n \"\"\"\n\n arrival_time = rospy.Time.now()\n\n self.arrival_time_list_.append(arrival_time)\n\n self.stat_bytes_window_ = stat_bytes\n\n # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.\n # Those subscribers won't have a header. But as these subscribers are rather rare\n # (\"rostopic hz\" is the only one I know of), I'm gonna ignore them.\n if msg._has_header:\n self.age_list_.append(arrival_time - msg.header.stamp)\n\n if self.last_seq_ + 1 != msg.header.seq:\n self.dropped_msgs_ = self.dropped_msgs_ + 1\n self.last_seq_ = msg.header.seq\n\n # send out statistics with a certain frequency\n if self.last_pub_time + self.pub_frequency < arrival_time:\n self.last_pub_time = arrival_time\n self.sendStatistics(subscriber_statistics_logger)\n", "path": "clients/rospy/src/rospy/impl/statistics.py"}]}
| 3,485 | 346 |
gh_patches_debug_6457
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.general-6562
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proxmox_task_info api_password required along with api_user
### Summary
Cannot use the module with token id and secret only:
```
module = AnsibleModule(
argument_spec=module_args,
required_together=[('api_token_id', 'api_token_secret'),
('api_user', 'api_password')],
required_one_of=[('api_password', 'api_token_id')],
supports_check_mode=True)
result = dict(changed=False)
```
### Issue Type
Feature Request
### Component Name
proxmox_tasks_info
### Ansible Version
```
ansible 2.10.8
config file = /opt/git/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
executable location = /usr/bin/ansible
python version = 3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]
```
### Community.general Version
```console (paste below)
# /usr/lib/python3/dist-packages/ansible_collections
Collection Version
----------------- -------
community.general 1.3.6
# /root/.ansible/collections/ansible_collections
Collection Version
----------------- -------
community.general 6.3.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
_No response_
### Steps to Reproduce
```
- name: Check proxmox task
community.general.proxmox_tasks_info:
api_host: "{{ api_host }}"
api_user: "{{ api_user }}"
api_token_id: "{{ api_token_id }}"
api_token_secret: "{{ api_token_secret }}"
node: "{{ node }}"
task: "{{ task }}"
"msg": "parameters are required together: api_user, api_password"
```
### Expected Results
If I comment out the required_together part for api_user and api_password, the task is successful.
### Actual Results
```
The full traceback is:
File "/tmp/ansible_community.general.proxmox_tasks_info_payload_j17m_215/ansible_community.general.proxmox_tasks_info_payload.zip/ansible/module_utils/basic.py", line 1626, in _check_required_together
check_required_together(spec, param)
File "/tmp/ansible_community.general.proxmox_tasks_info_payload_j17m_215/ansible_community.general.proxmox_tasks_info_payload.zip/ansible/module_utils/common/validation.py", line 128, in check_required_together
raise TypeError(to_native(msg))
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/proxmox_tasks_info.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
5 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
6 # SPDX-License-Identifier: GPL-3.0-or-later
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r'''
13 ---
14 module: proxmox_tasks_info
15 short_description: Retrieve information about one or more Proxmox VE tasks
16 version_added: 3.8.0
17 description:
18 - Retrieve information about one or more Proxmox VE tasks.
19 author: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'
20 options:
21 node:
22 description:
23 - Node where to get tasks.
24 required: true
25 type: str
26 task:
27 description:
28 - Return specific task.
29 aliases: ['upid', 'name']
30 type: str
31 extends_documentation_fragment:
32 - community.general.proxmox.documentation
33 - community.general.attributes
34 - community.general.attributes.info_module
35 '''
36
37
38 EXAMPLES = '''
39 - name: List tasks on node01
40 community.general.proxmox_task_info:
41 api_host: proxmoxhost
42 api_user: root@pam
43 api_password: '{{ password | default(omit) }}'
44 api_token_id: '{{ token_id | default(omit) }}'
45 api_token_secret: '{{ token_secret | default(omit) }}'
46 node: node01
47 register: result
48
49 - name: Retrieve information about specific tasks on node01
50 community.general.proxmox_task_info:
51 api_host: proxmoxhost
52 api_user: root@pam
53 api_password: '{{ password | default(omit) }}'
54 api_token_id: '{{ token_id | default(omit) }}'
55 api_token_secret: '{{ token_secret | default(omit) }}'
56 task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
57 node: node01
58 register: proxmox_tasks
59 '''
60
61
62 RETURN = '''
63 proxmox_tasks:
64 description: List of tasks.
65 returned: on success
66 type: list
67 elements: dict
68 contains:
69 id:
70 description: ID of the task.
71 returned: on success
72 type: str
73 node:
74 description: Node name.
75 returned: on success
76 type: str
77 pid:
78 description: PID of the task.
79 returned: on success
80 type: int
81 pstart:
82 description: pastart of the task.
83 returned: on success
84 type: int
85 starttime:
86 description: Starting time of the task.
87 returned: on success
88 type: int
89 type:
90 description: Type of the task.
91 returned: on success
92 type: str
93 upid:
94 description: UPID of the task.
95 returned: on success
96 type: str
97 user:
98 description: User that owns the task.
99 returned: on success
100 type: str
101 endtime:
102 description: Endtime of the task.
103 returned: on success, can be absent
104 type: int
105 status:
106 description: Status of the task.
107 returned: on success, can be absent
108 type: str
109 failed:
110 description: If the task failed.
111 returned: when status is defined
112 type: bool
113 msg:
114 description: Short message.
115 returned: on failure
116 type: str
117 sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
118 '''
119
120 from ansible.module_utils.basic import AnsibleModule
121 from ansible_collections.community.general.plugins.module_utils.proxmox import (
122 proxmox_auth_argument_spec, ProxmoxAnsible)
123
124
125 class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
126 def get_task(self, upid, node):
127 tasks = self.get_tasks(node)
128 for task in tasks:
129 if task.info['upid'] == upid:
130 return [task]
131
132 def get_tasks(self, node):
133 tasks = self.proxmox_api.nodes(node).tasks.get()
134 return [ProxmoxTask(task) for task in tasks]
135
136
137 class ProxmoxTask:
138 def __init__(self, task):
139 self.info = dict()
140 for k, v in task.items():
141 if k == 'status' and isinstance(v, str):
142 self.info[k] = v
143 if v != 'OK':
144 self.info['failed'] = True
145 else:
146 self.info[k] = v
147
148
149 def proxmox_task_info_argument_spec():
150 return dict(
151 task=dict(type='str', aliases=['upid', 'name'], required=False),
152 node=dict(type='str', required=True),
153 )
154
155
156 def main():
157 module_args = proxmox_auth_argument_spec()
158 task_info_args = proxmox_task_info_argument_spec()
159 module_args.update(task_info_args)
160
161 module = AnsibleModule(
162 argument_spec=module_args,
163 required_together=[('api_token_id', 'api_token_secret'),
164 ('api_user', 'api_password')],
165 required_one_of=[('api_password', 'api_token_id')],
166 supports_check_mode=True)
167 result = dict(changed=False)
168
169 proxmox = ProxmoxTaskInfoAnsible(module)
170 upid = module.params['task']
171 node = module.params['node']
172 if upid:
173 tasks = proxmox.get_task(upid=upid, node=node)
174 else:
175 tasks = proxmox.get_tasks(node=node)
176 if tasks is not None:
177 result['proxmox_tasks'] = [task.info for task in tasks]
178 module.exit_json(**result)
179 else:
180 result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
181 upid, node)
182 module.fail_json(**result)
183
184
185 if __name__ == '__main__':
186 main()
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py
--- a/plugins/modules/proxmox_tasks_info.py
+++ b/plugins/modules/proxmox_tasks_info.py
@@ -160,8 +160,7 @@
module = AnsibleModule(
argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret'),
- ('api_user', 'api_password')],
+ required_together=[('api_token_id', 'api_token_secret')],
required_one_of=[('api_password', 'api_token_id')],
supports_check_mode=True)
result = dict(changed=False)
|
{"golden_diff": "diff --git a/plugins/modules/proxmox_tasks_info.py b/plugins/modules/proxmox_tasks_info.py\n--- a/plugins/modules/proxmox_tasks_info.py\n+++ b/plugins/modules/proxmox_tasks_info.py\n@@ -160,8 +160,7 @@\n \n module = AnsibleModule(\n argument_spec=module_args,\n- required_together=[('api_token_id', 'api_token_secret'),\n- ('api_user', 'api_password')],\n+ required_together=[('api_token_id', 'api_token_secret')],\n required_one_of=[('api_password', 'api_token_id')],\n supports_check_mode=True)\n result = dict(changed=False)\n", "issue": "Proxmox_task_info api_password required along with api_user\n### Summary\r\n\r\nCannot use the module with token id and secret only:\r\n\r\n``` \r\n module = AnsibleModule(\r\n argument_spec=module_args,\r\n required_together=[('api_token_id', 'api_token_secret'),\r\n ('api_user', 'api_password')],\r\n required_one_of=[('api_password', 'api_token_id')],\r\n supports_check_mode=True)\r\n result = dict(changed=False)\r\n```\r\n\r\n### Issue Type\r\n\r\nFeature Request\r\n\r\n### Component Name\r\n\r\nproxmox_tasks_info\r\n\r\n### Ansible Version\r\n\r\n```ansible 2.10.8\r\n config file = /opt/git/ansible.cfg\r\n configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3/dist-packages/ansible\r\n executable location = /usr/bin/ansible \r\n python version = 3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]\r\n\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n# /usr/lib/python3/dist-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 1.3.6\r\n\r\n# /root/.ansible/collections/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 6.3.0\r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\n_No response_\r\n\r\n### Steps to Reproduce\r\n\r\n\r\n```\r\n- name: Check proxmox task\r\n community.general.proxmox_tasks_info:\r\n api_host: \"{{ api_host }}\"\r\n api_user: \"{{ api_user }}\"\r\n api_token_id: \"{{ api_token_id }}\"\r\n api_token_secret: \"{{ api_token_secret }}\"\r\n node: \"{{ node }}\"\r\n task: \"{{ task }}\"\r\n\r\n\"msg\": \"parameters are required together: api_user, api_password\"\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nIf I comment out the required_together part for api_password and api_host the task is successful.\r\n\r\n### Actual Results\r\n\r\n```\r\nThe full traceback is:\r\n File \"/tmp/ansible_community.general.proxmox_tasks_info_payload_j17m_215/ansible_community.general.proxmox_tasks_info_payload.zip/ansible/module_utils/basic.py\", line 1626, in _check_required_together\r\n check_required_together(spec, param)\r\n File \"/tmp/ansible_community.general.proxmox_tasks_info_payload_j17m_215/ansible_community.general.proxmox_tasks_info_payload.zip/ansible/module_utils/common/validation.py\", line 128, in check_required_together\r\n raise TypeError(to_native(msg))\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import 
absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: proxmox_tasks_info\nshort_description: Retrieve information about one or more Proxmox VE tasks\nversion_added: 3.8.0\ndescription:\n - Retrieve information about one or more Proxmox VE tasks.\nauthor: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'\noptions:\n node:\n description:\n - Node where to get tasks.\n required: true\n type: str\n task:\n description:\n - Return specific task.\n aliases: ['upid', 'name']\n type: str\nextends_documentation_fragment:\n - community.general.proxmox.documentation\n - community.general.attributes\n - community.general.attributes.info_module\n'''\n\n\nEXAMPLES = '''\n- name: List tasks on node01\n community.general.proxmox_task_info:\n api_host: proxmoxhost\n api_user: root@pam\n api_password: '{{ password | default(omit) }}'\n api_token_id: '{{ token_id | default(omit) }}'\n api_token_secret: '{{ token_secret | default(omit) }}'\n node: node01\n register: result\n\n- name: Retrieve information about specific tasks on node01\n community.general.proxmox_task_info:\n api_host: proxmoxhost\n api_user: root@pam\n api_password: '{{ password | default(omit) }}'\n api_token_id: '{{ token_id | default(omit) }}'\n api_token_secret: '{{ token_secret | default(omit) }}'\n task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'\n node: node01\n register: proxmox_tasks\n'''\n\n\nRETURN = '''\nproxmox_tasks:\n description: List of tasks.\n returned: on success\n type: list\n elements: dict\n contains:\n id:\n description: ID of the task.\n returned: on success\n type: str\n node:\n description: Node name.\n returned: on success\n type: str\n pid:\n description: PID of the task.\n returned: on success\n type: int\n pstart:\n description: pastart of the task.\n returned: on success\n type: int\n starttime:\n description: Starting time of the task.\n returned: on success\n type: int\n type:\n description: Type of the task.\n returned: on success\n type: str\n upid:\n description: UPID of the task.\n returned: on success\n type: str\n user:\n description: User that owns the task.\n returned: on success\n type: str\n endtime:\n description: Endtime of the task.\n returned: on success, can be absent\n type: int\n status:\n description: Status of the task.\n returned: on success, can be absent\n type: str\n failed:\n description: If the task failed.\n returned: when status is defined\n type: bool\nmsg:\n description: Short message.\n returned: on failure\n type: str\n sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.general.plugins.module_utils.proxmox import (\n proxmox_auth_argument_spec, ProxmoxAnsible)\n\n\nclass ProxmoxTaskInfoAnsible(ProxmoxAnsible):\n def get_task(self, upid, node):\n tasks = self.get_tasks(node)\n for task in tasks:\n if task.info['upid'] == upid:\n return [task]\n\n def get_tasks(self, node):\n tasks = self.proxmox_api.nodes(node).tasks.get()\n return [ProxmoxTask(task) for task in tasks]\n\n\nclass ProxmoxTask:\n def __init__(self, task):\n self.info = dict()\n for k, v in task.items():\n if k == 'status' and isinstance(v, str):\n self.info[k] = v\n if v != 'OK':\n self.info['failed'] = True\n else:\n self.info[k] = v\n\n\ndef proxmox_task_info_argument_spec():\n return dict(\n task=dict(type='str', aliases=['upid', 'name'], required=False),\n node=dict(type='str', required=True),\n )\n\n\ndef 
main():\n module_args = proxmox_auth_argument_spec()\n task_info_args = proxmox_task_info_argument_spec()\n module_args.update(task_info_args)\n\n module = AnsibleModule(\n argument_spec=module_args,\n required_together=[('api_token_id', 'api_token_secret'),\n ('api_user', 'api_password')],\n required_one_of=[('api_password', 'api_token_id')],\n supports_check_mode=True)\n result = dict(changed=False)\n\n proxmox = ProxmoxTaskInfoAnsible(module)\n upid = module.params['task']\n node = module.params['node']\n if upid:\n tasks = proxmox.get_task(upid=upid, node=node)\n else:\n tasks = proxmox.get_tasks(node=node)\n if tasks is not None:\n result['proxmox_tasks'] = [task.info for task in tasks]\n module.exit_json(**result)\n else:\n result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(\n upid, node)\n module.fail_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/proxmox_tasks_info.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: proxmox_tasks_info\nshort_description: Retrieve information about one or more Proxmox VE tasks\nversion_added: 3.8.0\ndescription:\n - Retrieve information about one or more Proxmox VE tasks.\nauthor: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'\noptions:\n node:\n description:\n - Node where to get tasks.\n required: true\n type: str\n task:\n description:\n - Return specific task.\n aliases: ['upid', 'name']\n type: str\nextends_documentation_fragment:\n - community.general.proxmox.documentation\n - community.general.attributes\n - community.general.attributes.info_module\n'''\n\n\nEXAMPLES = '''\n- name: List tasks on node01\n community.general.proxmox_task_info:\n api_host: proxmoxhost\n api_user: root@pam\n api_password: '{{ password | default(omit) }}'\n api_token_id: '{{ token_id | default(omit) }}'\n api_token_secret: '{{ token_secret | default(omit) }}'\n node: node01\n register: result\n\n- name: Retrieve information about specific tasks on node01\n community.general.proxmox_task_info:\n api_host: proxmoxhost\n api_user: root@pam\n api_password: '{{ password | default(omit) }}'\n api_token_id: '{{ token_id | default(omit) }}'\n api_token_secret: '{{ token_secret | default(omit) }}'\n task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'\n node: node01\n register: proxmox_tasks\n'''\n\n\nRETURN = '''\nproxmox_tasks:\n description: List of tasks.\n returned: on success\n type: list\n elements: dict\n contains:\n id:\n description: ID of the task.\n returned: on success\n type: str\n node:\n description: Node name.\n returned: on success\n type: str\n pid:\n description: PID of the task.\n returned: on success\n type: int\n pstart:\n description: pastart of the task.\n returned: on success\n type: int\n starttime:\n description: Starting time of the task.\n returned: on success\n type: int\n type:\n description: Type of the task.\n returned: on success\n type: str\n upid:\n description: UPID of the task.\n returned: on success\n type: str\n user:\n description: User that owns the task.\n returned: on success\n type: str\n endtime:\n description: Endtime of the task.\n 
returned: on success, can be absent\n type: int\n status:\n description: Status of the task.\n returned: on success, can be absent\n type: str\n failed:\n description: If the task failed.\n returned: when status is defined\n type: bool\nmsg:\n description: Short message.\n returned: on failure\n type: str\n sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.general.plugins.module_utils.proxmox import (\n proxmox_auth_argument_spec, ProxmoxAnsible)\n\n\nclass ProxmoxTaskInfoAnsible(ProxmoxAnsible):\n def get_task(self, upid, node):\n tasks = self.get_tasks(node)\n for task in tasks:\n if task.info['upid'] == upid:\n return [task]\n\n def get_tasks(self, node):\n tasks = self.proxmox_api.nodes(node).tasks.get()\n return [ProxmoxTask(task) for task in tasks]\n\n\nclass ProxmoxTask:\n def __init__(self, task):\n self.info = dict()\n for k, v in task.items():\n if k == 'status' and isinstance(v, str):\n self.info[k] = v\n if v != 'OK':\n self.info['failed'] = True\n else:\n self.info[k] = v\n\n\ndef proxmox_task_info_argument_spec():\n return dict(\n task=dict(type='str', aliases=['upid', 'name'], required=False),\n node=dict(type='str', required=True),\n )\n\n\ndef main():\n module_args = proxmox_auth_argument_spec()\n task_info_args = proxmox_task_info_argument_spec()\n module_args.update(task_info_args)\n\n module = AnsibleModule(\n argument_spec=module_args,\n required_together=[('api_token_id', 'api_token_secret')],\n required_one_of=[('api_password', 'api_token_id')],\n supports_check_mode=True)\n result = dict(changed=False)\n\n proxmox = ProxmoxTaskInfoAnsible(module)\n upid = module.params['task']\n node = module.params['node']\n if upid:\n tasks = proxmox.get_task(upid=upid, node=node)\n else:\n tasks = proxmox.get_tasks(node=node)\n if tasks is not None:\n result['proxmox_tasks'] = [task.info for task in tasks]\n module.exit_json(**result)\n else:\n result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(\n upid, node)\n module.fail_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/proxmox_tasks_info.py"}]}
| 2,700 | 143 |
gh_patches_debug_12257
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-6745
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FreeIPA LDAP sync broken in 2023.8.*
**Describe the bug**
After upgrading Authentik to `2023.8.0` (and `2023.8.1`), the LDAP sync with FreeIPA seems to be broken. The `ldap_sync:freeipa:groups` tasks complete without issues, but the `ldap_sync:freeipa:users` task fails:
```
ldap_sync:freeipa:users:cd24b105-15d6-4d2b-85bc-29492f0f86a8
Task finished with errors
["“['TRUE']” value must be either True or False."]
```
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'if/admin/#/core/sources'
2. Click on the FreeIPA LDAP source
3. Click "Run Sync Again"
**Expected behavior**
It is expected that the sync completes without errors
**Screenshots**

**Logs**
Error log from `if/admin/#/events/log` --> `System task exception: authentik.events.monitored_tasks`:
```
Task ldap_sync encountered an error: Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/celery/app/trace.py", line 477, in trace_task
R = retval = fun(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py", line 275, in _inner
reraise(*exc_info)
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/_compat.py", line 60, in reraise
raise value
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py", line 270, in _inner
return f(*args, **kwargs)
^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/celery/app/trace.py", line 760, in __protected_call__
return self.run(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/sources/ldap/tasks.py", line 97, in ldap_sync
count = sync_inst.sync(page)
^^^^^^^^^^^^^^^^^^^^
File "/authentik/sources/ldap/sync/users.py", line 73, in sync
FreeIPA(self._source).sync(attributes, ak_user, created)
File "/authentik/sources/ldap/sync/vendor/freeipa.py", line 23, in sync
self.check_nsaccountlock(attributes, user)
File "/authentik/sources/ldap/sync/vendor/freeipa.py", line 51, in check_nsaccountlock
user.save()
File "/usr/local/lib/python3.11/site-packages/django/contrib/auth/base_user.py", line 76, in save
super().save(*args, **kwargs)
File "/usr/local/lib/python3.11/site-packages/django/db/models/base.py", line 814, in save
self.save_base(
File "/usr/local/lib/python3.11/site-packages/django/db/models/base.py", line 877, in save_base
updated = self._save_table(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/base.py", line 990, in _save_table
updated = self._do_update(
^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/base.py", line 1054, in _do_update
return filtered._update(values) > 0
^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/query.py", line 1231, in _update
return query.get_compiler(self.db).execute_sql(CURSOR)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/sql/compiler.py", line 1984, in execute_sql
cursor = super().execute_sql(result_type)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/sql/compiler.py", line 1549, in execute_sql
sql, params = self.as_sql()
^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/sql/compiler.py", line 1947, in as_sql
val = field.get_db_prep_save(val, connection=self.connection)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py", line 954, in get_db_prep_save
return self.get_db_prep_value(value, connection=connection, prepared=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py", line 947, in get_db_prep_value
value = self.get_prep_value(value)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py", line 1122, in get_prep_value
return self.to_python(value)
^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py", line 1112, in to_python
raise exceptions.ValidationError(
django.core.exceptions.ValidationError: ["“['TRUE']” value must be either True or False."]
```
**Version and Deployment (please complete the following information):**
- authentik version: `2023.8.0`, `2023.8.1`
- Deployment: `docker-compose`
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/ldap/sync/vendor/freeipa.py`
Content:
```
1 """FreeIPA specific"""
2 from datetime import datetime
3 from typing import Any, Generator
4
5 from pytz import UTC
6
7 from authentik.core.models import User
8 from authentik.sources.ldap.sync.base import BaseLDAPSynchronizer
9
10
11 class FreeIPA(BaseLDAPSynchronizer):
12 """FreeIPA-specific LDAP"""
13
14 @staticmethod
15 def name() -> str:
16 return "freeipa"
17
18 def get_objects(self, **kwargs) -> Generator:
19 yield None
20
21 def sync(self, attributes: dict[str, Any], user: User, created: bool):
22 self.check_pwd_last_set(attributes, user, created)
23 self.check_nsaccountlock(attributes, user)
24
25 def check_pwd_last_set(self, attributes: dict[str, Any], user: User, created: bool):
26 """Check krbLastPwdChange"""
27 if "krbLastPwdChange" not in attributes:
28 return
29 pwd_last_set: datetime = attributes.get("krbLastPwdChange", datetime.now())
30 pwd_last_set = pwd_last_set.replace(tzinfo=UTC)
31 if created or pwd_last_set >= user.password_change_date:
32 self.message(f"'{user.username}': Reset user's password")
33 self._logger.debug(
34 "Reset user's password",
35 user=user.username,
36 created=created,
37 pwd_last_set=pwd_last_set,
38 )
39 user.set_unusable_password()
40 user.save()
41
42 def check_nsaccountlock(self, attributes: dict[str, Any], user: User):
43 """https://www.port389.org/docs/389ds/howto/howto-account-inactivation.html"""
44 # This is more of a 389-ds quirk rather than FreeIPA, but FreeIPA uses
45 # 389-ds and this will trigger regardless
46 if "nsaccountlock" not in attributes:
47 return
48 is_active = attributes.get("nsaccountlock", False)
49 if is_active != user.is_active:
50 user.is_active = is_active
51 user.save()
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/sources/ldap/sync/vendor/freeipa.py b/authentik/sources/ldap/sync/vendor/freeipa.py
--- a/authentik/sources/ldap/sync/vendor/freeipa.py
+++ b/authentik/sources/ldap/sync/vendor/freeipa.py
@@ -45,7 +45,11 @@
# 389-ds and this will trigger regardless
if "nsaccountlock" not in attributes:
return
- is_active = attributes.get("nsaccountlock", False)
+ # For some reason, nsaccountlock is not defined properly in the schema as bool
+ # hence we get it as a list of strings
+ _is_active = str(self._flatten(attributes.get("nsaccountlock", ["FALSE"])))
+ # So we have to attempt to convert it to a bool
+ is_active = _is_active.lower() == "true"
if is_active != user.is_active:
user.is_active = is_active
user.save()
|
{"golden_diff": "diff --git a/authentik/sources/ldap/sync/vendor/freeipa.py b/authentik/sources/ldap/sync/vendor/freeipa.py\n--- a/authentik/sources/ldap/sync/vendor/freeipa.py\n+++ b/authentik/sources/ldap/sync/vendor/freeipa.py\n@@ -45,7 +45,11 @@\n # 389-ds and this will trigger regardless\n if \"nsaccountlock\" not in attributes:\n return\n- is_active = attributes.get(\"nsaccountlock\", False)\n+ # For some reason, nsaccountlock is not defined properly in the schema as bool\n+ # hence we get it as a list of strings\n+ _is_active = str(self._flatten(attributes.get(\"nsaccountlock\", [\"FALSE\"])))\n+ # So we have to attempt to convert it to a bool\n+ is_active = _is_active.lower() == \"true\"\n if is_active != user.is_active:\n user.is_active = is_active\n user.save()\n", "issue": "FreeIPA LDAP sync broken in 2023.8.*\n**Describe the bug**\r\nAfter upgrading Authentik to `2023.8.0` (and `2023.8.1`), the LDAP sync with FreeIPA seems to be broken. The `ldap_sync:freeipa:groups` tasks complete without issues, but the `ldap_sync:freeipa:users` task fails:\r\n\r\n```\r\nldap_sync:freeipa:users:cd24b105-15d6-4d2b-85bc-29492f0f86a8\r\n\r\nTask finished with errors\r\n[\"\u201c['TRUE']\u201d value must be either True or False.\"]\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to 'if/admin/#/core/sources'\r\n2. Click on the FreeIPA LDAP source\r\n3. Click \"Run Sync Again\"\r\n\r\n**Expected behavior**\r\nIt is expected that the sync completes without errors\r\n\r\n**Screenshots**\r\n\r\n\r\n**Logs**\r\nError log from `if/admin/#/events/log` --> `System task exception: authentik.events.monitored_tasks`:\r\n\r\n```\r\nTask ldap_sync encountered an error: Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/celery/app/trace.py\", line 477, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py\", line 275, in _inner\r\n reraise(*exc_info)\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/_compat.py\", line 60, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py\", line 270, in _inner\r\n return f(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/celery/app/trace.py\", line 760, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/sources/ldap/tasks.py\", line 97, in ldap_sync\r\n count = sync_inst.sync(page)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/sources/ldap/sync/users.py\", line 73, in sync\r\n FreeIPA(self._source).sync(attributes, ak_user, created)\r\n File \"/authentik/sources/ldap/sync/vendor/freeipa.py\", line 23, in sync\r\n self.check_nsaccountlock(attributes, user)\r\n File \"/authentik/sources/ldap/sync/vendor/freeipa.py\", line 51, in check_nsaccountlock\r\n user.save()\r\n File \"/usr/local/lib/python3.11/site-packages/django/contrib/auth/base_user.py\", line 76, in save\r\n super().save(*args, **kwargs)\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/base.py\", line 814, in save\r\n self.save_base(\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/base.py\", line 877, in save_base\r\n updated = self._save_table(\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/base.py\", line 990, in _save_table\r\n updated = self._do_update(\r\n ^^^^^^^^^^^^^^^^\r\n File 
\"/usr/local/lib/python3.11/site-packages/django/db/models/base.py\", line 1054, in _do_update\r\n return filtered._update(values) > 0\r\n ^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/query.py\", line 1231, in _update\r\n return query.get_compiler(self.db).execute_sql(CURSOR)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/sql/compiler.py\", line 1984, in execute_sql\r\n cursor = super().execute_sql(result_type)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/sql/compiler.py\", line 1549, in execute_sql\r\n sql, params = self.as_sql()\r\n ^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/sql/compiler.py\", line 1947, in as_sql\r\n val = field.get_db_prep_save(val, connection=self.connection)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py\", line 954, in get_db_prep_save\r\n return self.get_db_prep_value(value, connection=connection, prepared=False)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py\", line 947, in get_db_prep_value\r\n value = self.get_prep_value(value)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py\", line 1122, in get_prep_value\r\n return self.to_python(value)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/django/db/models/fields/__init__.py\", line 1112, in to_python\r\n raise exceptions.ValidationError(\r\ndjango.core.exceptions.ValidationError: [\"\u201c['TRUE']\u201d value must be either True or False.\"]\r\n```\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: `2023.8.0`, `2023.8.1`\r\n- Deployment: `docker-compose`\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "\"\"\"FreeIPA specific\"\"\"\nfrom datetime import datetime\nfrom typing import Any, Generator\n\nfrom pytz import UTC\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.sync.base import BaseLDAPSynchronizer\n\n\nclass FreeIPA(BaseLDAPSynchronizer):\n \"\"\"FreeIPA-specific LDAP\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"freeipa\"\n\n def get_objects(self, **kwargs) -> Generator:\n yield None\n\n def sync(self, attributes: dict[str, Any], user: User, created: bool):\n self.check_pwd_last_set(attributes, user, created)\n self.check_nsaccountlock(attributes, user)\n\n def check_pwd_last_set(self, attributes: dict[str, Any], user: User, created: bool):\n \"\"\"Check krbLastPwdChange\"\"\"\n if \"krbLastPwdChange\" not in attributes:\n return\n pwd_last_set: datetime = attributes.get(\"krbLastPwdChange\", datetime.now())\n pwd_last_set = pwd_last_set.replace(tzinfo=UTC)\n if created or pwd_last_set >= user.password_change_date:\n self.message(f\"'{user.username}': Reset user's password\")\n self._logger.debug(\n \"Reset user's password\",\n user=user.username,\n created=created,\n pwd_last_set=pwd_last_set,\n )\n user.set_unusable_password()\n user.save()\n\n def check_nsaccountlock(self, attributes: dict[str, Any], user: User):\n \"\"\"https://www.port389.org/docs/389ds/howto/howto-account-inactivation.html\"\"\"\n # This is more of a 389-ds quirk rather than FreeIPA, but FreeIPA 
uses\n # 389-ds and this will trigger regardless\n if \"nsaccountlock\" not in attributes:\n return\n is_active = attributes.get(\"nsaccountlock\", False)\n if is_active != user.is_active:\n user.is_active = is_active\n user.save()\n", "path": "authentik/sources/ldap/sync/vendor/freeipa.py"}], "after_files": [{"content": "\"\"\"FreeIPA specific\"\"\"\nfrom datetime import datetime\nfrom typing import Any, Generator\n\nfrom pytz import UTC\n\nfrom authentik.core.models import User\nfrom authentik.sources.ldap.sync.base import BaseLDAPSynchronizer\n\n\nclass FreeIPA(BaseLDAPSynchronizer):\n \"\"\"FreeIPA-specific LDAP\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"freeipa\"\n\n def get_objects(self, **kwargs) -> Generator:\n yield None\n\n def sync(self, attributes: dict[str, Any], user: User, created: bool):\n self.check_pwd_last_set(attributes, user, created)\n self.check_nsaccountlock(attributes, user)\n\n def check_pwd_last_set(self, attributes: dict[str, Any], user: User, created: bool):\n \"\"\"Check krbLastPwdChange\"\"\"\n if \"krbLastPwdChange\" not in attributes:\n return\n pwd_last_set: datetime = attributes.get(\"krbLastPwdChange\", datetime.now())\n pwd_last_set = pwd_last_set.replace(tzinfo=UTC)\n if created or pwd_last_set >= user.password_change_date:\n self.message(f\"'{user.username}': Reset user's password\")\n self._logger.debug(\n \"Reset user's password\",\n user=user.username,\n created=created,\n pwd_last_set=pwd_last_set,\n )\n user.set_unusable_password()\n user.save()\n\n def check_nsaccountlock(self, attributes: dict[str, Any], user: User):\n \"\"\"https://www.port389.org/docs/389ds/howto/howto-account-inactivation.html\"\"\"\n # This is more of a 389-ds quirk rather than FreeIPA, but FreeIPA uses\n # 389-ds and this will trigger regardless\n if \"nsaccountlock\" not in attributes:\n return\n # For some reason, nsaccountlock is not defined properly in the schema as bool\n # hence we get it as a list of strings\n _is_active = str(self._flatten(attributes.get(\"nsaccountlock\", [\"FALSE\"])))\n # So we have to attempt to convert it to a bool\n is_active = _is_active.lower() == \"true\"\n if is_active != user.is_active:\n user.is_active = is_active\n user.save()\n", "path": "authentik/sources/ldap/sync/vendor/freeipa.py"}]}
| 2,221 | 219 |
gh_patches_debug_30996
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-2328
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cosmetic space to add for TensorsNotCollocatedException
**Describe the bug**
Cosmetic spacing, just 1, missing for TensorsNotCollocatedException
**To Reproduce**
1. initiate two tensors, one send to remote worker, one locate
2. add two tensors
2. get TensorsNotCollocatedException
```
bob = sy.VirtualWorker(hook, id="bob")
x = torch.tensor([1,2,3,4,5]).send(bob)
y = torch.tensor([1,1,1,1,1])
z = x+y
```
**Expected behavior**
" involving two tensors where one tensor is actually locatedon another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
becomes
" involving two tensors where one tensor is actually located on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
`locatedon` becomes `located on`
**Screenshots**
[screenshot of error message](https://github.com/theoptips/udacity_project_submission/blob/master/Screen%20Shot%202019-06-28%20at%202.50.34%20PM.png)
[exception.py with proposed change](https://github.com/theoptips/PySyft/blob/dev/syft/exceptions.py)
[commit message with proposed change explained](https://github.com/theoptips/PySyft/commit/533da84afa6ac4071a58754e97c21ce1ca7056aa)
[exception.py changed line highlighted](https://github.com/theoptips/PySyft/commit/4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `syft/exceptions.py`
Content:
```
1 """Specific Pysyft exceptions."""
2
3 import syft as sy
4 import torch
5 from tblib import Traceback
6 import traceback
7 from six import reraise
8 from typing import Tuple
9
10
11 class PureTorchTensorFoundError(BaseException):
12 """Exception raised for errors in the input.
13 This error is used in a recursive analysis of the args provided as an
14 input of a function, to break the recursion if a TorchTensor is found
15 as it means that _probably_ all the tensors are pure torch tensor and
16 the function can be applied natively on this input.
17
18 Attributes:
19 expression -- input expression in which the error occurred
20 message -- explanation of the error
21 """
22
23 pass
24
25
26 class RemoteObjectFoundError(BaseException):
27 """Exception raised for errors in the input.
28 This error is used in a context similar to PureTorchTensorFoundError but
29 to indicate that a Pointer to a remote tensor was found in the input
30 and thus that the command should be send elsewhere. The pointer retrieved
31 by the error gives the location where the command should be sent.
32
33 Attributes:
34 expression -- input expression in which the error occurred
35 message -- explanation of the error
36 """
37
38 def __init__(self, pointer):
39 self.pointer = pointer
40
41
42 class InvalidTensorForRemoteGet(Exception):
43 """Raised when a chain of pointer tensors is not provided for `remote_get`."""
44
45 def __init__(self, tensor: object):
46 message = "Tensor does not have attribute child. You remote get should be called on a chain of pointer tensors, instead you called it on {}.".format(
47 tensor
48 )
49 super().__init__(message)
50
51
52 class WorkerNotFoundException(Exception):
53 """Raised when a non-existent worker is requested."""
54
55 pass
56
57
58 class CompressionNotFoundException(Exception):
59 """Raised when a non existent compression/decompression scheme is requested."""
60
61 pass
62
63
64 class CannotRequestObjectAttribute(Exception):
65 """Raised when .get() is called on a pointer which points to an attribute of
66 another object."""
67
68 pass
69
70
71 class TensorsNotCollocatedException(Exception):
72 """Raised when a command is executed on two tensors which are not
73 on the same machine. The goal is to provide as useful input as possible
74 to help the user identify which tensors are where so that they can debug
75 which one needs to be moved."""
76
77 def __init__(self, tensor_a, tensor_b, attr="a method"):
78
79 if hasattr(tensor_a, "child") and tensor_a.is_wrapper:
80 tensor_a = tensor_a.child
81
82 if hasattr(tensor_b, "child") and tensor_b.is_wrapper:
83 tensor_b = tensor_b.child
84
85 if isinstance(tensor_a, sy.PointerTensor) and isinstance(tensor_b, sy.PointerTensor):
86 message = (
87 "You tried to call "
88 + attr
89 + " involving two tensors which "
90 + "are not on the same machine! One tensor is on "
91 + str(tensor_a.location)
92 + " while the other is on "
93 + str(tensor_b.location)
94 + ". Use a combination of .move(), .get(), and/or .send() to co-locate them to the same machine."
95 )
96 elif isinstance(tensor_a, sy.PointerTensor):
97 message = (
98 "You tried to call "
99 + attr
100 + " involving two tensors where one tensor is actually located"
101 + "on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
102 + str(tensor_a.location.id)
103 + ") on the other tensor.\n"
104 + "\nTensor A: "
105 + str(tensor_a)
106 + "\nTensor B: "
107 + str(tensor_b)
108 )
109 elif isinstance(tensor_b, sy.PointerTensor):
110 message = (
111 "You tried to call "
112 + attr
113 + " involving two tensors where one tensor is actually located"
114 + "on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
115 + str(tensor_b.location.id)
116 + ") on the other tensor.\n"
117 + "\nTensor A: "
118 + str(tensor_a)
119 + "\nTensor B: "
120 + str(tensor_b)
121 )
122 else:
123 message = (
124 "You tried to call "
125 + attr
126 + " involving two tensors which are not on the same machine."
127 + "Try calling .send(), .move(), and/or .get() on these tensors to get them to the same"
128 + "worker before calling methods that involve them working together."
129 )
130
131 super().__init__(message)
132
133 self.tensor_a = tensor_a
134 self.tensor_b = tensor_b
135
136
137 class ResponseSignatureError(Exception):
138 """Raised when the return of a hooked function is not correctly predicted
139 (when defining in advance ids for results)"""
140
141 def __init__(self, ids_generated=None):
142 self.ids_generated = ids_generated
143
144 def get_attributes(self):
145 """
146 Specify all the attributes need to report an error correctly.
147 """
148 return {"ids_generated": self.ids_generated}
149
150 @staticmethod
151 def simplify(e):
152 """
153 Serialize information about an Exception which was raised to forward it
154 """
155 # Get information about the exception: type of error, traceback
156 tp = type(e)
157 tb = e.__traceback__
158 # Serialize the traceback
159 traceback_str = "Traceback (most recent call last):\n" + "".join(traceback.format_tb(tb))
160 # Include special attributes if relevant
161 try:
162 attributes = e.get_attributes()
163 except AttributeError:
164 attributes = {}
165 return tp.__name__, traceback_str, sy.serde._simplify(attributes)
166
167 @staticmethod
168 def detail(worker: "sy.workers.AbstractWorker", error_tuple: Tuple[str, str, dict]):
169 """
170 Detail and re-raise an Exception forwarded by another worker
171 """
172 error_name, traceback_str, attributes = error_tuple
173 error_name, traceback_str = error_name.decode("utf-8"), traceback_str.decode("utf-8")
174 attributes = sy.serde._detail(worker, attributes)
175 # De-serialize the traceback
176 tb = Traceback.from_string(traceback_str)
177 # Check that the error belongs to a valid set of Exceptions
178 if error_name in dir(sy.exceptions):
179 error_type = getattr(sy.exceptions, error_name)
180 error = error_type()
181 # Include special attributes if any
182 for attr_name, attr in attributes.items():
183 setattr(error, attr_name, attr)
184 reraise(error_type, error, tb.as_traceback())
185 else:
186 raise ValueError(f"Invalid Exception returned:\n{traceback_str}")
187
188
189 class GetNotPermittedError(Exception):
190 """Raised when calling get on a pointer to a tensor which does not allow
191 get to be called on it. This can happen do to sensitivity being too high"""
192
193 @staticmethod
194 def simplify(e):
195 """
196 Serialize information about an Exception which was raised to forward it
197 """
198 # Get information about the exception: type of error, traceback
199 tp = type(e)
200 tb = e.__traceback__
201 # Serialize the traceback
202 traceback_str = "Traceback (most recent call last):\n" + "".join(traceback.format_tb(tb))
203 # Include special attributes if relevant
204 try:
205 attributes = e.get_attributes()
206 except AttributeError:
207 attributes = {}
208 return tp.__name__, traceback_str, sy.serde._simplify(attributes)
209
210 @staticmethod
211 def detail(worker: "sy.workers.AbstractWorker", error_tuple: Tuple[str, str, dict]):
212 """
213 Detail and re-raise an Exception forwarded by another worker
214 """
215 error_name, traceback_str, attributes = error_tuple
216 error_name, traceback_str = error_name.decode("utf-8"), traceback_str.decode("utf-8")
217 attributes = sy.serde._detail(worker, attributes)
218 # De-serialize the traceback
219 tb = Traceback.from_string(traceback_str)
220 # Check that the error belongs to a valid set of Exceptions
221 if error_name in dir(sy.exceptions):
222 error_type = getattr(sy.exceptions, error_name)
223 error = error_type()
224 # Include special attributes if any
225 for attr_name, attr in attributes.items():
226 setattr(error, attr_name, attr)
227 reraise(error_type, error, tb.as_traceback())
228 else:
229 raise ValueError(f"Invalid Exception returned:\n{traceback_str}")
230
231
232 class IdNotUniqueError(Exception):
233 """Raised by the ID Provider when setting ids that have already been generated"""
234
235 pass
236
237
238 def route_method_exception(exception, self, args, kwargs):
239 try:
240 if self.is_wrapper:
241 if isinstance(self.child, sy.PointerTensor):
242 if len(args) > 0:
243 if not args[0].is_wrapper:
244 return TensorsNotCollocatedException(self, args[0])
245 elif isinstance(args[0].child, sy.PointerTensor):
246 if self.location != args[0].child.location:
247 return TensorsNotCollocatedException(self, args[0])
248
249 # if self is a normal tensor
250 elif isinstance(self, torch.Tensor):
251 if len(args) > 0:
252 if args[0].is_wrapper:
253 if isinstance(args[0].child, sy.PointerTensor):
254 return TensorsNotCollocatedException(self, args[0])
255 elif isinstance(args[0], sy.PointerTensor):
256 return TensorsNotCollocatedException(self, args[0])
257 except:
258 ""
259 return exception
260
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/syft/exceptions.py b/syft/exceptions.py
--- a/syft/exceptions.py
+++ b/syft/exceptions.py
@@ -86,8 +86,8 @@
message = (
"You tried to call "
+ attr
- + " involving two tensors which "
- + "are not on the same machine! One tensor is on "
+ + " involving two tensors which"
+ + " are not on the same machine! One tensor is on "
+ str(tensor_a.location)
+ " while the other is on "
+ str(tensor_b.location)
@@ -98,7 +98,7 @@
"You tried to call "
+ attr
+ " involving two tensors where one tensor is actually located"
- + "on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
+ + " on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
+ str(tensor_a.location.id)
+ ") on the other tensor.\n"
+ "\nTensor A: "
@@ -111,7 +111,7 @@
"You tried to call "
+ attr
+ " involving two tensors where one tensor is actually located"
- + "on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
+ + " on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send("
+ str(tensor_b.location.id)
+ ") on the other tensor.\n"
+ "\nTensor A: "
|
{"golden_diff": "diff --git a/syft/exceptions.py b/syft/exceptions.py\n--- a/syft/exceptions.py\n+++ b/syft/exceptions.py\n@@ -86,8 +86,8 @@\n message = (\n \"You tried to call \"\n + attr\n- + \" involving two tensors which \"\n- + \"are not on the same machine! One tensor is on \"\n+ + \" involving two tensors which\"\n+ + \" are not on the same machine! One tensor is on \"\n + str(tensor_a.location)\n + \" while the other is on \"\n + str(tensor_b.location)\n@@ -98,7 +98,7 @@\n \"You tried to call \"\n + attr\n + \" involving two tensors where one tensor is actually located\"\n- + \"on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n+ + \" on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n + str(tensor_a.location.id)\n + \") on the other tensor.\\n\"\n + \"\\nTensor A: \"\n@@ -111,7 +111,7 @@\n \"You tried to call \"\n + attr\n + \" involving two tensors where one tensor is actually located\"\n- + \"on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n+ + \" on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n + str(tensor_b.location.id)\n + \") on the other tensor.\\n\"\n + \"\\nTensor A: \"\n", "issue": "Cosmetic space to add for TensorsNotCollocatedException\n**Describe the bug**\r\nCosmetic spacing, just 1, missing for TensorsNotCollocatedException\r\n\r\n**To Reproduce**\r\n1. initiate two tensors, one send to remote worker, one locate\r\n2. add two tensors\r\n2. get TensorsNotCollocatedException\r\n\r\n```\r\nbob = sy.VirtualWorker(hook, id=\"bob\")\r\nx = torch.tensor([1,2,3,4,5]).send(bob)\r\ny = torch.tensor([1,1,1,1,1])\r\nz = x+y\r\n```\r\n\r\n**Expected behavior**\r\n\" involving two tensors where one tensor is actually locatedon another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\r\n\r\nbecomes \r\n\" involving two tensors where one tensor is actually located on another machine (is a PointerTensor). 
Call .get() on the PointerTensor or .send(\"\r\n\r\n`locatedon` becomes `located on`\r\n\r\n\r\n**Screenshots**\r\n[screenshot of error message](https://github.com/theoptips/udacity_project_submission/blob/master/Screen%20Shot%202019-06-28%20at%202.50.34%20PM.png)\r\n\r\n\r\n\r\n[exception.py with proposed change](https://github.com/theoptips/PySyft/blob/dev/syft/exceptions.py)\r\n[commit message with proposed change explained](https://github.com/theoptips/PySyft/commit/533da84afa6ac4071a58754e97c21ce1ca7056aa)\r\n[exception.py changed line highlighted](https://github.com/theoptips/PySyft/commit/4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc)\n", "before_files": [{"content": "\"\"\"Specific Pysyft exceptions.\"\"\"\n\nimport syft as sy\nimport torch\nfrom tblib import Traceback\nimport traceback\nfrom six import reraise\nfrom typing import Tuple\n\n\nclass PureTorchTensorFoundError(BaseException):\n \"\"\"Exception raised for errors in the input.\n This error is used in a recursive analysis of the args provided as an\n input of a function, to break the recursion if a TorchTensor is found\n as it means that _probably_ all the tensors are pure torch tensor and\n the function can be applied natively on this input.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n pass\n\n\nclass RemoteObjectFoundError(BaseException):\n \"\"\"Exception raised for errors in the input.\n This error is used in a context similar to PureTorchTensorFoundError but\n to indicate that a Pointer to a remote tensor was found in the input\n and thus that the command should be send elsewhere. The pointer retrieved\n by the error gives the location where the command should be sent.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, pointer):\n self.pointer = pointer\n\n\nclass InvalidTensorForRemoteGet(Exception):\n \"\"\"Raised when a chain of pointer tensors is not provided for `remote_get`.\"\"\"\n\n def __init__(self, tensor: object):\n message = \"Tensor does not have attribute child. You remote get should be called on a chain of pointer tensors, instead you called it on {}.\".format(\n tensor\n )\n super().__init__(message)\n\n\nclass WorkerNotFoundException(Exception):\n \"\"\"Raised when a non-existent worker is requested.\"\"\"\n\n pass\n\n\nclass CompressionNotFoundException(Exception):\n \"\"\"Raised when a non existent compression/decompression scheme is requested.\"\"\"\n\n pass\n\n\nclass CannotRequestObjectAttribute(Exception):\n \"\"\"Raised when .get() is called on a pointer which points to an attribute of\n another object.\"\"\"\n\n pass\n\n\nclass TensorsNotCollocatedException(Exception):\n \"\"\"Raised when a command is executed on two tensors which are not\n on the same machine. The goal is to provide as useful input as possible\n to help the user identify which tensors are where so that they can debug\n which one needs to be moved.\"\"\"\n\n def __init__(self, tensor_a, tensor_b, attr=\"a method\"):\n\n if hasattr(tensor_a, \"child\") and tensor_a.is_wrapper:\n tensor_a = tensor_a.child\n\n if hasattr(tensor_b, \"child\") and tensor_b.is_wrapper:\n tensor_b = tensor_b.child\n\n if isinstance(tensor_a, sy.PointerTensor) and isinstance(tensor_b, sy.PointerTensor):\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors which \"\n + \"are not on the same machine! 
One tensor is on \"\n + str(tensor_a.location)\n + \" while the other is on \"\n + str(tensor_b.location)\n + \". Use a combination of .move(), .get(), and/or .send() to co-locate them to the same machine.\"\n )\n elif isinstance(tensor_a, sy.PointerTensor):\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors where one tensor is actually located\"\n + \"on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n + str(tensor_a.location.id)\n + \") on the other tensor.\\n\"\n + \"\\nTensor A: \"\n + str(tensor_a)\n + \"\\nTensor B: \"\n + str(tensor_b)\n )\n elif isinstance(tensor_b, sy.PointerTensor):\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors where one tensor is actually located\"\n + \"on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n + str(tensor_b.location.id)\n + \") on the other tensor.\\n\"\n + \"\\nTensor A: \"\n + str(tensor_a)\n + \"\\nTensor B: \"\n + str(tensor_b)\n )\n else:\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors which are not on the same machine.\"\n + \"Try calling .send(), .move(), and/or .get() on these tensors to get them to the same\"\n + \"worker before calling methods that involve them working together.\"\n )\n\n super().__init__(message)\n\n self.tensor_a = tensor_a\n self.tensor_b = tensor_b\n\n\nclass ResponseSignatureError(Exception):\n \"\"\"Raised when the return of a hooked function is not correctly predicted\n (when defining in advance ids for results)\"\"\"\n\n def __init__(self, ids_generated=None):\n self.ids_generated = ids_generated\n\n def get_attributes(self):\n \"\"\"\n Specify all the attributes need to report an error correctly.\n \"\"\"\n return {\"ids_generated\": self.ids_generated}\n\n @staticmethod\n def simplify(e):\n \"\"\"\n Serialize information about an Exception which was raised to forward it\n \"\"\"\n # Get information about the exception: type of error, traceback\n tp = type(e)\n tb = e.__traceback__\n # Serialize the traceback\n traceback_str = \"Traceback (most recent call last):\\n\" + \"\".join(traceback.format_tb(tb))\n # Include special attributes if relevant\n try:\n attributes = e.get_attributes()\n except AttributeError:\n attributes = {}\n return tp.__name__, traceback_str, sy.serde._simplify(attributes)\n\n @staticmethod\n def detail(worker: \"sy.workers.AbstractWorker\", error_tuple: Tuple[str, str, dict]):\n \"\"\"\n Detail and re-raise an Exception forwarded by another worker\n \"\"\"\n error_name, traceback_str, attributes = error_tuple\n error_name, traceback_str = error_name.decode(\"utf-8\"), traceback_str.decode(\"utf-8\")\n attributes = sy.serde._detail(worker, attributes)\n # De-serialize the traceback\n tb = Traceback.from_string(traceback_str)\n # Check that the error belongs to a valid set of Exceptions\n if error_name in dir(sy.exceptions):\n error_type = getattr(sy.exceptions, error_name)\n error = error_type()\n # Include special attributes if any\n for attr_name, attr in attributes.items():\n setattr(error, attr_name, attr)\n reraise(error_type, error, tb.as_traceback())\n else:\n raise ValueError(f\"Invalid Exception returned:\\n{traceback_str}\")\n\n\nclass GetNotPermittedError(Exception):\n \"\"\"Raised when calling get on a pointer to a tensor which does not allow\n get to be called on it. 
This can happen do to sensitivity being too high\"\"\"\n\n @staticmethod\n def simplify(e):\n \"\"\"\n Serialize information about an Exception which was raised to forward it\n \"\"\"\n # Get information about the exception: type of error, traceback\n tp = type(e)\n tb = e.__traceback__\n # Serialize the traceback\n traceback_str = \"Traceback (most recent call last):\\n\" + \"\".join(traceback.format_tb(tb))\n # Include special attributes if relevant\n try:\n attributes = e.get_attributes()\n except AttributeError:\n attributes = {}\n return tp.__name__, traceback_str, sy.serde._simplify(attributes)\n\n @staticmethod\n def detail(worker: \"sy.workers.AbstractWorker\", error_tuple: Tuple[str, str, dict]):\n \"\"\"\n Detail and re-raise an Exception forwarded by another worker\n \"\"\"\n error_name, traceback_str, attributes = error_tuple\n error_name, traceback_str = error_name.decode(\"utf-8\"), traceback_str.decode(\"utf-8\")\n attributes = sy.serde._detail(worker, attributes)\n # De-serialize the traceback\n tb = Traceback.from_string(traceback_str)\n # Check that the error belongs to a valid set of Exceptions\n if error_name in dir(sy.exceptions):\n error_type = getattr(sy.exceptions, error_name)\n error = error_type()\n # Include special attributes if any\n for attr_name, attr in attributes.items():\n setattr(error, attr_name, attr)\n reraise(error_type, error, tb.as_traceback())\n else:\n raise ValueError(f\"Invalid Exception returned:\\n{traceback_str}\")\n\n\nclass IdNotUniqueError(Exception):\n \"\"\"Raised by the ID Provider when setting ids that have already been generated\"\"\"\n\n pass\n\n\ndef route_method_exception(exception, self, args, kwargs):\n try:\n if self.is_wrapper:\n if isinstance(self.child, sy.PointerTensor):\n if len(args) > 0:\n if not args[0].is_wrapper:\n return TensorsNotCollocatedException(self, args[0])\n elif isinstance(args[0].child, sy.PointerTensor):\n if self.location != args[0].child.location:\n return TensorsNotCollocatedException(self, args[0])\n\n # if self is a normal tensor\n elif isinstance(self, torch.Tensor):\n if len(args) > 0:\n if args[0].is_wrapper:\n if isinstance(args[0].child, sy.PointerTensor):\n return TensorsNotCollocatedException(self, args[0])\n elif isinstance(args[0], sy.PointerTensor):\n return TensorsNotCollocatedException(self, args[0])\n except:\n \"\"\n return exception\n", "path": "syft/exceptions.py"}], "after_files": [{"content": "\"\"\"Specific Pysyft exceptions.\"\"\"\n\nimport syft as sy\nimport torch\nfrom tblib import Traceback\nimport traceback\nfrom six import reraise\nfrom typing import Tuple\n\n\nclass PureTorchTensorFoundError(BaseException):\n \"\"\"Exception raised for errors in the input.\n This error is used in a recursive analysis of the args provided as an\n input of a function, to break the recursion if a TorchTensor is found\n as it means that _probably_ all the tensors are pure torch tensor and\n the function can be applied natively on this input.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n pass\n\n\nclass RemoteObjectFoundError(BaseException):\n \"\"\"Exception raised for errors in the input.\n This error is used in a context similar to PureTorchTensorFoundError but\n to indicate that a Pointer to a remote tensor was found in the input\n and thus that the command should be send elsewhere. 
The pointer retrieved\n by the error gives the location where the command should be sent.\n\n Attributes:\n expression -- input expression in which the error occurred\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, pointer):\n self.pointer = pointer\n\n\nclass InvalidTensorForRemoteGet(Exception):\n \"\"\"Raised when a chain of pointer tensors is not provided for `remote_get`.\"\"\"\n\n def __init__(self, tensor: object):\n message = \"Tensor does not have attribute child. You remote get should be called on a chain of pointer tensors, instead you called it on {}.\".format(\n tensor\n )\n super().__init__(message)\n\n\nclass WorkerNotFoundException(Exception):\n \"\"\"Raised when a non-existent worker is requested.\"\"\"\n\n pass\n\n\nclass CompressionNotFoundException(Exception):\n \"\"\"Raised when a non existent compression/decompression scheme is requested.\"\"\"\n\n pass\n\n\nclass CannotRequestObjectAttribute(Exception):\n \"\"\"Raised when .get() is called on a pointer which points to an attribute of\n another object.\"\"\"\n\n pass\n\n\nclass TensorsNotCollocatedException(Exception):\n \"\"\"Raised when a command is executed on two tensors which are not\n on the same machine. The goal is to provide as useful input as possible\n to help the user identify which tensors are where so that they can debug\n which one needs to be moved.\"\"\"\n\n def __init__(self, tensor_a, tensor_b, attr=\"a method\"):\n\n if hasattr(tensor_a, \"child\") and tensor_a.is_wrapper:\n tensor_a = tensor_a.child\n\n if hasattr(tensor_b, \"child\") and tensor_b.is_wrapper:\n tensor_b = tensor_b.child\n\n if isinstance(tensor_a, sy.PointerTensor) and isinstance(tensor_b, sy.PointerTensor):\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors which\"\n + \" are not on the same machine! One tensor is on \"\n + str(tensor_a.location)\n + \" while the other is on \"\n + str(tensor_b.location)\n + \". Use a combination of .move(), .get(), and/or .send() to co-locate them to the same machine.\"\n )\n elif isinstance(tensor_a, sy.PointerTensor):\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors where one tensor is actually located\"\n + \" on another machine (is a PointerTensor). Call .get() on the PointerTensor or .send(\"\n + str(tensor_a.location.id)\n + \") on the other tensor.\\n\"\n + \"\\nTensor A: \"\n + str(tensor_a)\n + \"\\nTensor B: \"\n + str(tensor_b)\n )\n elif isinstance(tensor_b, sy.PointerTensor):\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors where one tensor is actually located\"\n + \" on another machine (is a PointerTensor). 
Call .get() on the PointerTensor or .send(\"\n + str(tensor_b.location.id)\n + \") on the other tensor.\\n\"\n + \"\\nTensor A: \"\n + str(tensor_a)\n + \"\\nTensor B: \"\n + str(tensor_b)\n )\n else:\n message = (\n \"You tried to call \"\n + attr\n + \" involving two tensors which are not on the same machine.\"\n + \"Try calling .send(), .move(), and/or .get() on these tensors to get them to the same\"\n + \"worker before calling methods that involve them working together.\"\n )\n\n super().__init__(message)\n\n self.tensor_a = tensor_a\n self.tensor_b = tensor_b\n\n\nclass ResponseSignatureError(Exception):\n \"\"\"Raised when the return of a hooked function is not correctly predicted\n (when defining in advance ids for results)\"\"\"\n\n def __init__(self, ids_generated=None):\n self.ids_generated = ids_generated\n\n def get_attributes(self):\n \"\"\"\n Specify all the attributes need to report an error correctly.\n \"\"\"\n return {\"ids_generated\": self.ids_generated}\n\n @staticmethod\n def simplify(e):\n \"\"\"\n Serialize information about an Exception which was raised to forward it\n \"\"\"\n # Get information about the exception: type of error, traceback\n tp = type(e)\n tb = e.__traceback__\n # Serialize the traceback\n traceback_str = \"Traceback (most recent call last):\\n\" + \"\".join(traceback.format_tb(tb))\n # Include special attributes if relevant\n try:\n attributes = e.get_attributes()\n except AttributeError:\n attributes = {}\n return tp.__name__, traceback_str, sy.serde._simplify(attributes)\n\n @staticmethod\n def detail(worker: \"sy.workers.AbstractWorker\", error_tuple: Tuple[str, str, dict]):\n \"\"\"\n Detail and re-raise an Exception forwarded by another worker\n \"\"\"\n error_name, traceback_str, attributes = error_tuple\n error_name, traceback_str = error_name.decode(\"utf-8\"), traceback_str.decode(\"utf-8\")\n attributes = sy.serde._detail(worker, attributes)\n # De-serialize the traceback\n tb = Traceback.from_string(traceback_str)\n # Check that the error belongs to a valid set of Exceptions\n if error_name in dir(sy.exceptions):\n error_type = getattr(sy.exceptions, error_name)\n error = error_type()\n # Include special attributes if any\n for attr_name, attr in attributes.items():\n setattr(error, attr_name, attr)\n reraise(error_type, error, tb.as_traceback())\n else:\n raise ValueError(f\"Invalid Exception returned:\\n{traceback_str}\")\n\n\nclass GetNotPermittedError(Exception):\n \"\"\"Raised when calling get on a pointer to a tensor which does not allow\n get to be called on it. 
This can happen do to sensitivity being too high\"\"\"\n\n @staticmethod\n def simplify(e):\n \"\"\"\n Serialize information about an Exception which was raised to forward it\n \"\"\"\n # Get information about the exception: type of error, traceback\n tp = type(e)\n tb = e.__traceback__\n # Serialize the traceback\n traceback_str = \"Traceback (most recent call last):\\n\" + \"\".join(traceback.format_tb(tb))\n # Include special attributes if relevant\n try:\n attributes = e.get_attributes()\n except AttributeError:\n attributes = {}\n return tp.__name__, traceback_str, sy.serde._simplify(attributes)\n\n @staticmethod\n def detail(worker: \"sy.workers.AbstractWorker\", error_tuple: Tuple[str, str, dict]):\n \"\"\"\n Detail and re-raise an Exception forwarded by another worker\n \"\"\"\n error_name, traceback_str, attributes = error_tuple\n error_name, traceback_str = error_name.decode(\"utf-8\"), traceback_str.decode(\"utf-8\")\n attributes = sy.serde._detail(worker, attributes)\n # De-serialize the traceback\n tb = Traceback.from_string(traceback_str)\n # Check that the error belongs to a valid set of Exceptions\n if error_name in dir(sy.exceptions):\n error_type = getattr(sy.exceptions, error_name)\n error = error_type()\n # Include special attributes if any\n for attr_name, attr in attributes.items():\n setattr(error, attr_name, attr)\n reraise(error_type, error, tb.as_traceback())\n else:\n raise ValueError(f\"Invalid Exception returned:\\n{traceback_str}\")\n\n\nclass IdNotUniqueError(Exception):\n \"\"\"Raised by the ID Provider when setting ids that have already been generated\"\"\"\n\n pass\n\n\ndef route_method_exception(exception, self, args, kwargs):\n try:\n if self.is_wrapper:\n if isinstance(self.child, sy.PointerTensor):\n if len(args) > 0:\n if not args[0].is_wrapper:\n return TensorsNotCollocatedException(self, args[0])\n elif isinstance(args[0].child, sy.PointerTensor):\n if self.location != args[0].child.location:\n return TensorsNotCollocatedException(self, args[0])\n\n # if self is a normal tensor\n elif isinstance(self, torch.Tensor):\n if len(args) > 0:\n if args[0].is_wrapper:\n if isinstance(args[0].child, sy.PointerTensor):\n return TensorsNotCollocatedException(self, args[0])\n elif isinstance(args[0], sy.PointerTensor):\n return TensorsNotCollocatedException(self, args[0])\n except:\n \"\"\n return exception\n", "path": "syft/exceptions.py"}]}
| 3,393 | 362 |
gh_patches_debug_16528
|
rasdani/github-patches
|
git_diff
|
d2l-ai__d2l-vi-1014
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "Deep Learning Computation" chapter
- [x] index.md
- [x] model-construction.md
- [x] parameters.md
- [x] deferred-init.md
- [x] custom-layer.md
- [x] read-write.md
- [x] use-gpu.md
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils.py`
Content:
```
1 # encoding=utf8
2 import codecs
3 import filecmp
4 import re
5 import sys
6 import argparse
7
8 BEGIN_BLOCK_COMMENT = '<!--\n'
9 END_BLOCK_COMMENT = '-->\n\n'
10 TRANSLATE_INDICATOR = '*dịch đoạn phía trên*'
11 HEADER_INDICATOR = ' *dịch tiêu đề phía trên*\n'
12 IMAGE_CAPTION_INDICATOR = '*dịch chú thích ảnh phía trên*'
13 START_FILE = '<!-- ===================== Bắt đầu dịch Phần 1 ==================== -->\n'
14 END_FILE = '<!-- ===================== Kết thúc dịch Phần 1 ==================== -->\n'
15 SUFIX_PATH = 'contributors_template_vn.md'
16
17 # Our special mark in markdown, e.g. :label:`chapter_intro`
18 MARK_RE_MD = re.compile(':([-\/\\._\w\d]+):`([\*-\/\\\._\w\d]+)`')
19
20 parser = argparse.ArgumentParser(description='Dịch Dive into Deep Learning')
21 parser.add_argument('--convert', type=str, help='path to md file')
22
23
24 def is_blank_line(line):
25 return line.strip() == ''
26
27
28 class MyLine(object):
29 def __init__(self, line_str, in_code_block):
30 self.line_str = line_str.replace(' -- ', ' \-\- ')
31 self.in_code_block = in_code_block
32 self.end_comment_if_next_line_blank = None
33
34 def process(self, file_writer, last_line):
35 if self.in_code_block:
36 file_writer.write(self.line_str)
37 else:
38 self._process(file_writer, last_line)
39 return self
40
41 def _process(self, file_writer, last_line):
42 raise NotImplementedError
43
44
45 class NormalLine(MyLine):
46 def __init__(self, line_str, in_code_block):
47 super(NormalLine, self).__init__(line_str, in_code_block)
48 self.end_comment_if_next_line_blank = True
49
50 def _process(self, file_writer, last_line):
51 if isinstance(last_line, BlankLine):
52 file_writer.write(BEGIN_BLOCK_COMMENT)
53 file_writer.write(self.line_str)
54
55
56 class BlankLine(MyLine):
57 def __init__(self, line_str, in_code_block):
58 super(BlankLine, self).__init__(line_str, in_code_block)
59 self.end_comment_if_next_line_blank = False
60
61 def _process(self, file_writer, last_line):
62 if last_line.end_comment_if_next_line_blank:
63 file_writer.write(END_BLOCK_COMMENT)
64 file_writer.write(TRANSLATE_INDICATOR)
65 file_writer.write('\n')
66 file_writer.write('\n')
67
68
69 class HeaderLine(MyLine):
70 def __init__(self, line_str, in_code_block):
71 super(HeaderLine, self).__init__(line_str, in_code_block)
72 self.end_comment_if_next_line_blank = False
73 self.heading = 0
74 cnt = 0
75 for char in self.line_str:
76 if char == '#':
77 cnt += 1
78 elif char == ' ':
79 self.heading = cnt
80 break
81 else:
82 assert False, self.line_str
83
84 def _process(self, file_writer, last_line):
85 assert isinstance(last_line, BlankLine),\
86 last_line.line_str
87 file_writer.write(BEGIN_BLOCK_COMMENT)
88 file_writer.write(self.line_str)
89 file_writer.write(END_BLOCK_COMMENT)
90 file_writer.write('#'*self.heading + HEADER_INDICATOR)
91
92
93 class ImageLine(MyLine):
94 def __init(self, line_str, in_code_block):
95 assert not in_code_block
96 super(ImageLine, self).__init__(line_str, in_code_block)
97
98 def _process(self, file_writer, last_line):
99 close_square_bracket_id = self.line_str.index(']')
100 assert self.line_str[close_square_bracket_id+1] == '(', self.line_str
101 # assert self.line_str.endswith(')'), self.line_str
102 file_writer.write(BEGIN_BLOCK_COMMENT)
103 file_writer.write(self.line_str)
104 file_writer.write(END_BLOCK_COMMENT)
105 file_writer.write(
106 '![' + IMAGE_CAPTION_INDICATOR + ']' + self.line_str[close_square_bracket_id+1:]
107 )
108
109
110 class CodeMarkerLine(MyLine):
111 def __init__(self, line_str, in_code_block):
112 super(CodeMarkerLine, self).__init__(line_str, in_code_block)
113 self.end_comment_if_next_line_blank = False
114
115 def _process(self, file_writer, last_line):
116 """ the print is printed in the super class"""
117 file_writer.write(self.line_str)
118
119
120
121 class MathLine(MyLine):
122 def __init__(self, line_str, in_code_block):
123 super(MathLine, self).__init__(line_str, in_code_block)
124 self.end_comment_if_next_line_blank = False
125
126 def _process(self, file_writer, last_line):
127 file_writer.write(self.line_str)
128 return self
129
130
131 class LabelLine(MyLine):
132 def __init__(self, line_str, in_code_block):
133 super(LabelLine, self).__init__(line_str, in_code_block)
134 self.end_comment_if_next_line_blank = False
135
136 def _process(self, file_writer, last_line):
137 # assert isinstance(last_line, HeaderLine) or isinstance(last_line, ImageLine), 'last line: {}\nthis_line: {}'.format(
138 # last_line.line_str, self.line_str
139 # )
140 file_writer.write(self.line_str)
141 # file_writer.write('\n')
142 return self
143
144
145 def block_comment(input_md, output_md, add_prefix_suffix=False):
146 last_line = BlankLine('', False)
147 in_code_block = False
148 with codecs.open(input_md, 'r', encoding='utf-8') as input_handle,\
149 codecs.open(output_md, 'w', encoding='utf-8') as output_handle,\
150 codecs.open(SUFIX_PATH, 'r', encoding='utf-8') as surfix_handle:
151 if add_prefix_suffix:
152 output_handle.write(START_FILE)
153 output_handle.write('\n')
154 for line_str in input_handle:
155 line_str = line_str.rstrip() + '\n'
156 line_str = line_str.replace(' -- ', ' \-\- ')
157 match = MARK_RE_MD.match(line_str)
158 if is_blank_line(line_str):
159 line_type = BlankLine
160 elif line_str.startswith('#'):
161 line_type = HeaderLine
162 elif line_str.startswith('!['):
163 line_type = ImageLine
164 elif line_str.startswith('$'):
165 line_type = MathLine
166 elif line_str.startswith('```'):
167 in_code_block = not in_code_block
168 line_type = CodeMarkerLine
169 elif match is not None and match[1] in ['label', 'eqlabel']:
170 line_type = LabelLine
171 else:
172 line_type = NormalLine
173
174 this_line = line_type(line_str, in_code_block)
175 last_line = this_line.process(output_handle, last_line)
176
177 assert in_code_block is False
178
179 # TODO: simplify 5 lines below
180 if isinstance(last_line, BlankLine) or isinstance(last_line, LabelLine)\
181 or isinstance(last_line, CodeMarkerLine) or isinstance(last_line, ImageLine):
182 print('skip')
183 else:
184 output_handle.write(END_BLOCK_COMMENT)
185 output_handle.write(TRANSLATE_INDICATOR)
186 if add_prefix_suffix:
187 output_handle.write('\n')
188 output_handle.write(END_FILE)
189 output_handle.write('\n')
190 for line in surfix_handle:
191 output_handle.write(line)
192
193
194 if __name__ == '__main__':
195 args = parser.parse_args()
196 input_md = args.convert
197 output_md = input_md[:-len('.md')] + '_vn.md'
198 block_comment(input_md, output_md, add_prefix_suffix=True)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/utils.py b/utils.py
--- a/utils.py
+++ b/utils.py
@@ -10,8 +10,8 @@
TRANSLATE_INDICATOR = '*dịch đoạn phía trên*'
HEADER_INDICATOR = ' *dịch tiêu đề phía trên*\n'
IMAGE_CAPTION_INDICATOR = '*dịch chú thích ảnh phía trên*'
-START_FILE = '<!-- ===================== Bắt đầu dịch Phần 1 ==================== -->\n'
-END_FILE = '<!-- ===================== Kết thúc dịch Phần 1 ==================== -->\n'
+START_FILE = '<!-- ===================== Bắt đầu dịch Phần ==================== -->\n<!-- ========================================= REVISE PHẦN - BẮT ĐẦU =================================== -->\n'
+END_FILE = '<!-- ===================== Kết thúc dịch Phần ==================== -->\n<!-- ========================================= REVISE PHẦN - KẾT THÚC ===================================-->\n'
SUFIX_PATH = 'contributors_template_vn.md'
# Our special mark in markdown, e.g. :label:`chapter_intro`
|
{"golden_diff": "diff --git a/utils.py b/utils.py\n--- a/utils.py\n+++ b/utils.py\n@@ -10,8 +10,8 @@\n TRANSLATE_INDICATOR = '*d\u1ecbch \u0111o\u1ea1n ph\u00eda tr\u00ean*'\n HEADER_INDICATOR = ' *d\u1ecbch ti\u00eau \u0111\u1ec1 ph\u00eda tr\u00ean*\\n'\n IMAGE_CAPTION_INDICATOR = '*d\u1ecbch ch\u00fa th\u00edch \u1ea3nh ph\u00eda tr\u00ean*'\n-START_FILE = '<!-- ===================== B\u1eaft \u0111\u1ea7u d\u1ecbch Ph\u1ea7n 1 ==================== -->\\n'\n-END_FILE = '<!-- ===================== K\u1ebft th\u00fac d\u1ecbch Ph\u1ea7n 1 ==================== -->\\n'\n+START_FILE = '<!-- ===================== B\u1eaft \u0111\u1ea7u d\u1ecbch Ph\u1ea7n ==================== -->\\n<!-- ========================================= REVISE PH\u1ea6N - B\u1eaeT \u0110\u1ea6U =================================== -->\\n'\n+END_FILE = '<!-- ===================== K\u1ebft th\u00fac d\u1ecbch Ph\u1ea7n ==================== -->\\n<!-- ========================================= REVISE PH\u1ea6N - K\u1ebeT TH\u00daC ===================================-->\\n'\n SUFIX_PATH = 'contributors_template_vn.md'\n \n # Our special mark in markdown, e.g. :label:`chapter_intro`\n", "issue": "Add \"Deep Learning Computation\" chapter\n- [x] index.md\r\n- [x] model-construction.md\r\n- [x] parameters.md\r\n- [x] deferred-init.md\r\n- [x] custom-layer.md\r\n- [x] read-write.md\r\n- [x] use-gpu.md\n", "before_files": [{"content": "# encoding=utf8\nimport codecs\nimport filecmp\nimport re\nimport sys\nimport argparse\n\nBEGIN_BLOCK_COMMENT = '<!--\\n'\nEND_BLOCK_COMMENT = '-->\\n\\n'\nTRANSLATE_INDICATOR = '*d\u1ecbch \u0111o\u1ea1n ph\u00eda tr\u00ean*'\nHEADER_INDICATOR = ' *d\u1ecbch ti\u00eau \u0111\u1ec1 ph\u00eda tr\u00ean*\\n'\nIMAGE_CAPTION_INDICATOR = '*d\u1ecbch ch\u00fa th\u00edch \u1ea3nh ph\u00eda tr\u00ean*'\nSTART_FILE = '<!-- ===================== B\u1eaft \u0111\u1ea7u d\u1ecbch Ph\u1ea7n 1 ==================== -->\\n'\nEND_FILE = '<!-- ===================== K\u1ebft th\u00fac d\u1ecbch Ph\u1ea7n 1 ==================== -->\\n'\nSUFIX_PATH = 'contributors_template_vn.md'\n\n# Our special mark in markdown, e.g. 
:label:`chapter_intro`\nMARK_RE_MD = re.compile(':([-\\/\\\\._\\w\\d]+):`([\\*-\\/\\\\\\._\\w\\d]+)`')\n\nparser = argparse.ArgumentParser(description='D\u1ecbch Dive into Deep Learning')\nparser.add_argument('--convert', type=str, help='path to md file')\n\n\ndef is_blank_line(line):\n return line.strip() == ''\n\n\nclass MyLine(object):\n def __init__(self, line_str, in_code_block):\n self.line_str = line_str.replace(' -- ', ' \\-\\- ')\n self.in_code_block = in_code_block\n self.end_comment_if_next_line_blank = None\n\n def process(self, file_writer, last_line):\n if self.in_code_block:\n file_writer.write(self.line_str)\n else:\n self._process(file_writer, last_line)\n return self\n\n def _process(self, file_writer, last_line):\n raise NotImplementedError\n\n\nclass NormalLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(NormalLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = True\n\n def _process(self, file_writer, last_line):\n if isinstance(last_line, BlankLine):\n file_writer.write(BEGIN_BLOCK_COMMENT)\n file_writer.write(self.line_str)\n\n\nclass BlankLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(BlankLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n if last_line.end_comment_if_next_line_blank:\n file_writer.write(END_BLOCK_COMMENT)\n file_writer.write(TRANSLATE_INDICATOR)\n file_writer.write('\\n')\n file_writer.write('\\n')\n\n\nclass HeaderLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(HeaderLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n self.heading = 0\n cnt = 0\n for char in self.line_str:\n if char == '#':\n cnt += 1\n elif char == ' ':\n self.heading = cnt\n break\n else:\n assert False, self.line_str\n\n def _process(self, file_writer, last_line):\n assert isinstance(last_line, BlankLine),\\\n last_line.line_str\n file_writer.write(BEGIN_BLOCK_COMMENT)\n file_writer.write(self.line_str)\n file_writer.write(END_BLOCK_COMMENT)\n file_writer.write('#'*self.heading + HEADER_INDICATOR)\n\n\nclass ImageLine(MyLine):\n def __init(self, line_str, in_code_block):\n assert not in_code_block\n super(ImageLine, self).__init__(line_str, in_code_block)\n\n def _process(self, file_writer, last_line):\n close_square_bracket_id = self.line_str.index(']')\n assert self.line_str[close_square_bracket_id+1] == '(', self.line_str\n # assert self.line_str.endswith(')'), self.line_str\n file_writer.write(BEGIN_BLOCK_COMMENT)\n file_writer.write(self.line_str)\n file_writer.write(END_BLOCK_COMMENT)\n file_writer.write(\n '![' + IMAGE_CAPTION_INDICATOR + ']' + self.line_str[close_square_bracket_id+1:]\n )\n\n\nclass CodeMarkerLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(CodeMarkerLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n \"\"\" the print is printed in the super class\"\"\"\n file_writer.write(self.line_str)\n\n\n\nclass MathLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(MathLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n file_writer.write(self.line_str)\n return self\n\n\nclass LabelLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(LabelLine, self).__init__(line_str, in_code_block)\n 
self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n # assert isinstance(last_line, HeaderLine) or isinstance(last_line, ImageLine), 'last line: {}\\nthis_line: {}'.format(\n # last_line.line_str, self.line_str\n # )\n file_writer.write(self.line_str)\n # file_writer.write('\\n')\n return self\n\n\ndef block_comment(input_md, output_md, add_prefix_suffix=False):\n last_line = BlankLine('', False)\n in_code_block = False\n with codecs.open(input_md, 'r', encoding='utf-8') as input_handle,\\\n codecs.open(output_md, 'w', encoding='utf-8') as output_handle,\\\n codecs.open(SUFIX_PATH, 'r', encoding='utf-8') as surfix_handle:\n if add_prefix_suffix:\n output_handle.write(START_FILE)\n output_handle.write('\\n')\n for line_str in input_handle:\n line_str = line_str.rstrip() + '\\n'\n line_str = line_str.replace(' -- ', ' \\-\\- ')\n match = MARK_RE_MD.match(line_str)\n if is_blank_line(line_str):\n line_type = BlankLine\n elif line_str.startswith('#'):\n line_type = HeaderLine\n elif line_str.startswith('!['):\n line_type = ImageLine\n elif line_str.startswith('$'):\n line_type = MathLine\n elif line_str.startswith('```'):\n in_code_block = not in_code_block\n line_type = CodeMarkerLine\n elif match is not None and match[1] in ['label', 'eqlabel']:\n line_type = LabelLine\n else:\n line_type = NormalLine\n\n this_line = line_type(line_str, in_code_block)\n last_line = this_line.process(output_handle, last_line)\n\n assert in_code_block is False\n\n # TODO: simplify 5 lines below\n if isinstance(last_line, BlankLine) or isinstance(last_line, LabelLine)\\\n or isinstance(last_line, CodeMarkerLine) or isinstance(last_line, ImageLine):\n print('skip')\n else:\n output_handle.write(END_BLOCK_COMMENT)\n output_handle.write(TRANSLATE_INDICATOR)\n if add_prefix_suffix:\n output_handle.write('\\n')\n output_handle.write(END_FILE)\n output_handle.write('\\n')\n for line in surfix_handle:\n output_handle.write(line)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n input_md = args.convert\n output_md = input_md[:-len('.md')] + '_vn.md'\n block_comment(input_md, output_md, add_prefix_suffix=True)\n", "path": "utils.py"}], "after_files": [{"content": "# encoding=utf8\nimport codecs\nimport filecmp\nimport re\nimport sys\nimport argparse\n\nBEGIN_BLOCK_COMMENT = '<!--\\n'\nEND_BLOCK_COMMENT = '-->\\n\\n'\nTRANSLATE_INDICATOR = '*d\u1ecbch \u0111o\u1ea1n ph\u00eda tr\u00ean*'\nHEADER_INDICATOR = ' *d\u1ecbch ti\u00eau \u0111\u1ec1 ph\u00eda tr\u00ean*\\n'\nIMAGE_CAPTION_INDICATOR = '*d\u1ecbch ch\u00fa th\u00edch \u1ea3nh ph\u00eda tr\u00ean*'\nSTART_FILE = '<!-- ===================== B\u1eaft \u0111\u1ea7u d\u1ecbch Ph\u1ea7n ==================== -->\\n<!-- ========================================= REVISE PH\u1ea6N - B\u1eaeT \u0110\u1ea6U =================================== -->\\n'\nEND_FILE = '<!-- ===================== K\u1ebft th\u00fac d\u1ecbch Ph\u1ea7n ==================== -->\\n<!-- ========================================= REVISE PH\u1ea6N - K\u1ebeT TH\u00daC ===================================-->\\n'\nSUFIX_PATH = 'contributors_template_vn.md'\n\n# Our special mark in markdown, e.g. 
:label:`chapter_intro`\nMARK_RE_MD = re.compile(':([-\\/\\\\._\\w\\d]+):`([\\*-\\/\\\\\\._\\w\\d]+)`')\n\nparser = argparse.ArgumentParser(description='D\u1ecbch Dive into Deep Learning')\nparser.add_argument('--convert', type=str, help='path to md file')\n\n\ndef is_blank_line(line):\n return line.strip() == ''\n\n\nclass MyLine(object):\n def __init__(self, line_str, in_code_block):\n self.line_str = line_str.replace(' -- ', ' \\-\\- ')\n self.in_code_block = in_code_block\n self.end_comment_if_next_line_blank = None\n\n def process(self, file_writer, last_line):\n if self.in_code_block:\n file_writer.write(self.line_str)\n else:\n self._process(file_writer, last_line)\n return self\n\n def _process(self, file_writer, last_line):\n raise NotImplementedError\n\n\nclass NormalLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(NormalLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = True\n\n def _process(self, file_writer, last_line):\n if isinstance(last_line, BlankLine):\n file_writer.write(BEGIN_BLOCK_COMMENT)\n file_writer.write(self.line_str)\n\n\nclass BlankLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(BlankLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n if last_line.end_comment_if_next_line_blank:\n file_writer.write(END_BLOCK_COMMENT)\n file_writer.write(TRANSLATE_INDICATOR)\n file_writer.write('\\n')\n file_writer.write('\\n')\n\n\nclass HeaderLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(HeaderLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n self.heading = 0\n cnt = 0\n for char in self.line_str:\n if char == '#':\n cnt += 1\n elif char == ' ':\n self.heading = cnt\n break\n else:\n assert False, self.line_str\n\n def _process(self, file_writer, last_line):\n assert isinstance(last_line, BlankLine),\\\n last_line.line_str\n file_writer.write(BEGIN_BLOCK_COMMENT)\n file_writer.write(self.line_str)\n file_writer.write(END_BLOCK_COMMENT)\n file_writer.write('#'*self.heading + HEADER_INDICATOR)\n\n\nclass ImageLine(MyLine):\n def __init(self, line_str, in_code_block):\n assert not in_code_block\n super(ImageLine, self).__init__(line_str, in_code_block)\n\n def _process(self, file_writer, last_line):\n close_square_bracket_id = self.line_str.index(']')\n assert self.line_str[close_square_bracket_id+1] == '(', self.line_str\n # assert self.line_str.endswith(')'), self.line_str\n file_writer.write(BEGIN_BLOCK_COMMENT)\n file_writer.write(self.line_str)\n file_writer.write(END_BLOCK_COMMENT)\n file_writer.write(\n '![' + IMAGE_CAPTION_INDICATOR + ']' + self.line_str[close_square_bracket_id+1:]\n )\n\n\nclass CodeMarkerLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(CodeMarkerLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n \"\"\" the print is printed in the super class\"\"\"\n file_writer.write(self.line_str)\n\n\n\nclass MathLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(MathLine, self).__init__(line_str, in_code_block)\n self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n file_writer.write(self.line_str)\n return self\n\n\nclass LabelLine(MyLine):\n def __init__(self, line_str, in_code_block):\n super(LabelLine, self).__init__(line_str, in_code_block)\n 
self.end_comment_if_next_line_blank = False\n\n def _process(self, file_writer, last_line):\n # assert isinstance(last_line, HeaderLine) or isinstance(last_line, ImageLine), 'last line: {}\\nthis_line: {}'.format(\n # last_line.line_str, self.line_str\n # )\n file_writer.write(self.line_str)\n # file_writer.write('\\n')\n return self\n\n\ndef block_comment(input_md, output_md, add_prefix_suffix=False):\n last_line = BlankLine('', False)\n in_code_block = False\n with codecs.open(input_md, 'r', encoding='utf-8') as input_handle,\\\n codecs.open(output_md, 'w', encoding='utf-8') as output_handle,\\\n codecs.open(SUFIX_PATH, 'r', encoding='utf-8') as surfix_handle:\n if add_prefix_suffix:\n output_handle.write(START_FILE)\n output_handle.write('\\n')\n for line_str in input_handle:\n line_str = line_str.rstrip() + '\\n'\n line_str = line_str.replace(' -- ', ' \\-\\- ')\n match = MARK_RE_MD.match(line_str)\n if is_blank_line(line_str):\n line_type = BlankLine\n elif line_str.startswith('#'):\n line_type = HeaderLine\n elif line_str.startswith('!['):\n line_type = ImageLine\n elif line_str.startswith('$'):\n line_type = MathLine\n elif line_str.startswith('```'):\n in_code_block = not in_code_block\n line_type = CodeMarkerLine\n elif match is not None and match[1] in ['label', 'eqlabel']:\n line_type = LabelLine\n else:\n line_type = NormalLine\n\n this_line = line_type(line_str, in_code_block)\n last_line = this_line.process(output_handle, last_line)\n\n assert in_code_block is False\n\n # TODO: simplify 5 lines below\n if isinstance(last_line, BlankLine) or isinstance(last_line, LabelLine)\\\n or isinstance(last_line, CodeMarkerLine) or isinstance(last_line, ImageLine):\n print('skip')\n else:\n output_handle.write(END_BLOCK_COMMENT)\n output_handle.write(TRANSLATE_INDICATOR)\n if add_prefix_suffix:\n output_handle.write('\\n')\n output_handle.write(END_FILE)\n output_handle.write('\\n')\n for line in surfix_handle:\n output_handle.write(line)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n input_md = args.convert\n output_md = input_md[:-len('.md')] + '_vn.md'\n block_comment(input_md, output_md, add_prefix_suffix=True)\n", "path": "utils.py"}]}
| 2,466 | 229 |
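Editor's sketch for the utils.py entry above (not part of the dataset record): the translation helper's output depends on the order in which block_comment() classifies each markdown line. The snippet below re-implements only that dispatch, using a simplified stand-in for MARK_RE_MD and made-up sample lines, so the ordering can be sanity-checked in isolation; it is an approximation, not the tool itself.

```python
import re

# Simplified stand-in for MARK_RE_MD; matches things like :label:`chap_intro`
MARK_RE = re.compile(r':([-\w.]+):`([-\w.*]+)`')

def classify(line_str):
    """Mirror the if/elif dispatch order used in block_comment()."""
    match = MARK_RE.match(line_str)
    if line_str.strip() == '':
        return 'BlankLine'
    elif line_str.startswith('#'):
        return 'HeaderLine'
    elif line_str.startswith('!['):
        return 'ImageLine'
    elif line_str.startswith('$'):
        return 'MathLine'
    elif line_str.startswith('```'):
        return 'CodeMarkerLine'   # the real tool also toggles in_code_block here
    elif match is not None and match[1] in ('label', 'eqlabel'):
        return 'LabelLine'
    return 'NormalLine'

for sample in ('# Preface', ':label:`chap_intro`', '![net](img/net.svg)', '$$y = x$$', 'Plain prose.'):
    print(f'{sample!r:30} -> {classify(sample)}')
```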
gh_patches_debug_65233
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-4763
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.huya: As of today, Huya plugin has been broken
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
When I try to open any public Huya stream I get an error message. I assume Huya has changed how live IDs are handled and switched to strings.
### Debug log
```text
hina@Hinas-MacBook-Pro ~ % streamlink https://www.huya.com/660108 best --loglevel debug
[cli][debug] OS: macOS 12.5
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 4.3.0
[cli][debug] Dependencies:
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] websocket-client: 1.3.3
[cli][debug] Arguments:
[cli][debug] url=https://www.huya.com/660108
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin huya for URL https://www.huya.com/660108
error: Unable to validate response text: ValidationError(NoneOrAllSchema):
ValidationError(dict):
Unable to validate value of key 'data'
Context(AnySchema):
ValidationError(dict):
Unable to validate value of key 'gameLiveInfo'
Context(dict):
Unable to validate value of key 'liveId'
Context(type):
Type of '7134607205476108031' should be int, but is str
hina@Hinas-MacBook-Pro ~ %
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/huya.py`
Content:
```
1 """
2 $description Chinese live-streaming platform for live video game broadcasts and individual live streams.
3 $url huya.com
4 $type live
5 """
6
7 import base64
8 import logging
9 import re
10 from html import unescape as html_unescape
11 from typing import Dict
12
13 from streamlink.plugin import Plugin, pluginmatcher
14 from streamlink.plugin.api import validate
15 from streamlink.stream.http import HTTPStream
16
17 log = logging.getLogger(__name__)
18
19
20 @pluginmatcher(re.compile(
21 r"https?://(?:www\.)?huya\.com/(?P<channel>[^/]+)"
22 ))
23 class Huya(Plugin):
24 QUALITY_WEIGHTS: Dict[str, int] = {}
25
26 @classmethod
27 def stream_weight(cls, key):
28 weight = cls.QUALITY_WEIGHTS.get(key)
29 if weight:
30 return weight, "huya"
31
32 return super().stream_weight(key)
33
34 def _get_streams(self):
35 data = self.session.http.get(self.url, schema=validate.Schema(
36 validate.parse_html(),
37 validate.xml_xpath_string(".//script[contains(text(),'var hyPlayerConfig = {')][1]/text()"),
38 validate.none_or_all(
39 re.compile(r"""(?P<q>"?)stream(?P=q)\s*:\s*(?:"(?P<base64>.+?)"|(?P<json>\{.+?})\s*}\s*;)"""),
40 ),
41 validate.none_or_all(
42 validate.any(
43 validate.all(
44 validate.get("base64"),
45 str,
46 validate.transform(base64.b64decode),
47 ),
48 validate.all(
49 validate.get("json"),
50 str,
51 ),
52 ),
53 validate.parse_json(),
54 {
55 "data": [{
56 "gameLiveInfo": {
57 "liveId": int,
58 "nick": str,
59 "roomName": str,
60 },
61 "gameStreamInfoList": [validate.all(
62 {
63 "sCdnType": str,
64 "iPCPriorityRate": int,
65 "sStreamName": str,
66 "sFlvUrl": str,
67 "sFlvUrlSuffix": str,
68 "sFlvAntiCode": validate.all(str, validate.transform(lambda v: html_unescape(v))),
69 },
70 validate.union_get(
71 "sCdnType",
72 "iPCPriorityRate",
73 "sStreamName",
74 "sFlvUrl",
75 "sFlvUrlSuffix",
76 "sFlvAntiCode",
77 )),
78 ],
79 }],
80 },
81 validate.get(("data", 0)),
82 validate.union_get(
83 ("gameLiveInfo", "liveId"),
84 ("gameLiveInfo", "nick"),
85 ("gameLiveInfo", "roomName"),
86 "gameStreamInfoList",
87 ),
88 ),
89 ))
90 if not data:
91 return
92
93 self.id, self.author, self.title, streamdata = data
94
95 for cdntype, priority, streamname, flvurl, suffix, anticode in streamdata:
96 name = f"source_{cdntype.lower()}"
97 self.QUALITY_WEIGHTS[name] = priority
98 yield name, HTTPStream(self.session, f"{flvurl}/{streamname}.{suffix}?{anticode}")
99
100 log.debug(f"QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}")
101
102
103 __plugin__ = Huya
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/huya.py b/src/streamlink/plugins/huya.py
--- a/src/streamlink/plugins/huya.py
+++ b/src/streamlink/plugins/huya.py
@@ -54,7 +54,7 @@
{
"data": [{
"gameLiveInfo": {
- "liveId": int,
+ "liveId": str,
"nick": str,
"roomName": str,
},
|
{"golden_diff": "diff --git a/src/streamlink/plugins/huya.py b/src/streamlink/plugins/huya.py\n--- a/src/streamlink/plugins/huya.py\n+++ b/src/streamlink/plugins/huya.py\n@@ -54,7 +54,7 @@\n {\n \"data\": [{\n \"gameLiveInfo\": {\n- \"liveId\": int,\n+ \"liveId\": str,\n \"nick\": str,\n \"roomName\": str,\n },\n", "issue": "plugins.huya: As of today, Huya plugin has been broken\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nWhen I try to open any public Huya stream I get an error message. Assuming Huya has changed how live IDs are handled and switched to strings.\n\n### Debug log\n\n```text\nhina@Hinas-MacBook-Pro ~ % streamlink https://www.huya.com/660108 best --loglevel debug\r\n[cli][debug] OS: macOS 12.5\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 4.3.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] websocket-client: 1.3.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.huya.com/660108\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin huya for URL https://www.huya.com/660108\r\nerror: Unable to validate response text: ValidationError(NoneOrAllSchema):\r\n ValidationError(dict):\r\n Unable to validate value of key 'data'\r\n Context(AnySchema):\r\n ValidationError(dict):\r\n Unable to validate value of key 'gameLiveInfo'\r\n Context(dict):\r\n Unable to validate value of key 'liveId'\r\n Context(type):\r\n Type of '7134607205476108031' should be int, but is str\r\nhina@Hinas-MacBook-Pro ~ %\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Chinese live-streaming platform for live video game broadcasts and individual live streams.\n$url huya.com\n$type live\n\"\"\"\n\nimport base64\nimport logging\nimport re\nfrom html import unescape as html_unescape\nfrom typing import Dict\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?huya\\.com/(?P<channel>[^/]+)\"\n))\nclass Huya(Plugin):\n QUALITY_WEIGHTS: Dict[str, int] = {}\n\n @classmethod\n def stream_weight(cls, key):\n weight = cls.QUALITY_WEIGHTS.get(key)\n if weight:\n return weight, \"huya\"\n\n return super().stream_weight(key)\n\n def _get_streams(self):\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'var hyPlayerConfig = {')][1]/text()\"),\n validate.none_or_all(\n re.compile(r\"\"\"(?P<q>\"?)stream(?P=q)\\s*:\\s*(?:\"(?P<base64>.+?)\"|(?P<json>\\{.+?})\\s*}\\s*;)\"\"\"),\n ),\n validate.none_or_all(\n validate.any(\n validate.all(\n validate.get(\"base64\"),\n str,\n validate.transform(base64.b64decode),\n ),\n validate.all(\n 
validate.get(\"json\"),\n str,\n ),\n ),\n validate.parse_json(),\n {\n \"data\": [{\n \"gameLiveInfo\": {\n \"liveId\": int,\n \"nick\": str,\n \"roomName\": str,\n },\n \"gameStreamInfoList\": [validate.all(\n {\n \"sCdnType\": str,\n \"iPCPriorityRate\": int,\n \"sStreamName\": str,\n \"sFlvUrl\": str,\n \"sFlvUrlSuffix\": str,\n \"sFlvAntiCode\": validate.all(str, validate.transform(lambda v: html_unescape(v))),\n },\n validate.union_get(\n \"sCdnType\",\n \"iPCPriorityRate\",\n \"sStreamName\",\n \"sFlvUrl\",\n \"sFlvUrlSuffix\",\n \"sFlvAntiCode\",\n )),\n ],\n }],\n },\n validate.get((\"data\", 0)),\n validate.union_get(\n (\"gameLiveInfo\", \"liveId\"),\n (\"gameLiveInfo\", \"nick\"),\n (\"gameLiveInfo\", \"roomName\"),\n \"gameStreamInfoList\",\n ),\n ),\n ))\n if not data:\n return\n\n self.id, self.author, self.title, streamdata = data\n\n for cdntype, priority, streamname, flvurl, suffix, anticode in streamdata:\n name = f\"source_{cdntype.lower()}\"\n self.QUALITY_WEIGHTS[name] = priority\n yield name, HTTPStream(self.session, f\"{flvurl}/{streamname}.{suffix}?{anticode}\")\n\n log.debug(f\"QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}\")\n\n\n__plugin__ = Huya\n", "path": "src/streamlink/plugins/huya.py"}], "after_files": [{"content": "\"\"\"\n$description Chinese live-streaming platform for live video game broadcasts and individual live streams.\n$url huya.com\n$type live\n\"\"\"\n\nimport base64\nimport logging\nimport re\nfrom html import unescape as html_unescape\nfrom typing import Dict\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?huya\\.com/(?P<channel>[^/]+)\"\n))\nclass Huya(Plugin):\n QUALITY_WEIGHTS: Dict[str, int] = {}\n\n @classmethod\n def stream_weight(cls, key):\n weight = cls.QUALITY_WEIGHTS.get(key)\n if weight:\n return weight, \"huya\"\n\n return super().stream_weight(key)\n\n def _get_streams(self):\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'var hyPlayerConfig = {')][1]/text()\"),\n validate.none_or_all(\n re.compile(r\"\"\"(?P<q>\"?)stream(?P=q)\\s*:\\s*(?:\"(?P<base64>.+?)\"|(?P<json>\\{.+?})\\s*}\\s*;)\"\"\"),\n ),\n validate.none_or_all(\n validate.any(\n validate.all(\n validate.get(\"base64\"),\n str,\n validate.transform(base64.b64decode),\n ),\n validate.all(\n validate.get(\"json\"),\n str,\n ),\n ),\n validate.parse_json(),\n {\n \"data\": [{\n \"gameLiveInfo\": {\n \"liveId\": str,\n \"nick\": str,\n \"roomName\": str,\n },\n \"gameStreamInfoList\": [validate.all(\n {\n \"sCdnType\": str,\n \"iPCPriorityRate\": int,\n \"sStreamName\": str,\n \"sFlvUrl\": str,\n \"sFlvUrlSuffix\": str,\n \"sFlvAntiCode\": validate.all(str, validate.transform(lambda v: html_unescape(v))),\n },\n validate.union_get(\n \"sCdnType\",\n \"iPCPriorityRate\",\n \"sStreamName\",\n \"sFlvUrl\",\n \"sFlvUrlSuffix\",\n \"sFlvAntiCode\",\n )),\n ],\n }],\n },\n validate.get((\"data\", 0)),\n validate.union_get(\n (\"gameLiveInfo\", \"liveId\"),\n (\"gameLiveInfo\", \"nick\"),\n (\"gameLiveInfo\", \"roomName\"),\n \"gameStreamInfoList\",\n ),\n ),\n ))\n if not data:\n return\n\n self.id, self.author, self.title, streamdata = data\n\n for cdntype, priority, streamname, flvurl, suffix, anticode in streamdata:\n name = f\"source_{cdntype.lower()}\"\n self.QUALITY_WEIGHTS[name] = 
priority\n yield name, HTTPStream(self.session, f\"{flvurl}/{streamname}.{suffix}?{anticode}\")\n\n log.debug(f\"QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}\")\n\n\n__plugin__ = Huya\n", "path": "src/streamlink/plugins/huya.py"}]}
| 1,772 | 97 |
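Editor's sketch for the streamlink/huya entry above (not part of the dataset record): the crash is a pure schema-type mismatch, since Huya's API now returns liveId as a string and the plugin's `int` constraint rejects it, exactly as the debug log shows. The snippet below reproduces that behaviour with streamlink's validate.Schema, the same helper the plugin uses. It assumes streamlink is installed, and the payload values are invented.

```python
# Minimal reproduction sketch; requires `pip install streamlink`.
from streamlink.plugin.api import validate

# Invented payload shaped like the relevant part of Huya's response.
payload = {"gameLiveInfo": {"liveId": "7134607205476108031", "nick": "someone", "roomName": "a room"}}

old_schema = validate.Schema({"gameLiveInfo": {"liveId": int, "nick": str, "roomName": str}})
new_schema = validate.Schema({"gameLiveInfo": {"liveId": str, "nick": str, "roomName": str}})

try:
    old_schema.validate(payload)   # pre-patch schema: a str liveId fails the int check
except Exception as err:
    print("old schema rejects the payload:", err)

print("patched schema accepts it:", new_schema.validate(payload))
```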
gh_patches_debug_30517
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2148
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Something is wrong with the data explorer....
<!-- A clear and concise description of what the bug is. -->
I'm trying to make the Overdue Books report and ended up with this:
<img width="1151" alt="Screen Shot 2022-12-20 at 7 23 10 PM" src="https://user-images.githubusercontent.com/287034/208792427-f8b7bc2a-7228-4c11-90d0-d95766bcd7e4.png">
Aaron Black has not checked out the same book over and over again; here's what his checkouts actually look like.

If I try to recreate the exploration, everything looks fine - I add Patron name, email, and book title (I selected do not summarize) and get this:

Then I add Due Date from Checkouts and everything goes nuts

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/queries/base.py`
Content:
```
1 from frozendict import frozendict
2 from sqlalchemy import select
3
4 from db.records.operations import select as records_select
5 from db.columns.base import MathesarColumn
6 from db.columns.operations.select import get_column_name_from_attnum
7 from db.tables.operations.select import reflect_table_from_oid
8 from db.transforms.operations.apply import apply_transformations
9 from db.metadata import get_empty_metadata
10
11
12 class DBQuery:
13 def __init__(
14 self,
15 base_table_oid,
16 initial_columns,
17 engine,
18 transformations=None,
19 name=None,
20 # The same metadata will be used by all the methods within DBQuery
21 # So make sure to change the metadata in case the DBQuery methods are called
22 # after a mutation to the database object that could make the existing metadata invalid.
23 metadata=None
24 ):
25 self.base_table_oid = base_table_oid
26 for initial_col in initial_columns:
27 assert isinstance(initial_col, InitialColumn)
28 self.initial_columns = initial_columns
29 self.engine = engine
30 if transformations is None:
31 # Less states to consider if no transformations is just an empty sequence
32 transformations = tuple()
33 self.transformations = transformations
34 self.name = name
35 self.metadata = metadata if metadata else get_empty_metadata()
36
37 def get_input_aliases(self, ix_of_transform):
38 """
39 Each transformation in a DBQuery has its own list of input aliases; this returns it.
40 """
41 initial_aliases = self.initial_aliases
42 if ix_of_transform == 0:
43 return initial_aliases
44 input_aliases = initial_aliases
45 previous_transforms = self.transformations[:ix_of_transform]
46 for transform in previous_transforms:
47 output_aliases = transform.get_output_aliases(input_aliases)
48 input_aliases = output_aliases
49 return input_aliases
50
51 def get_initial_column_by_input_alias(self, ix_of_transform, input_alias):
52 """
53 Retraces the chain of input aliases until it gets to an initial column.
54
55 Returns None if the alias does not originate from an initial column in a way that would
56 preserve a unique constraint. E.g. if it is generated by an aggregation.
57 """
58 initial_col_alias = \
59 self._get_initial_alias_by_input_alias(ix_of_transform, input_alias)
60 if initial_col_alias is None:
61 return None
62 initial_column = \
63 self._get_initial_column_by_initial_column_alias(initial_col_alias)
64 return initial_column
65
66 def _get_initial_alias_by_input_alias(self, ix_of_transform, input_alias):
67 if ix_of_transform == 0:
68 return input_alias
69 transforms = self.transformations[:ix_of_transform]
70 initial_aliases = self.initial_aliases
71 input_aliases = initial_aliases
72 uc_mappings_for_each_transform = [
73 transform.get_unique_constraint_mappings(input_aliases)
74 for transform in transforms
75 ]
76 for uc_mappings in reversed(uc_mappings_for_each_transform):
77 for uc_mapping in uc_mappings:
78 if uc_mapping.output_alias == input_alias:
79 input_alias = uc_mapping.input_alias
80 if input_alias is None:
81 return None
82 break
83 initial_alias = input_alias
84 return initial_alias
85
86 def _get_initial_column_by_initial_column_alias(self, alias):
87 """
88 Looks up an initial column by initial column alias; no recursive logic.
89 """
90 for initial_column in self.initial_columns:
91 if initial_column.alias == alias:
92 return initial_column
93
94 @property
95 def initial_aliases(self):
96 return [
97 initial_column.alias
98 for initial_column
99 in self.initial_columns
100 ]
101
102 # mirrors a method in db.records.operations.select
103 def get_records(self, **kwargs):
104 # NOTE how through this method you can perform a second batch of
105 # transformations. this reflects fact that we can form a query, and
106 # then apply temporary transforms on it, like how you can apply
107 # temporary transforms to a table when in a table view.
108 return records_select.get_records_with_default_order(
109 table=self.transformed_relation, engine=self.engine, **kwargs,
110 )
111
112 # mirrors a method in db.records.operations.select
113 def get_count(self, **kwargs):
114 return records_select.get_count(
115 table=self.transformed_relation, engine=self.engine, **kwargs,
116 )
117
118 # NOTE if too expensive, can be rewritten to parse DBQuery spec, instead of leveraging sqlalchemy
119 @property
120 def all_sa_columns_map(self):
121 """
122 Expensive! use with care.
123 """
124 initial_columns_map = {
125 col.name: MathesarColumn.from_column(col, engine=self.engine)
126 for col in self.initial_relation.columns
127 }
128 output_columns_map = {
129 col.name: col for col in self.sa_output_columns
130 }
131 transforms_columns_map = {} if self.transformations is None else {
132 col.name: MathesarColumn.from_column(col, engine=self.engine)
133 for i in range(len(self.transformations))
134 for col in DBQuery(
135 base_table_oid=self.base_table_oid,
136 initial_columns=self.initial_columns,
137 engine=self.engine,
138 transformations=self.transformations[:i],
139 name=f'{self.name}_{i}'
140 ).transformed_relation.columns
141 }
142 map_of_alias_to_sa_col = initial_columns_map | transforms_columns_map | output_columns_map
143 return map_of_alias_to_sa_col
144
145 @property
146 def sa_output_columns(self):
147 """
148 Sequence of SQLAlchemy columns representing the output columns of the
149 relation described by this query.
150 """
151 return tuple(
152 MathesarColumn.from_column(sa_col, engine=self.engine)
153 for sa_col
154 in self.transformed_relation.columns
155 )
156
157 @property
158 def transformed_relation(self):
159 """
160 A query describes a relation. This property is the result of parsing a
161 query into a relation.
162 """
163 transformations = self.transformations
164 if transformations:
165 transformed = apply_transformations(
166 self.initial_relation,
167 transformations,
168 )
169 return transformed
170 else:
171 return self.initial_relation
172
173 @property
174 def initial_relation(self):
175 metadata = self.metadata
176 base_table = reflect_table_from_oid(
177 self.base_table_oid, self.engine, metadata=metadata
178 )
179 from_clause = base_table
180 # We cache this to avoid copies of the same join path to a given table
181 jp_path_alias_map = {(): base_table}
182
183 def _get_table(oid):
184 """
185 We use the function-scoped metadata so all involved tables are aware
186 of each other.
187 """
188 return reflect_table_from_oid(oid, self.engine, metadata=metadata, keep_existing=True)
189
190 def _get_column_name(oid, attnum):
191 return get_column_name_from_attnum(oid, attnum, self.engine, metadata=metadata)
192
193 def _process_initial_column(col):
194 nonlocal from_clause
195 col_name = _get_column_name(col.reloid, col.attnum)
196 # Make the path hashable so it can be a dict key
197 jp_path = _guarantee_jp_path_tuples(col.jp_path)
198 right = base_table
199
200 for i, jp in enumerate(jp_path):
201 left = jp_path_alias_map[jp_path[:i]]
202 right = _get_table(jp[1][0]).alias()
203 jp_path_alias_map[jp_path[:i + 1]] = right
204 left_col_name = _get_column_name(jp[0][0], jp[0][1])
205 right_col_name = _get_column_name(jp[1][0], jp[1][1])
206 left_col = left.columns[left_col_name]
207 right_col = right.columns[right_col_name]
208 from_clause = from_clause.join(
209 right, onclause=left_col == right_col, isouter=True,
210 )
211
212 return right.columns[col_name].label(col.alias)
213
214 stmt = select(
215 [_process_initial_column(col) for col in self.initial_columns]
216 ).select_from(from_clause)
217 return stmt.cte()
218
219 def get_input_alias_for_output_alias(self, output_alias):
220 return self.map_of_output_alias_to_input_alias.get(output_alias)
221
222 # TODO consider caching; not urgent, since redundant calls don't trigger IO, it seems
223 @property
224 def map_of_output_alias_to_input_alias(self):
225 m = dict()
226 transforms = self.transformations
227 if transforms:
228 for transform in transforms:
229 m = m | transform.map_of_output_alias_to_input_alias
230 return m
231
232
233 class InitialColumn:
234 def __init__(
235 self,
236 # TODO consider renaming to oid; reloid is not a term we use,
237 # even if it's what postgres uses; or use reloid more
238 reloid,
239 attnum,
240 alias,
241 jp_path=None,
242 ):
243 # alias mustn't be an empty string
244 assert isinstance(alias, str) and alias.strip() != ""
245 self.reloid = reloid
246 self.attnum = attnum
247 self.alias = alias
248 self.jp_path = _guarantee_jp_path_tuples(jp_path)
249
250 @property
251 def is_base_column(self):
252 """
253 A base column is an initial column on a query's base table.
254 """
255 return self.jp_path is None
256
257 def __eq__(self, other):
258 """Instances are equal when attributes are equal."""
259 if type(other) is type(self):
260 return self.__dict__ == other.__dict__
261 return False
262
263 def __hash__(self):
264 """Hashes are equal when attributes are equal."""
265 return hash(frozendict(self.__dict__))
266
267
268 def _guarantee_jp_path_tuples(jp_path):
269 """
270 Makes sure that jp_path is made up of tuples or is an empty tuple.
271 """
272 if jp_path is not None:
273 return tuple(
274 (
275 tuple(edge[0]),
276 tuple(edge[1]),
277 )
278 for edge
279 in jp_path
280 )
281 else:
282 return tuple()
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/queries/base.py b/db/queries/base.py
--- a/db/queries/base.py
+++ b/db/queries/base.py
@@ -179,6 +179,7 @@
from_clause = base_table
# We cache this to avoid copies of the same join path to a given table
jp_path_alias_map = {(): base_table}
+ jp_path_unique_set = set()
def _get_table(oid):
"""
@@ -199,15 +200,22 @@
for i, jp in enumerate(jp_path):
left = jp_path_alias_map[jp_path[:i]]
- right = _get_table(jp[1][0]).alias()
- jp_path_alias_map[jp_path[:i + 1]] = right
+ right_table = jp_path[:i + 1]
+ if right_table in jp_path_alias_map:
+ right = jp_path_alias_map[right_table]
+ else:
+ right = _get_table(jp[1][0]).alias()
+ jp_path_alias_map[jp_path[:i + 1]] = right
left_col_name = _get_column_name(jp[0][0], jp[0][1])
right_col_name = _get_column_name(jp[1][0], jp[1][1])
left_col = left.columns[left_col_name]
right_col = right.columns[right_col_name]
- from_clause = from_clause.join(
- right, onclause=left_col == right_col, isouter=True,
- )
+ join_columns = f"{left_col}, {right_col}"
+ if join_columns not in jp_path_unique_set:
+ jp_path_unique_set.add(join_columns)
+ from_clause = from_clause.join(
+ right, onclause=left_col == right_col, isouter=True,
+ )
return right.columns[col_name].label(col.alias)
|
{"golden_diff": "diff --git a/db/queries/base.py b/db/queries/base.py\n--- a/db/queries/base.py\n+++ b/db/queries/base.py\n@@ -179,6 +179,7 @@\n from_clause = base_table\n # We cache this to avoid copies of the same join path to a given table\n jp_path_alias_map = {(): base_table}\n+ jp_path_unique_set = set()\n \n def _get_table(oid):\n \"\"\"\n@@ -199,15 +200,22 @@\n \n for i, jp in enumerate(jp_path):\n left = jp_path_alias_map[jp_path[:i]]\n- right = _get_table(jp[1][0]).alias()\n- jp_path_alias_map[jp_path[:i + 1]] = right\n+ right_table = jp_path[:i + 1]\n+ if right_table in jp_path_alias_map:\n+ right = jp_path_alias_map[right_table]\n+ else:\n+ right = _get_table(jp[1][0]).alias()\n+ jp_path_alias_map[jp_path[:i + 1]] = right\n left_col_name = _get_column_name(jp[0][0], jp[0][1])\n right_col_name = _get_column_name(jp[1][0], jp[1][1])\n left_col = left.columns[left_col_name]\n right_col = right.columns[right_col_name]\n- from_clause = from_clause.join(\n- right, onclause=left_col == right_col, isouter=True,\n- )\n+ join_columns = f\"{left_col}, {right_col}\"\n+ if join_columns not in jp_path_unique_set:\n+ jp_path_unique_set.add(join_columns)\n+ from_clause = from_clause.join(\n+ right, onclause=left_col == right_col, isouter=True,\n+ )\n \n return right.columns[col_name].label(col.alias)\n", "issue": "Something is wrong with the data explorer....\n<!-- A clear and concise description of what the bug is. -->\r\nI'm trying to make the Overdue Books report and ended up with this:\r\n<img width=\"1151\" alt=\"Screen Shot 2022-12-20 at 7 23 10 PM\" src=\"https://user-images.githubusercontent.com/287034/208792427-f8b7bc2a-7228-4c11-90d0-d95766bcd7e4.png\">\r\n\r\nAaron Black has not checked out the same book over and over again, here's what his checkouts actually look like.\r\n\r\n\r\nIf I try to recreate the exploration, everything looks fine - I add Patron name, email, and book title (I selected do not summarize) and get this:\r\n\r\n\r\nThen I add Due Date from Checkouts and everything goes nuts\r\n\r\n\r\n\n", "before_files": [{"content": "from frozendict import frozendict\nfrom sqlalchemy import select\n\nfrom db.records.operations import select as records_select\nfrom db.columns.base import MathesarColumn\nfrom db.columns.operations.select import get_column_name_from_attnum\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.transforms.operations.apply import apply_transformations\nfrom db.metadata import get_empty_metadata\n\n\nclass DBQuery:\n def __init__(\n self,\n base_table_oid,\n initial_columns,\n engine,\n transformations=None,\n name=None,\n # The same metadata will be used by all the methods within DBQuery\n # So make sure to change the metadata in case the DBQuery methods are called\n # after a mutation to the database object that could make the existing metadata invalid.\n metadata=None\n ):\n self.base_table_oid = base_table_oid\n for initial_col in initial_columns:\n assert isinstance(initial_col, InitialColumn)\n self.initial_columns = initial_columns\n self.engine = engine\n if transformations is None:\n # Less states to consider if no transformations is just an empty sequence\n transformations = tuple()\n self.transformations = transformations\n self.name = name\n self.metadata = metadata if metadata else get_empty_metadata()\n\n def get_input_aliases(self, ix_of_transform):\n \"\"\"\n Each transformation in a DBQuery has its own list of input aliases; this returns it.\n \"\"\"\n initial_aliases = self.initial_aliases\n if ix_of_transform == 
0:\n return initial_aliases\n input_aliases = initial_aliases\n previous_transforms = self.transformations[:ix_of_transform]\n for transform in previous_transforms:\n output_aliases = transform.get_output_aliases(input_aliases)\n input_aliases = output_aliases\n return input_aliases\n\n def get_initial_column_by_input_alias(self, ix_of_transform, input_alias):\n \"\"\"\n Retraces the chain of input aliases until it gets to an initial column.\n\n Returns None if the alias does not originate from an initial column in a way that would\n preserve a unique constraint. E.g. if it is generated by an aggregation.\n \"\"\"\n initial_col_alias = \\\n self._get_initial_alias_by_input_alias(ix_of_transform, input_alias)\n if initial_col_alias is None:\n return None\n initial_column = \\\n self._get_initial_column_by_initial_column_alias(initial_col_alias)\n return initial_column\n\n def _get_initial_alias_by_input_alias(self, ix_of_transform, input_alias):\n if ix_of_transform == 0:\n return input_alias\n transforms = self.transformations[:ix_of_transform]\n initial_aliases = self.initial_aliases\n input_aliases = initial_aliases\n uc_mappings_for_each_transform = [\n transform.get_unique_constraint_mappings(input_aliases)\n for transform in transforms\n ]\n for uc_mappings in reversed(uc_mappings_for_each_transform):\n for uc_mapping in uc_mappings:\n if uc_mapping.output_alias == input_alias:\n input_alias = uc_mapping.input_alias\n if input_alias is None:\n return None\n break\n initial_alias = input_alias\n return initial_alias\n\n def _get_initial_column_by_initial_column_alias(self, alias):\n \"\"\"\n Looks up an initial column by initial column alias; no recursive logic.\n \"\"\"\n for initial_column in self.initial_columns:\n if initial_column.alias == alias:\n return initial_column\n\n @property\n def initial_aliases(self):\n return [\n initial_column.alias\n for initial_column\n in self.initial_columns\n ]\n\n # mirrors a method in db.records.operations.select\n def get_records(self, **kwargs):\n # NOTE how through this method you can perform a second batch of\n # transformations. this reflects fact that we can form a query, and\n # then apply temporary transforms on it, like how you can apply\n # temporary transforms to a table when in a table view.\n return records_select.get_records_with_default_order(\n table=self.transformed_relation, engine=self.engine, **kwargs,\n )\n\n # mirrors a method in db.records.operations.select\n def get_count(self, **kwargs):\n return records_select.get_count(\n table=self.transformed_relation, engine=self.engine, **kwargs,\n )\n\n # NOTE if too expensive, can be rewritten to parse DBQuery spec, instead of leveraging sqlalchemy\n @property\n def all_sa_columns_map(self):\n \"\"\"\n Expensive! 
use with care.\n \"\"\"\n initial_columns_map = {\n col.name: MathesarColumn.from_column(col, engine=self.engine)\n for col in self.initial_relation.columns\n }\n output_columns_map = {\n col.name: col for col in self.sa_output_columns\n }\n transforms_columns_map = {} if self.transformations is None else {\n col.name: MathesarColumn.from_column(col, engine=self.engine)\n for i in range(len(self.transformations))\n for col in DBQuery(\n base_table_oid=self.base_table_oid,\n initial_columns=self.initial_columns,\n engine=self.engine,\n transformations=self.transformations[:i],\n name=f'{self.name}_{i}'\n ).transformed_relation.columns\n }\n map_of_alias_to_sa_col = initial_columns_map | transforms_columns_map | output_columns_map\n return map_of_alias_to_sa_col\n\n @property\n def sa_output_columns(self):\n \"\"\"\n Sequence of SQLAlchemy columns representing the output columns of the\n relation described by this query.\n \"\"\"\n return tuple(\n MathesarColumn.from_column(sa_col, engine=self.engine)\n for sa_col\n in self.transformed_relation.columns\n )\n\n @property\n def transformed_relation(self):\n \"\"\"\n A query describes a relation. This property is the result of parsing a\n query into a relation.\n \"\"\"\n transformations = self.transformations\n if transformations:\n transformed = apply_transformations(\n self.initial_relation,\n transformations,\n )\n return transformed\n else:\n return self.initial_relation\n\n @property\n def initial_relation(self):\n metadata = self.metadata\n base_table = reflect_table_from_oid(\n self.base_table_oid, self.engine, metadata=metadata\n )\n from_clause = base_table\n # We cache this to avoid copies of the same join path to a given table\n jp_path_alias_map = {(): base_table}\n\n def _get_table(oid):\n \"\"\"\n We use the function-scoped metadata so all involved tables are aware\n of each other.\n \"\"\"\n return reflect_table_from_oid(oid, self.engine, metadata=metadata, keep_existing=True)\n\n def _get_column_name(oid, attnum):\n return get_column_name_from_attnum(oid, attnum, self.engine, metadata=metadata)\n\n def _process_initial_column(col):\n nonlocal from_clause\n col_name = _get_column_name(col.reloid, col.attnum)\n # Make the path hashable so it can be a dict key\n jp_path = _guarantee_jp_path_tuples(col.jp_path)\n right = base_table\n\n for i, jp in enumerate(jp_path):\n left = jp_path_alias_map[jp_path[:i]]\n right = _get_table(jp[1][0]).alias()\n jp_path_alias_map[jp_path[:i + 1]] = right\n left_col_name = _get_column_name(jp[0][0], jp[0][1])\n right_col_name = _get_column_name(jp[1][0], jp[1][1])\n left_col = left.columns[left_col_name]\n right_col = right.columns[right_col_name]\n from_clause = from_clause.join(\n right, onclause=left_col == right_col, isouter=True,\n )\n\n return right.columns[col_name].label(col.alias)\n\n stmt = select(\n [_process_initial_column(col) for col in self.initial_columns]\n ).select_from(from_clause)\n return stmt.cte()\n\n def get_input_alias_for_output_alias(self, output_alias):\n return self.map_of_output_alias_to_input_alias.get(output_alias)\n\n # TODO consider caching; not urgent, since redundant calls don't trigger IO, it seems\n @property\n def map_of_output_alias_to_input_alias(self):\n m = dict()\n transforms = self.transformations\n if transforms:\n for transform in transforms:\n m = m | transform.map_of_output_alias_to_input_alias\n return m\n\n\nclass InitialColumn:\n def __init__(\n self,\n # TODO consider renaming to oid; reloid is not a term we use,\n # even if it's what postgres 
uses; or use reloid more\n reloid,\n attnum,\n alias,\n jp_path=None,\n ):\n # alias mustn't be an empty string\n assert isinstance(alias, str) and alias.strip() != \"\"\n self.reloid = reloid\n self.attnum = attnum\n self.alias = alias\n self.jp_path = _guarantee_jp_path_tuples(jp_path)\n\n @property\n def is_base_column(self):\n \"\"\"\n A base column is an initial column on a query's base table.\n \"\"\"\n return self.jp_path is None\n\n def __eq__(self, other):\n \"\"\"Instances are equal when attributes are equal.\"\"\"\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __hash__(self):\n \"\"\"Hashes are equal when attributes are equal.\"\"\"\n return hash(frozendict(self.__dict__))\n\n\ndef _guarantee_jp_path_tuples(jp_path):\n \"\"\"\n Makes sure that jp_path is made up of tuples or is an empty tuple.\n \"\"\"\n if jp_path is not None:\n return tuple(\n (\n tuple(edge[0]),\n tuple(edge[1]),\n )\n for edge\n in jp_path\n )\n else:\n return tuple()\n", "path": "db/queries/base.py"}], "after_files": [{"content": "from frozendict import frozendict\nfrom sqlalchemy import select\n\nfrom db.records.operations import select as records_select\nfrom db.columns.base import MathesarColumn\nfrom db.columns.operations.select import get_column_name_from_attnum\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.transforms.operations.apply import apply_transformations\nfrom db.metadata import get_empty_metadata\n\n\nclass DBQuery:\n def __init__(\n self,\n base_table_oid,\n initial_columns,\n engine,\n transformations=None,\n name=None,\n # The same metadata will be used by all the methods within DBQuery\n # So make sure to change the metadata in case the DBQuery methods are called\n # after a mutation to the database object that could make the existing metadata invalid.\n metadata=None\n ):\n self.base_table_oid = base_table_oid\n for initial_col in initial_columns:\n assert isinstance(initial_col, InitialColumn)\n self.initial_columns = initial_columns\n self.engine = engine\n if transformations is None:\n # Less states to consider if no transformations is just an empty sequence\n transformations = tuple()\n self.transformations = transformations\n self.name = name\n self.metadata = metadata if metadata else get_empty_metadata()\n\n def get_input_aliases(self, ix_of_transform):\n \"\"\"\n Each transformation in a DBQuery has its own list of input aliases; this returns it.\n \"\"\"\n initial_aliases = self.initial_aliases\n if ix_of_transform == 0:\n return initial_aliases\n input_aliases = initial_aliases\n previous_transforms = self.transformations[:ix_of_transform]\n for transform in previous_transforms:\n output_aliases = transform.get_output_aliases(input_aliases)\n input_aliases = output_aliases\n return input_aliases\n\n def get_initial_column_by_input_alias(self, ix_of_transform, input_alias):\n \"\"\"\n Retraces the chain of input aliases until it gets to an initial column.\n\n Returns None if the alias does not originate from an initial column in a way that would\n preserve a unique constraint. E.g. 
if it is generated by an aggregation.\n \"\"\"\n initial_col_alias = \\\n self._get_initial_alias_by_input_alias(ix_of_transform, input_alias)\n if initial_col_alias is None:\n return None\n initial_column = \\\n self._get_initial_column_by_initial_column_alias(initial_col_alias)\n return initial_column\n\n def _get_initial_alias_by_input_alias(self, ix_of_transform, input_alias):\n if ix_of_transform == 0:\n return input_alias\n transforms = self.transformations[:ix_of_transform]\n initial_aliases = self.initial_aliases\n input_aliases = initial_aliases\n uc_mappings_for_each_transform = [\n transform.get_unique_constraint_mappings(input_aliases)\n for transform in transforms\n ]\n for uc_mappings in reversed(uc_mappings_for_each_transform):\n for uc_mapping in uc_mappings:\n if uc_mapping.output_alias == input_alias:\n input_alias = uc_mapping.input_alias\n if input_alias is None:\n return None\n break\n initial_alias = input_alias\n return initial_alias\n\n def _get_initial_column_by_initial_column_alias(self, alias):\n \"\"\"\n Looks up an initial column by initial column alias; no recursive logic.\n \"\"\"\n for initial_column in self.initial_columns:\n if initial_column.alias == alias:\n return initial_column\n\n @property\n def initial_aliases(self):\n return [\n initial_column.alias\n for initial_column\n in self.initial_columns\n ]\n\n # mirrors a method in db.records.operations.select\n def get_records(self, **kwargs):\n # NOTE how through this method you can perform a second batch of\n # transformations. this reflects fact that we can form a query, and\n # then apply temporary transforms on it, like how you can apply\n # temporary transforms to a table when in a table view.\n return records_select.get_records_with_default_order(\n table=self.transformed_relation, engine=self.engine, **kwargs,\n )\n\n # mirrors a method in db.records.operations.select\n def get_count(self, **kwargs):\n return records_select.get_count(\n table=self.transformed_relation, engine=self.engine, **kwargs,\n )\n\n # NOTE if too expensive, can be rewritten to parse DBQuery spec, instead of leveraging sqlalchemy\n @property\n def all_sa_columns_map(self):\n \"\"\"\n Expensive! use with care.\n \"\"\"\n initial_columns_map = {\n col.name: MathesarColumn.from_column(col, engine=self.engine)\n for col in self.initial_relation.columns\n }\n output_columns_map = {\n col.name: col for col in self.sa_output_columns\n }\n transforms_columns_map = {} if self.transformations is None else {\n col.name: MathesarColumn.from_column(col, engine=self.engine)\n for i in range(len(self.transformations))\n for col in DBQuery(\n base_table_oid=self.base_table_oid,\n initial_columns=self.initial_columns,\n engine=self.engine,\n transformations=self.transformations[:i],\n name=f'{self.name}_{i}'\n ).transformed_relation.columns\n }\n map_of_alias_to_sa_col = initial_columns_map | transforms_columns_map | output_columns_map\n return map_of_alias_to_sa_col\n\n @property\n def sa_output_columns(self):\n \"\"\"\n Sequence of SQLAlchemy columns representing the output columns of the\n relation described by this query.\n \"\"\"\n return tuple(\n MathesarColumn.from_column(sa_col, engine=self.engine)\n for sa_col\n in self.transformed_relation.columns\n )\n\n @property\n def transformed_relation(self):\n \"\"\"\n A query describes a relation. 
This property is the result of parsing a\n query into a relation.\n \"\"\"\n transformations = self.transformations\n if transformations:\n transformed = apply_transformations(\n self.initial_relation,\n transformations,\n )\n return transformed\n else:\n return self.initial_relation\n\n @property\n def initial_relation(self):\n metadata = self.metadata\n base_table = reflect_table_from_oid(\n self.base_table_oid, self.engine, metadata=metadata\n )\n from_clause = base_table\n # We cache this to avoid copies of the same join path to a given table\n jp_path_alias_map = {(): base_table}\n jp_path_unique_set = set()\n\n def _get_table(oid):\n \"\"\"\n We use the function-scoped metadata so all involved tables are aware\n of each other.\n \"\"\"\n return reflect_table_from_oid(oid, self.engine, metadata=metadata, keep_existing=True)\n\n def _get_column_name(oid, attnum):\n return get_column_name_from_attnum(oid, attnum, self.engine, metadata=metadata)\n\n def _process_initial_column(col):\n nonlocal from_clause\n col_name = _get_column_name(col.reloid, col.attnum)\n # Make the path hashable so it can be a dict key\n jp_path = _guarantee_jp_path_tuples(col.jp_path)\n right = base_table\n\n for i, jp in enumerate(jp_path):\n left = jp_path_alias_map[jp_path[:i]]\n right_table = jp_path[:i + 1]\n if right_table in jp_path_alias_map:\n right = jp_path_alias_map[right_table]\n else:\n right = _get_table(jp[1][0]).alias()\n jp_path_alias_map[jp_path[:i + 1]] = right\n left_col_name = _get_column_name(jp[0][0], jp[0][1])\n right_col_name = _get_column_name(jp[1][0], jp[1][1])\n left_col = left.columns[left_col_name]\n right_col = right.columns[right_col_name]\n join_columns = f\"{left_col}, {right_col}\"\n if join_columns not in jp_path_unique_set:\n jp_path_unique_set.add(join_columns)\n from_clause = from_clause.join(\n right, onclause=left_col == right_col, isouter=True,\n )\n\n return right.columns[col_name].label(col.alias)\n\n stmt = select(\n [_process_initial_column(col) for col in self.initial_columns]\n ).select_from(from_clause)\n return stmt.cte()\n\n def get_input_alias_for_output_alias(self, output_alias):\n return self.map_of_output_alias_to_input_alias.get(output_alias)\n\n # TODO consider caching; not urgent, since redundant calls don't trigger IO, it seems\n @property\n def map_of_output_alias_to_input_alias(self):\n m = dict()\n transforms = self.transformations\n if transforms:\n for transform in transforms:\n m = m | transform.map_of_output_alias_to_input_alias\n return m\n\n\nclass InitialColumn:\n def __init__(\n self,\n # TODO consider renaming to oid; reloid is not a term we use,\n # even if it's what postgres uses; or use reloid more\n reloid,\n attnum,\n alias,\n jp_path=None,\n ):\n # alias mustn't be an empty string\n assert isinstance(alias, str) and alias.strip() != \"\"\n self.reloid = reloid\n self.attnum = attnum\n self.alias = alias\n self.jp_path = _guarantee_jp_path_tuples(jp_path)\n\n @property\n def is_base_column(self):\n \"\"\"\n A base column is an initial column on a query's base table.\n \"\"\"\n return self.jp_path is None\n\n def __eq__(self, other):\n \"\"\"Instances are equal when attributes are equal.\"\"\"\n if type(other) is type(self):\n return self.__dict__ == other.__dict__\n return False\n\n def __hash__(self):\n \"\"\"Hashes are equal when attributes are equal.\"\"\"\n return hash(frozendict(self.__dict__))\n\n\ndef _guarantee_jp_path_tuples(jp_path):\n \"\"\"\n Makes sure that jp_path is made up of tuples or is an empty tuple.\n \"\"\"\n if 
jp_path is not None:\n return tuple(\n (\n tuple(edge[0]),\n tuple(edge[1]),\n )\n for edge\n in jp_path\n )\n else:\n return tuple()\n", "path": "db/queries/base.py"}]}
num_tokens: 3,579
num_tokens_diff: 420
problem_id: gh_patches_debug_31768
source: rasdani/github-patches
task_type: git_diff
in_source_id: sunpy__sunpy-5592
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
XRT Map ScaleValueError
Hello,
I am getting an error when trying to plot the HinodeXRT.fits file using:
```
m = sunpy.map.Map('HinodeXRT.fits')
m.plot()
```
The error is: `ScaleValueError: Scale 'utc (tbr)' is not in the allowed scales ['local', 'tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc']`
A similar fix was implemented for this issue, but it only matched the uppercase ‘UTC (TBR)’.
https://github.com/sunpy/sunpy/blob/71b09358db2a08fa2aec34a9798c1cc7ed6368ac/sunpy/map/sources/hinode.py#L49-L51
--- END ISSUE ---
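For context, the failure mode is a case-sensitive comparison: the guard in the current map source only rewrites the header value when it is exactly `UTC (TBR)`, so a lowercase `utc (tbr)` slips through to the time-scale validation and triggers the `ScaleValueError` above. A minimal sketch of the difference, using a made-up `meta` dictionary as a stand-in for the FITS header metadata (not taken from the actual file):

```python
# Hypothetical header metadata; a real XRT FITS header may differ.
meta = {"timesys": "utc (tbr)"}

# Case-sensitive guard (as in the current source below): never fires for the
# lowercase variant, so the invalid scale string survives.
if meta.get("timesys", "") == "UTC (TBR)":
    meta["timesys"] = "UTC"
assert meta["timesys"] == "utc (tbr)"

# Case-insensitive guard: normalises the value regardless of case.
if meta.get("timesys", "").upper() == "UTC (TBR)":
    meta["timesys"] = "UTC"
assert meta["timesys"] == "UTC"
```

The relevant source file and the accepted patch follow below.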
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/map/sources/hinode.py`
Content:
```
1 """Hinode XRT and SOT Map subclass definitions"""
2
3 __author__ = ["Jack Ireland, Jose Ivan Campos-Rozo, David Perez-Suarez"]
4 __email__ = "[email protected]"
5
6 from sunpy import log
7 from sunpy.map import GenericMap
8
9 __all__ = ['XRTMap', 'SOTMap']
10
11 # the following values comes from xrt_prep.pro
12 # search for saturation in
13 # http://darts.jaxa.jp/pub/ssw/hinode/xrt/idl/util/xrt_prep.pro
14 # SATURATION_LIMIT = 2500
15
16
17 def _lower_list(l):
18 return [item.lower() for item in l]
19
20
21 class XRTMap(GenericMap):
22 """Hinode XRT map definition.
23
24 The X-Ray Telescope (XRT) is a high resolution grazing incidence telescope,
25 which is a succsessor to Yohkoh. It provides 2-arcsecond resolution images
26 of the highest temperature solar coronal material,
27 from 1,000,000 to 10,000,000 Kelvin.
28
29 Hinode was launched on 22 September 2006 into a sun-synchronous orbit.
30
31 References
32 ----------
33 * `Hinode Mission Page <https://solarb.msfc.nasa.gov/index.html>`_
34 * `XRT Instrument Page <http://xrt.cfa.harvard.edu>`_
35 * `Fits header reference <http://hinode.nao.ac.jp/uploads/2016/04/22/SB_MW_Key13.pdf>`_
36 * `Hinode User Guide <http://hinode.nao.ac.jp/en/for-researchers/analysis-guide/>`_
37 * `XRT Analysis Guide <http://xrt.cfa.harvard.edu/science/tutorials.php>`_
38 * `Coronal Temperature Diagnostic Capability of the Hinode/X-Ray Telescope Based on Self-Consistent Calibration <https://arxiv.org/abs/1011.2867>`_
39 """
40 filter_wheel1_measurements = ["Al_med", "Al_poly", "Be_med",
41 "Be_thin", "C_poly", "Open"]
42 filter_wheel2_measurements = ["Open", "Al_mesh", "Al_thick",
43 "Be_thick", "Gband", "Ti_poly"]
44
45 def __init__(self, data, header, **kwargs):
46
47 super().__init__(data, header, **kwargs)
48
49 if self.meta.get('timesys', '') == 'UTC (TBR)':
50 log.debug('Modifying TIMESYS keyword from "UTC (TBR)" to "UTC"')
51 self.meta['timesys'] = 'UTC'
52
53 # converting data array to masked array
54 # self.data = ma.masked_where(self.data > SATURATION_LIMIT, self.data)
55
56 fw1 = header.get('EC_FW1_')
57 if fw1.lower() not in _lower_list(self.filter_wheel1_measurements):
58 raise ValueError('Unpexpected filter wheel 1 in header.')
59 fw1 = fw1.replace("_", " ")
60
61 fw2 = header.get('EC_FW2_')
62 if fw2.lower() not in _lower_list(self.filter_wheel2_measurements):
63 raise ValueError('Unpexpected filter wheel 2 in header.')
64 fw2 = fw2.replace("_", " ")
65
66 self.meta['detector'] = "XRT"
67 # self.meta['instrume'] = "XRT"
68 self.meta['telescop'] = "Hinode"
69 self.plot_settings['cmap'] = 'hinodexrt'
70
71 @property
72 def measurement(self):
73 fw1 = self.meta.get('EC_FW1_').replace("_", " ")
74 fw2 = self.meta.get('EC_FW2_').replace("_", " ")
75 return f"{fw1}-{fw2}"
76
77 @classmethod
78 def is_datasource_for(cls, data, header, **kwargs):
79 """Determines if header corresponds to an XRT image"""
80 return header.get('instrume') == 'XRT'
81
82
83 class SOTMap(GenericMap):
84 """Hinode SOT Image Map definition.
85
86 The Hinode Solar Optical Telescope (SOT) consists of a 50 cm
87 diffraction-limited Gregorian telescope. It is optimized for
88 accurate measurement of the vector magnetic
89 field in the photosphere and dynamics of both the photosphere and
90 chromosphere associated with the magnetic fields.
91
92 Hinode was launched on 22 September 2006 into a sun-synchronous orbit.
93
94 References
95 ----------
96 * `Hinode Mission Page <http://solarb.msfc.nasa.gov/index.html>`_
97 * `Hinode SOT Instrument Page <http://sot.lmsal.com>`_
98 * `Hinode SOT Instrument Paper <https://arxiv.org/abs/0711.1715>`_
99 * `Data Analsis Guide <https://sot.lmsal.com/doc/rep/sot254/fid366/SOT00042_33_SOT_Analysis_Guide_SAG.pdf>`_
100 """
101 # TODO: get a link for the SOT FITS headers
102 # Add in some information about the the possible instrument, observation
103 # type, observable ion and wavelength
104
105 Instruments = ['SOT/WB', 'SOT/NB', 'SOT/SP', 'SOT/CT']
106
107 Waves = ['6302A', 'BFI no move', 'CN bandhead 3883',
108 'Ca II H line', 'G band 4305', 'NFI no move',
109 'TF Fe I 6302', 'TF Mg I 5172', 'TF Na I 5896',
110 'blue cont 4504', 'green cont 5550', 'red cont 6684']
111
112 Observation_Type = ['FG (simple)', 'FG focus scan',
113 'FG shuttered I and V', 'FG shutterless I and V',
114 'FG shutterless I and V with 0.2s intervals',
115 'FG shutterless Stokes', 'SP IQUV 4D array']
116
117 def __init__(self, data, header, **kwargs):
118 super().__init__(data, header, **kwargs)
119
120 self.meta['detector'] = "SOT"
121 self.meta['telescop'] = "Hinode"
122 self._nickname = self.detector
123
124 # TODO (add other options, Now all threated as intensity. This follows
125 # Hinode SDC archive) StokesQUV -> grey, Velocity -> EIS, Width -> EIS,
126 # Mag Field Azi -> IDL 5 (STD gamma II)
127 # 'WB' -> red
128 # 'NB'(0 = red); (>0 = gray), # nb has 1 stokes I, the rest quv
129 # 'SP' (<=1 = red); (>1 = gray) #sp has 2 stokes I, the rest quv
130 color = {'SOT/WB': 'intensity',
131 'SOT/NB': 'intensity', # For the 1st dimension
132 'SOT/SP': 'intensity', # For the 1st 2 dimensions
133 }
134
135 self.plot_settings['cmap'] = 'hinodesot' + color[self.instrument]
136
137 @classmethod
138 def is_datasource_for(cls, data, header, **kwargs):
139 """Determines if header corresponds to an SOT image."""
140 return header.get('instrume') in cls.Instruments
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/sunpy/map/sources/hinode.py b/sunpy/map/sources/hinode.py
--- a/sunpy/map/sources/hinode.py
+++ b/sunpy/map/sources/hinode.py
@@ -1,18 +1,12 @@
"""Hinode XRT and SOT Map subclass definitions"""
+from sunpy import log
+from sunpy.map import GenericMap
__author__ = ["Jack Ireland, Jose Ivan Campos-Rozo, David Perez-Suarez"]
__email__ = "[email protected]"
-from sunpy import log
-from sunpy.map import GenericMap
-
__all__ = ['XRTMap', 'SOTMap']
-# the following values comes from xrt_prep.pro
-# search for saturation in
-# http://darts.jaxa.jp/pub/ssw/hinode/xrt/idl/util/xrt_prep.pro
-# SATURATION_LIMIT = 2500
-
def _lower_list(l):
return [item.lower() for item in l]
@@ -46,13 +40,10 @@
super().__init__(data, header, **kwargs)
- if self.meta.get('timesys', '') == 'UTC (TBR)':
+ if self.meta.get('timesys', '').upper() == 'UTC (TBR)':
log.debug('Modifying TIMESYS keyword from "UTC (TBR)" to "UTC"')
self.meta['timesys'] = 'UTC'
- # converting data array to masked array
- # self.data = ma.masked_where(self.data > SATURATION_LIMIT, self.data)
-
fw1 = header.get('EC_FW1_')
if fw1.lower() not in _lower_list(self.filter_wheel1_measurements):
raise ValueError('Unpexpected filter wheel 1 in header.')
@@ -64,7 +55,6 @@
fw2 = fw2.replace("_", " ")
self.meta['detector'] = "XRT"
-# self.meta['instrume'] = "XRT"
self.meta['telescop'] = "Hinode"
self.plot_settings['cmap'] = 'hinodexrt'
verification_info:
{"golden_diff": "diff --git a/sunpy/map/sources/hinode.py b/sunpy/map/sources/hinode.py\n--- a/sunpy/map/sources/hinode.py\n+++ b/sunpy/map/sources/hinode.py\n@@ -1,18 +1,12 @@\n \"\"\"Hinode XRT and SOT Map subclass definitions\"\"\"\n+from sunpy import log\n+from sunpy.map import GenericMap\n \n __author__ = [\"Jack Ireland, Jose Ivan Campos-Rozo, David Perez-Suarez\"]\n __email__ = \"[email protected]\"\n \n-from sunpy import log\n-from sunpy.map import GenericMap\n-\n __all__ = ['XRTMap', 'SOTMap']\n \n-# the following values comes from xrt_prep.pro\n-# search for saturation in\n-# http://darts.jaxa.jp/pub/ssw/hinode/xrt/idl/util/xrt_prep.pro\n-# SATURATION_LIMIT = 2500\n-\n \n def _lower_list(l):\n return [item.lower() for item in l]\n@@ -46,13 +40,10 @@\n \n super().__init__(data, header, **kwargs)\n \n- if self.meta.get('timesys', '') == 'UTC (TBR)':\n+ if self.meta.get('timesys', '').upper() == 'UTC (TBR)':\n log.debug('Modifying TIMESYS keyword from \"UTC (TBR)\" to \"UTC\"')\n self.meta['timesys'] = 'UTC'\n \n- # converting data array to masked array\n- # self.data = ma.masked_where(self.data > SATURATION_LIMIT, self.data)\n-\n fw1 = header.get('EC_FW1_')\n if fw1.lower() not in _lower_list(self.filter_wheel1_measurements):\n raise ValueError('Unpexpected filter wheel 1 in header.')\n@@ -64,7 +55,6 @@\n fw2 = fw2.replace(\"_\", \" \")\n \n self.meta['detector'] = \"XRT\"\n-# self.meta['instrume'] = \"XRT\"\n self.meta['telescop'] = \"Hinode\"\n self.plot_settings['cmap'] = 'hinodexrt'\n", "issue": "XRT Map ScaleValueError\nHello, \r\nI am getting an error when trying to plot the HinodeXRT.fits file using: \r\n```\r\nm = sunpy.map.Map('HinodeXRT.fits')\r\nm.plot()\r\n```\r\nThe error is: `ScaleValueError: Scale 'utc (tbr)' is not in the allowed scales ['local', 'tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc']`\r\n\r\n\r\n A similar fix was implemented for this issue but it was only match uppercase \u2018UTC (TBR)\u2019\r\n\r\nhttps://github.com/sunpy/sunpy/blob/71b09358db2a08fa2aec34a9798c1cc7ed6368ac/sunpy/map/sources/hinode.py#L49-L51\r\n\n", "before_files": [{"content": "\"\"\"Hinode XRT and SOT Map subclass definitions\"\"\"\n\n__author__ = [\"Jack Ireland, Jose Ivan Campos-Rozo, David Perez-Suarez\"]\n__email__ = \"[email protected]\"\n\nfrom sunpy import log\nfrom sunpy.map import GenericMap\n\n__all__ = ['XRTMap', 'SOTMap']\n\n# the following values comes from xrt_prep.pro\n# search for saturation in\n# http://darts.jaxa.jp/pub/ssw/hinode/xrt/idl/util/xrt_prep.pro\n# SATURATION_LIMIT = 2500\n\n\ndef _lower_list(l):\n return [item.lower() for item in l]\n\n\nclass XRTMap(GenericMap):\n \"\"\"Hinode XRT map definition.\n\n The X-Ray Telescope (XRT) is a high resolution grazing incidence telescope,\n which is a succsessor to Yohkoh. 
It provides 2-arcsecond resolution images\n of the highest temperature solar coronal material,\n from 1,000,000 to 10,000,000 Kelvin.\n\n Hinode was launched on 22 September 2006 into a sun-synchronous orbit.\n\n References\n ----------\n * `Hinode Mission Page <https://solarb.msfc.nasa.gov/index.html>`_\n * `XRT Instrument Page <http://xrt.cfa.harvard.edu>`_\n * `Fits header reference <http://hinode.nao.ac.jp/uploads/2016/04/22/SB_MW_Key13.pdf>`_\n * `Hinode User Guide <http://hinode.nao.ac.jp/en/for-researchers/analysis-guide/>`_\n * `XRT Analysis Guide <http://xrt.cfa.harvard.edu/science/tutorials.php>`_\n * `Coronal Temperature Diagnostic Capability of the Hinode/X-Ray Telescope Based on Self-Consistent Calibration <https://arxiv.org/abs/1011.2867>`_\n \"\"\"\n filter_wheel1_measurements = [\"Al_med\", \"Al_poly\", \"Be_med\",\n \"Be_thin\", \"C_poly\", \"Open\"]\n filter_wheel2_measurements = [\"Open\", \"Al_mesh\", \"Al_thick\",\n \"Be_thick\", \"Gband\", \"Ti_poly\"]\n\n def __init__(self, data, header, **kwargs):\n\n super().__init__(data, header, **kwargs)\n\n if self.meta.get('timesys', '') == 'UTC (TBR)':\n log.debug('Modifying TIMESYS keyword from \"UTC (TBR)\" to \"UTC\"')\n self.meta['timesys'] = 'UTC'\n\n # converting data array to masked array\n # self.data = ma.masked_where(self.data > SATURATION_LIMIT, self.data)\n\n fw1 = header.get('EC_FW1_')\n if fw1.lower() not in _lower_list(self.filter_wheel1_measurements):\n raise ValueError('Unpexpected filter wheel 1 in header.')\n fw1 = fw1.replace(\"_\", \" \")\n\n fw2 = header.get('EC_FW2_')\n if fw2.lower() not in _lower_list(self.filter_wheel2_measurements):\n raise ValueError('Unpexpected filter wheel 2 in header.')\n fw2 = fw2.replace(\"_\", \" \")\n\n self.meta['detector'] = \"XRT\"\n# self.meta['instrume'] = \"XRT\"\n self.meta['telescop'] = \"Hinode\"\n self.plot_settings['cmap'] = 'hinodexrt'\n\n @property\n def measurement(self):\n fw1 = self.meta.get('EC_FW1_').replace(\"_\", \" \")\n fw2 = self.meta.get('EC_FW2_').replace(\"_\", \" \")\n return f\"{fw1}-{fw2}\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an XRT image\"\"\"\n return header.get('instrume') == 'XRT'\n\n\nclass SOTMap(GenericMap):\n \"\"\"Hinode SOT Image Map definition.\n\n The Hinode Solar Optical Telescope (SOT) consists of a 50 cm\n diffraction-limited Gregorian telescope. 
It is optimized for\n accurate measurement of the vector magnetic\n field in the photosphere and dynamics of both the photosphere and\n chromosphere associated with the magnetic fields.\n\n Hinode was launched on 22 September 2006 into a sun-synchronous orbit.\n\n References\n ----------\n * `Hinode Mission Page <http://solarb.msfc.nasa.gov/index.html>`_\n * `Hinode SOT Instrument Page <http://sot.lmsal.com>`_\n * `Hinode SOT Instrument Paper <https://arxiv.org/abs/0711.1715>`_\n * `Data Analsis Guide <https://sot.lmsal.com/doc/rep/sot254/fid366/SOT00042_33_SOT_Analysis_Guide_SAG.pdf>`_\n \"\"\"\n # TODO: get a link for the SOT FITS headers\n # Add in some information about the the possible instrument, observation\n # type, observable ion and wavelength\n\n Instruments = ['SOT/WB', 'SOT/NB', 'SOT/SP', 'SOT/CT']\n\n Waves = ['6302A', 'BFI no move', 'CN bandhead 3883',\n 'Ca II H line', 'G band 4305', 'NFI no move',\n 'TF Fe I 6302', 'TF Mg I 5172', 'TF Na I 5896',\n 'blue cont 4504', 'green cont 5550', 'red cont 6684']\n\n Observation_Type = ['FG (simple)', 'FG focus scan',\n 'FG shuttered I and V', 'FG shutterless I and V',\n 'FG shutterless I and V with 0.2s intervals',\n 'FG shutterless Stokes', 'SP IQUV 4D array']\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n\n self.meta['detector'] = \"SOT\"\n self.meta['telescop'] = \"Hinode\"\n self._nickname = self.detector\n\n # TODO (add other options, Now all threated as intensity. This follows\n # Hinode SDC archive) StokesQUV -> grey, Velocity -> EIS, Width -> EIS,\n # Mag Field Azi -> IDL 5 (STD gamma II)\n # 'WB' -> red\n # 'NB'(0 = red); (>0 = gray), # nb has 1 stokes I, the rest quv\n # 'SP' (<=1 = red); (>1 = gray) #sp has 2 stokes I, the rest quv\n color = {'SOT/WB': 'intensity',\n 'SOT/NB': 'intensity', # For the 1st dimension\n 'SOT/SP': 'intensity', # For the 1st 2 dimensions\n }\n\n self.plot_settings['cmap'] = 'hinodesot' + color[self.instrument]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an SOT image.\"\"\"\n return header.get('instrume') in cls.Instruments\n", "path": "sunpy/map/sources/hinode.py"}], "after_files": [{"content": "\"\"\"Hinode XRT and SOT Map subclass definitions\"\"\"\nfrom sunpy import log\nfrom sunpy.map import GenericMap\n\n__author__ = [\"Jack Ireland, Jose Ivan Campos-Rozo, David Perez-Suarez\"]\n__email__ = \"[email protected]\"\n\n__all__ = ['XRTMap', 'SOTMap']\n\n\ndef _lower_list(l):\n return [item.lower() for item in l]\n\n\nclass XRTMap(GenericMap):\n \"\"\"Hinode XRT map definition.\n\n The X-Ray Telescope (XRT) is a high resolution grazing incidence telescope,\n which is a succsessor to Yohkoh. 
It provides 2-arcsecond resolution images\n of the highest temperature solar coronal material,\n from 1,000,000 to 10,000,000 Kelvin.\n\n Hinode was launched on 22 September 2006 into a sun-synchronous orbit.\n\n References\n ----------\n * `Hinode Mission Page <https://solarb.msfc.nasa.gov/index.html>`_\n * `XRT Instrument Page <http://xrt.cfa.harvard.edu>`_\n * `Fits header reference <http://hinode.nao.ac.jp/uploads/2016/04/22/SB_MW_Key13.pdf>`_\n * `Hinode User Guide <http://hinode.nao.ac.jp/en/for-researchers/analysis-guide/>`_\n * `XRT Analysis Guide <http://xrt.cfa.harvard.edu/science/tutorials.php>`_\n * `Coronal Temperature Diagnostic Capability of the Hinode/X-Ray Telescope Based on Self-Consistent Calibration <https://arxiv.org/abs/1011.2867>`_\n \"\"\"\n filter_wheel1_measurements = [\"Al_med\", \"Al_poly\", \"Be_med\",\n \"Be_thin\", \"C_poly\", \"Open\"]\n filter_wheel2_measurements = [\"Open\", \"Al_mesh\", \"Al_thick\",\n \"Be_thick\", \"Gband\", \"Ti_poly\"]\n\n def __init__(self, data, header, **kwargs):\n\n super().__init__(data, header, **kwargs)\n\n if self.meta.get('timesys', '').upper() == 'UTC (TBR)':\n log.debug('Modifying TIMESYS keyword from \"UTC (TBR)\" to \"UTC\"')\n self.meta['timesys'] = 'UTC'\n\n fw1 = header.get('EC_FW1_')\n if fw1.lower() not in _lower_list(self.filter_wheel1_measurements):\n raise ValueError('Unpexpected filter wheel 1 in header.')\n fw1 = fw1.replace(\"_\", \" \")\n\n fw2 = header.get('EC_FW2_')\n if fw2.lower() not in _lower_list(self.filter_wheel2_measurements):\n raise ValueError('Unpexpected filter wheel 2 in header.')\n fw2 = fw2.replace(\"_\", \" \")\n\n self.meta['detector'] = \"XRT\"\n self.meta['telescop'] = \"Hinode\"\n self.plot_settings['cmap'] = 'hinodexrt'\n\n @property\n def measurement(self):\n fw1 = self.meta.get('EC_FW1_').replace(\"_\", \" \")\n fw2 = self.meta.get('EC_FW2_').replace(\"_\", \" \")\n return f\"{fw1}-{fw2}\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an XRT image\"\"\"\n return header.get('instrume') == 'XRT'\n\n\nclass SOTMap(GenericMap):\n \"\"\"Hinode SOT Image Map definition.\n\n The Hinode Solar Optical Telescope (SOT) consists of a 50 cm\n diffraction-limited Gregorian telescope. 
It is optimized for\n accurate measurement of the vector magnetic\n field in the photosphere and dynamics of both the photosphere and\n chromosphere associated with the magnetic fields.\n\n Hinode was launched on 22 September 2006 into a sun-synchronous orbit.\n\n References\n ----------\n * `Hinode Mission Page <http://solarb.msfc.nasa.gov/index.html>`_\n * `Hinode SOT Instrument Page <http://sot.lmsal.com>`_\n * `Hinode SOT Instrument Paper <https://arxiv.org/abs/0711.1715>`_\n * `Data Analsis Guide <https://sot.lmsal.com/doc/rep/sot254/fid366/SOT00042_33_SOT_Analysis_Guide_SAG.pdf>`_\n \"\"\"\n # TODO: get a link for the SOT FITS headers\n # Add in some information about the the possible instrument, observation\n # type, observable ion and wavelength\n\n Instruments = ['SOT/WB', 'SOT/NB', 'SOT/SP', 'SOT/CT']\n\n Waves = ['6302A', 'BFI no move', 'CN bandhead 3883',\n 'Ca II H line', 'G band 4305', 'NFI no move',\n 'TF Fe I 6302', 'TF Mg I 5172', 'TF Na I 5896',\n 'blue cont 4504', 'green cont 5550', 'red cont 6684']\n\n Observation_Type = ['FG (simple)', 'FG focus scan',\n 'FG shuttered I and V', 'FG shutterless I and V',\n 'FG shutterless I and V with 0.2s intervals',\n 'FG shutterless Stokes', 'SP IQUV 4D array']\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n\n self.meta['detector'] = \"SOT\"\n self.meta['telescop'] = \"Hinode\"\n self._nickname = self.detector\n\n # TODO (add other options, Now all threated as intensity. This follows\n # Hinode SDC archive) StokesQUV -> grey, Velocity -> EIS, Width -> EIS,\n # Mag Field Azi -> IDL 5 (STD gamma II)\n # 'WB' -> red\n # 'NB'(0 = red); (>0 = gray), # nb has 1 stokes I, the rest quv\n # 'SP' (<=1 = red); (>1 = gray) #sp has 2 stokes I, the rest quv\n color = {'SOT/WB': 'intensity',\n 'SOT/NB': 'intensity', # For the 1st dimension\n 'SOT/SP': 'intensity', # For the 1st 2 dimensions\n }\n\n self.plot_settings['cmap'] = 'hinodesot' + color[self.instrument]\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an SOT image.\"\"\"\n return header.get('instrume') in cls.Instruments\n", "path": "sunpy/map/sources/hinode.py"}]}
num_tokens: 2,437
num_tokens_diff: 477
problem_id: gh_patches_debug_2166
source: rasdani/github-patches
task_type: git_diff
in_source_id: svthalia__concrexit-1802
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'Event' object has no attribute 'title_en'
Sentry Issue: [CONCREXIT-70](https://sentry.io/organizations/thalia/issues/2487433496/?referrer=github_integration)
```
AttributeError: 'Event' object has no attribute 'title_en'
(9 additional frame(s) were not displayed)
...
File "django/contrib/admin/options.py", line 1540, in changeform_view
return self._changeform_view(request, object_id, form_url, extra_context)
File "django/contrib/admin/options.py", line 1586, in _changeform_view
self.save_model(request, new_object, form, not add)
File "photos/admin.py", line 45, in save_model
super().save_model(request, obj, form, change)
File "django/contrib/admin/options.py", line 1099, in save_model
obj.save()
File "photos/models.py", line 158, in save
self.title = self.event.title_en
```
--- END ISSUE ---
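For context, the traceback reduces to a stale attribute reference: `Album.save()` copies the event title via `self.event.title_en`, but the `Event` model only exposes a plain `title` field, so saving an album without an explicit title raises the `AttributeError` shown above. A minimal sketch with plain Python classes standing in for the Django models (illustrative only):

```python
class Event:
    """Stand-in for events.models.Event: only a plain `title`, no `title_en`."""
    def __init__(self, title):
        self.title = title

event = Event("Open day")

# Broken lookup (raises AttributeError, as in the Sentry report):
try:
    album_title = event.title_en
except AttributeError:
    album_title = None

# Working lookup, matching the accepted fix below:
album_title = event.title
```

The affected model and the accepted patch follow below.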
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/photos/models.py`
Content:
```
1 import hashlib
2 import logging
3 import os
4 import random
5
6 from django.conf import settings
7 from django.core.exceptions import ValidationError
8 from django.db import models
9 from django.urls import reverse
10 from django.utils import timezone
11 from django.utils.functional import cached_property
12 from django.utils.translation import gettext_lazy as _
13
14 from members.models import Member
15 from events.models import Event
16 from pushnotifications.models import ScheduledMessage, Category
17
18 COVER_FILENAME = "cover.jpg"
19
20
21 logger = logging.getLogger(__name__)
22
23
24 def photo_uploadto(instance, filename):
25 """Get path of file to upload to."""
26 num = instance.album.photo_set.count()
27 extension = os.path.splitext(filename)[1]
28 new_filename = str(num).zfill(4) + extension
29 return os.path.join(Album.photosdir, instance.album.dirname, new_filename)
30
31
32 class Photo(models.Model):
33 """Model for a Photo object."""
34
35 album = models.ForeignKey(
36 "Album", on_delete=models.CASCADE, verbose_name=_("album")
37 )
38
39 file = models.ImageField(_("file"), upload_to=photo_uploadto)
40
41 rotation = models.IntegerField(
42 verbose_name=_("rotation"),
43 default=0,
44 choices=((x, x) for x in (0, 90, 180, 270)),
45 help_text=_("This does not modify the original image file."),
46 )
47
48 hidden = models.BooleanField(_("hidden"), default=False)
49
50 _digest = models.CharField("digest", max_length=40,)
51
52 def __init__(self, *args, **kwargs):
53 """Initialize Photo object and set the file if it exists."""
54 super().__init__(*args, **kwargs)
55 if self.file:
56 self.original_file = self.file.path
57 else:
58 self.original_file = ""
59
60 def __str__(self):
61 """Return the filename of a Photo object."""
62 return os.path.basename(self.file.name)
63
64 class Meta:
65 """Meta class for Photo."""
66
67 ordering = ("file",)
68
69
70 class Album(models.Model):
71 """Model for Album objects."""
72
73 title = models.CharField(
74 _("title"),
75 blank=True,
76 max_length=200,
77 help_text=_("Leave empty to take over the title of the event"),
78 )
79
80 dirname = models.CharField(verbose_name=_("directory name"), max_length=200,)
81
82 date = models.DateField(
83 verbose_name=_("date"),
84 blank=True,
85 help_text=_("Leave empty to take over the date of the event"),
86 )
87
88 slug = models.SlugField(verbose_name=_("slug"), unique=True,)
89
90 hidden = models.BooleanField(verbose_name=_("hidden"), default=False)
91
92 new_album_notification = models.ForeignKey(
93 ScheduledMessage, on_delete=models.deletion.SET_NULL, blank=True, null=True
94 )
95
96 event = models.ForeignKey(Event, on_delete=models.SET_NULL, blank=True, null=True)
97
98 _cover = models.OneToOneField(
99 Photo,
100 on_delete=models.SET_NULL,
101 blank=True,
102 null=True,
103 related_name="covered_album",
104 verbose_name=_("cover image"),
105 )
106
107 shareable = models.BooleanField(verbose_name=_("shareable"), default=False)
108
109 photosdir = "photos"
110 photospath = os.path.join(settings.MEDIA_ROOT, photosdir)
111
112 @cached_property
113 def cover(self):
114 """Return cover of Album.
115
116 If a cover is not set, return a random photo or None if there are no photos.
117 """
118 cover = None
119 if self._cover is not None:
120 return self._cover
121 if self.photo_set.exists():
122 random.seed(self.dirname)
123 cover = random.choice(self.photo_set.all())
124 return cover
125
126 def __str__(self):
127 """Get string representation of Album."""
128 return "{} {}".format(self.date.strftime("%Y-%m-%d"), self.title)
129
130 def get_absolute_url(self):
131 """Get url of Album."""
132 return reverse("photos:album", args=[str(self.slug)])
133
134 def clean(self):
135 super().clean()
136 errors = {}
137
138 if not self.title and not self.event:
139 errors.update(
140 {"title": _("This field is required if there is no event selected.")}
141 )
142
143 if not self.date and not self.event:
144 errors.update(
145 {"date": _("This field is required if there is no event selected.")}
146 )
147
148 if errors:
149 raise ValidationError(errors)
150
151 def save(self, **kwargs):
152 """Save album and send appropriate notifications."""
153 # dirname is only set for new objects, to avoid ever changing it
154 if self.pk is None:
155 self.dirname = self.slug
156
157 if not self.title and self.event:
158 self.title = self.event.title_en
159
160 if not self.date:
161 self.date = self.event.start.date()
162
163 if not self.hidden and (
164 self.new_album_notification is None or not self.new_album_notification.sent
165 ):
166 new_album_notification_time = timezone.now() + timezone.timedelta(hours=1)
167 new_album_notification = ScheduledMessage()
168
169 if (
170 self.new_album_notification is not None
171 and not self.new_album_notification.sent
172 ):
173 new_album_notification = self.new_album_notification
174
175 new_album_notification.title_en = "New album uploaded"
176 new_album_notification.body_en = (
177 f"A new photo album '{self.title}' has just been uploaded"
178 )
179 new_album_notification.category = Category.objects.get(key=Category.PHOTO)
180 new_album_notification.url = f"{settings.BASE_URL}{self.get_absolute_url()}"
181 new_album_notification.time = new_album_notification_time
182 new_album_notification.save()
183 self.new_album_notification = new_album_notification
184 self.new_album_notification.users.set(Member.current_members.all())
185 elif (
186 self.hidden
187 and self.new_album_notification is not None
188 and not self.new_album_notification.sent
189 ):
190 existing_notification = self.new_album_notification
191 self.new_album_notification = None
192 existing_notification.delete()
193
194 super().save(**kwargs)
195
196 @property
197 def access_token(self):
198 """Return access token for album."""
199 return hashlib.sha256(
200 "{}album{}".format(settings.SECRET_KEY, self.pk).encode("utf-8")
201 ).hexdigest()
202
203 class Meta:
204 """Meta class for Album."""
205
206 ordering = ("-date", "title")
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/website/photos/models.py b/website/photos/models.py
--- a/website/photos/models.py
+++ b/website/photos/models.py
@@ -155,7 +155,7 @@
self.dirname = self.slug
if not self.title and self.event:
- self.title = self.event.title_en
+ self.title = self.event.title
if not self.date:
self.date = self.event.start.date()
verification_info:
{"golden_diff": "diff --git a/website/photos/models.py b/website/photos/models.py\n--- a/website/photos/models.py\n+++ b/website/photos/models.py\n@@ -155,7 +155,7 @@\n self.dirname = self.slug\n \n if not self.title and self.event:\n- self.title = self.event.title_en\n+ self.title = self.event.title\n \n if not self.date:\n self.date = self.event.start.date()\n", "issue": "AttributeError: 'Event' object has no attribute 'title_en'\nSentry Issue: [CONCREXIT-70](https://sentry.io/organizations/thalia/issues/2487433496/?referrer=github_integration)\n\n```\nAttributeError: 'Event' object has no attribute 'title_en'\n(9 additional frame(s) were not displayed)\n...\n File \"django/contrib/admin/options.py\", line 1540, in changeform_view\n return self._changeform_view(request, object_id, form_url, extra_context)\n File \"django/contrib/admin/options.py\", line 1586, in _changeform_view\n self.save_model(request, new_object, form, not add)\n File \"photos/admin.py\", line 45, in save_model\n super().save_model(request, obj, form, change)\n File \"django/contrib/admin/options.py\", line 1099, in save_model\n obj.save()\n File \"photos/models.py\", line 158, in save\n self.title = self.event.title_en\n```\n", "before_files": [{"content": "import hashlib\nimport logging\nimport os\nimport random\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom events.models import Event\nfrom pushnotifications.models import ScheduledMessage, Category\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n \"\"\"Get path of file to upload to.\"\"\"\n num = instance.album.photo_set.count()\n extension = os.path.splitext(filename)[1]\n new_filename = str(num).zfill(4) + extension\n return os.path.join(Album.photosdir, instance.album.dirname, new_filename)\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = models.ImageField(_(\"file\"), upload_to=photo_uploadto)\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n hidden = models.BooleanField(_(\"hidden\"), default=False)\n\n _digest = models.CharField(\"digest\", max_length=40,)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.path\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n ordering = (\"file\",)\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n dirname = models.CharField(verbose_name=_(\"directory name\"), max_length=200,)\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n 
)\n\n slug = models.SlugField(verbose_name=_(\"slug\"), unique=True,)\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n new_album_notification = models.ForeignKey(\n ScheduledMessage, on_delete=models.deletion.SET_NULL, blank=True, null=True\n )\n\n event = models.ForeignKey(Event, on_delete=models.SET_NULL, blank=True, null=True)\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n if self.photo_set.exists():\n random.seed(self.dirname)\n cover = random.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return \"{} {}\".format(self.date.strftime(\"%Y-%m-%d\"), self.title)\n\n def get_absolute_url(self):\n \"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title_en\n\n if not self.date:\n self.date = self.event.start.date()\n\n if not self.hidden and (\n self.new_album_notification is None or not self.new_album_notification.sent\n ):\n new_album_notification_time = timezone.now() + timezone.timedelta(hours=1)\n new_album_notification = ScheduledMessage()\n\n if (\n self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n new_album_notification = self.new_album_notification\n\n new_album_notification.title_en = \"New album uploaded\"\n new_album_notification.body_en = (\n f\"A new photo album '{self.title}' has just been uploaded\"\n )\n new_album_notification.category = Category.objects.get(key=Category.PHOTO)\n new_album_notification.url = f\"{settings.BASE_URL}{self.get_absolute_url()}\"\n new_album_notification.time = new_album_notification_time\n new_album_notification.save()\n self.new_album_notification = new_album_notification\n self.new_album_notification.users.set(Member.current_members.all())\n elif (\n self.hidden\n and self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n existing_notification = self.new_album_notification\n self.new_album_notification = None\n existing_notification.delete()\n\n super().save(**kwargs)\n\n @property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n \"{}album{}\".format(settings.SECRET_KEY, self.pk).encode(\"utf-8\")\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": 
"website/photos/models.py"}], "after_files": [{"content": "import hashlib\nimport logging\nimport os\nimport random\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members.models import Member\nfrom events.models import Event\nfrom pushnotifications.models import ScheduledMessage, Category\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n \"\"\"Get path of file to upload to.\"\"\"\n num = instance.album.photo_set.count()\n extension = os.path.splitext(filename)[1]\n new_filename = str(num).zfill(4) + extension\n return os.path.join(Album.photosdir, instance.album.dirname, new_filename)\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = models.ImageField(_(\"file\"), upload_to=photo_uploadto)\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n hidden = models.BooleanField(_(\"hidden\"), default=False)\n\n _digest = models.CharField(\"digest\", max_length=40,)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.path\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n ordering = (\"file\",)\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n dirname = models.CharField(verbose_name=_(\"directory name\"), max_length=200,)\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n )\n\n slug = models.SlugField(verbose_name=_(\"slug\"), unique=True,)\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n new_album_notification = models.ForeignKey(\n ScheduledMessage, on_delete=models.deletion.SET_NULL, blank=True, null=True\n )\n\n event = models.ForeignKey(Event, on_delete=models.SET_NULL, blank=True, null=True)\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n if self.photo_set.exists():\n random.seed(self.dirname)\n cover = random.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return \"{} {}\".format(self.date.strftime(\"%Y-%m-%d\"), 
self.title)\n\n def get_absolute_url(self):\n \"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title\n\n if not self.date:\n self.date = self.event.start.date()\n\n if not self.hidden and (\n self.new_album_notification is None or not self.new_album_notification.sent\n ):\n new_album_notification_time = timezone.now() + timezone.timedelta(hours=1)\n new_album_notification = ScheduledMessage()\n\n if (\n self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n new_album_notification = self.new_album_notification\n\n new_album_notification.title_en = \"New album uploaded\"\n new_album_notification.body_en = (\n f\"A new photo album '{self.title}' has just been uploaded\"\n )\n new_album_notification.category = Category.objects.get(key=Category.PHOTO)\n new_album_notification.url = f\"{settings.BASE_URL}{self.get_absolute_url()}\"\n new_album_notification.time = new_album_notification_time\n new_album_notification.save()\n self.new_album_notification = new_album_notification\n self.new_album_notification.users.set(Member.current_members.all())\n elif (\n self.hidden\n and self.new_album_notification is not None\n and not self.new_album_notification.sent\n ):\n existing_notification = self.new_album_notification\n self.new_album_notification = None\n existing_notification.delete()\n\n super().save(**kwargs)\n\n @property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n \"{}album{}\".format(settings.SECRET_KEY, self.pk).encode(\"utf-8\")\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": "website/photos/models.py"}]}
num_tokens: 2,350
num_tokens_diff: 96
problem_id: gh_patches_debug_26489
source: rasdani/github-patches
task_type: git_diff
in_source_id: e-valuation__EvaP-1140
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Misguiding messages on contributor's course edit page
When a course is already approved, the contributor course edit page shows a message saying you can't edit this course anymore (introduced in #1076).
Also, a message instructing the user to edit the data is shown. This is misleading. The second message shouldn't be there.

@janno42 please make sure I'm not missing out anything here.
--- END ISSUE ---
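For context on the view code below: the "You cannot edit this course" notice in `course_view` is effectively unconditional, because the permission guard above it already requires `course.is_user_editor_or_delegate(user)`, so the follow-up check of the same condition is always true and the notice is attached on every visit to the read-only view, whatever the course state. A stripped-down sketch of that control flow (names simplified, not the actual EvaP API):

```python
VISIBLE_STATES = {"prepared", "editor_approved", "approved",
                  "in_evaluation", "evaluated", "reviewed"}

def course_view(user, course, notices):
    # Guard: only editors/delegates of a visible course get past this point.
    if not (course.is_user_editor_or_delegate(user) and course.state in VISIBLE_STATES):
        raise PermissionError("not allowed")

    # Redundant re-check: it can only be True here, so the notice is added on
    # every visit to the read-only view, not just for approved courses.
    if course.is_user_editor_or_delegate(user):
        notices.append("You cannot edit this course because it has already been approved.")
```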
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/contributor/views.py`
Content:
```
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied, SuspiciousOperation
3 from django.forms.models import inlineformset_factory
4 from django.shortcuts import get_object_or_404, redirect, render
5 from django.utils.translation import ugettext as _
6 from django.db import IntegrityError, transaction
7
8 from evap.contributor.forms import CourseForm, DelegatesForm, EditorContributionForm
9 from evap.evaluation.auth import contributor_or_delegate_required, editor_or_delegate_required, editor_required
10 from evap.evaluation.models import Contribution, Course, Semester
11 from evap.evaluation.tools import STATES_ORDERED, sort_formset
12 from evap.results.tools import calculate_average_grades_and_deviation
13 from evap.staff.forms import ContributionFormSet
14 from evap.student.views import get_valid_form_groups_or_render_vote_page
15
16
17 @contributor_or_delegate_required
18 def index(request):
19 user = request.user
20
21 contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
22 own_courses = Course.objects.filter(contributions__contributor=user, state__in=contributor_visible_states)
23
24 represented_users = user.represented_users.all()
25 delegated_courses = Course.objects.exclude(id__in=own_courses).filter(contributions__can_edit=True, contributions__contributor__in=represented_users, state__in=contributor_visible_states)
26
27 all_courses = list(own_courses) + list(delegated_courses)
28 all_courses.sort(key=lambda course: list(STATES_ORDERED.keys()).index(course.state))
29
30 for course in all_courses:
31 if course.state == 'published':
32 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
33
34 semesters = Semester.objects.all()
35 semester_list = [dict(
36 semester_name=semester.name,
37 id=semester.id,
38 is_active_semester=semester.is_active_semester,
39 courses=[course for course in all_courses if course.semester_id == semester.id]
40 ) for semester in semesters]
41
42 template_data = dict(semester_list=semester_list, delegated_courses=delegated_courses)
43 return render(request, "contributor_index.html", template_data)
44
45
46 @editor_required
47 def settings_edit(request):
48 user = request.user
49 form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)
50
51 if form.is_valid():
52 form.save()
53
54 messages.success(request, _("Successfully updated your settings."))
55 return redirect('contributor:settings_edit')
56 else:
57 return render(request, "contributor_settings.html", dict(
58 form=form,
59 delegate_of=user.represented_users.all(),
60 cc_users=user.cc_users.all(),
61 ccing_users=user.ccing_users.all(),
62 ))
63
64
65 @editor_or_delegate_required
66 def course_view(request, course_id):
67 user = request.user
68 course = get_object_or_404(Course, id=course_id)
69
70 # check rights
71 if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):
72 raise PermissionDenied
73
74 if course.is_user_editor_or_delegate(user):
75 messages.info(request, _('You cannot edit this course because it has already been approved.'))
76
77 InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)
78
79 form = CourseForm(request.POST or None, instance=course)
80 formset = InlineContributionFormset(request.POST or None, instance=course)
81
82 # make everything read-only
83 for cform in formset.forms + [form]:
84 for field in cform.fields.values():
85 field.disabled = True
86
87 template_data = dict(form=form, formset=formset, course=course, editable=False,
88 responsibles=[contributor.username for contributor in course.responsible_contributors])
89 return render(request, "contributor_course_form.html", template_data)
90
91
92 def render_preview(request, formset, course_form, course):
93 # open transaction to not let any other requests see anything of what we're doing here
94 try:
95 with transaction.atomic():
96 course_form.save(user=request.user)
97 formset.save()
98 request.POST = None # this prevents errors rendered in the vote form
99
100 preview_response = get_valid_form_groups_or_render_vote_page(request, course, preview=True, for_rendering_in_modal=True)[1].content.decode()
101 raise IntegrityError # rollback transaction to discard the database writes
102 except IntegrityError:
103 pass
104
105 return preview_response
106
107
108 @editor_or_delegate_required
109 def course_edit(request, course_id):
110 user = request.user
111 course = get_object_or_404(Course, id=course_id)
112
113 # check rights
114 if not (course.is_user_editor_or_delegate(user) and course.state == 'prepared'):
115 raise PermissionDenied
116
117 post_operation = request.POST.get('operation') if request.POST else None
118 preview = post_operation == 'preview'
119
120 InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)
121 course_form = CourseForm(request.POST or None, instance=course)
122 formset = InlineContributionFormset(request.POST or None, instance=course, can_change_responsible=False, form_kwargs={'course': course})
123
124 forms_are_valid = course_form.is_valid() and formset.is_valid()
125
126 if forms_are_valid and not preview:
127 if post_operation not in ('save', 'approve'):
128 raise SuspiciousOperation("Invalid POST operation")
129
130 course_form.save(user=user)
131 formset.save()
132
133 if post_operation == 'approve':
134 course.editor_approve()
135 course.save()
136 messages.success(request, _("Successfully updated and approved course."))
137 else:
138 messages.success(request, _("Successfully updated course."))
139
140 return redirect('contributor:index')
141 else:
142 preview_html = None
143 if preview and forms_are_valid:
144 preview_html = render_preview(request, formset, course_form, course)
145
146 if not forms_are_valid and (course_form.errors or formset.errors):
147 if preview:
148 messages.error(request, _("The preview could not be rendered. Please resolve the errors shown below."))
149 else:
150 messages.error(request, _("The form was not saved. Please resolve the errors shown below."))
151
152 sort_formset(request, formset)
153 template_data = dict(form=course_form, formset=formset, course=course, editable=True, preview_html=preview_html,
154 responsibles=[contributor.username for contributor in course.responsible_contributors])
155 return render(request, "contributor_course_form.html", template_data)
156
157
158 @contributor_or_delegate_required
159 def course_preview(request, course_id):
160 user = request.user
161 course = get_object_or_404(Course, id=course_id)
162
163 # check rights
164 if not (course.is_user_contributor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):
165 raise PermissionDenied
166
167 return get_valid_form_groups_or_render_vote_page(request, course, preview=True)[1]
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/evap/contributor/views.py b/evap/contributor/views.py
--- a/evap/contributor/views.py
+++ b/evap/contributor/views.py
@@ -68,12 +68,9 @@
course = get_object_or_404(Course, id=course_id)
# check rights
- if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):
+ if not course.is_user_editor_or_delegate(user) or course.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:
raise PermissionDenied
- if course.is_user_editor_or_delegate(user):
- messages.info(request, _('You cannot edit this course because it has already been approved.'))
-
InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)
form = CourseForm(request.POST or None, instance=course)
@@ -85,7 +82,7 @@
field.disabled = True
template_data = dict(form=form, formset=formset, course=course, editable=False,
- responsibles=[contributor.username for contributor in course.responsible_contributors])
+ responsibles=[contributor.username for contributor in course.responsible_contributors])
return render(request, "contributor_course_form.html", template_data)
verification_info:
{"golden_diff": "diff --git a/evap/contributor/views.py b/evap/contributor/views.py\n--- a/evap/contributor/views.py\n+++ b/evap/contributor/views.py\n@@ -68,12 +68,9 @@\n course = get_object_or_404(Course, id=course_id)\n \n # check rights\n- if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n+ if not course.is_user_editor_or_delegate(user) or course.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:\n raise PermissionDenied\n \n- if course.is_user_editor_or_delegate(user):\n- messages.info(request, _('You cannot edit this course because it has already been approved.'))\n-\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n \n form = CourseForm(request.POST or None, instance=course)\n@@ -85,7 +82,7 @@\n field.disabled = True\n \n template_data = dict(form=form, formset=formset, course=course, editable=False,\n- responsibles=[contributor.username for contributor in course.responsible_contributors])\n+ responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n", "issue": "Misguiding messages on contributor's course edit page\nWhen a course is already approved, the contributor course edit page shows a message saying you can't edit this course anymore (introduced in #1076).\r\nAlso, a message instructing the user to edit the data is shown. This is misleading. The second message shouldn't be there.\r\n\r\n\r\n\r\n@janno42 please make sure I'm not missing out anything here.\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.db import IntegrityError, transaction\n\nfrom evap.contributor.forms import CourseForm, DelegatesForm, EditorContributionForm\nfrom evap.evaluation.auth import contributor_or_delegate_required, editor_or_delegate_required, editor_required\nfrom evap.evaluation.models import Contribution, Course, Semester\nfrom evap.evaluation.tools import STATES_ORDERED, sort_formset\nfrom evap.results.tools import calculate_average_grades_and_deviation\nfrom evap.staff.forms import ContributionFormSet\nfrom evap.student.views import get_valid_form_groups_or_render_vote_page\n\n\n@contributor_or_delegate_required\ndef index(request):\n user = request.user\n\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(contributions__contributor=user, state__in=contributor_visible_states)\n\n represented_users = user.represented_users.all()\n delegated_courses = Course.objects.exclude(id__in=own_courses).filter(contributions__can_edit=True, contributions__contributor__in=represented_users, state__in=contributor_visible_states)\n\n all_courses = list(own_courses) + list(delegated_courses)\n all_courses.sort(key=lambda course: list(STATES_ORDERED.keys()).index(course.state))\n\n for course in all_courses:\n if course.state == 'published':\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n semesters = Semester.objects.all()\n 
semester_list = [dict(\n semester_name=semester.name,\n id=semester.id,\n is_active_semester=semester.is_active_semester,\n courses=[course for course in all_courses if course.semester_id == semester.id]\n ) for semester in semesters]\n\n template_data = dict(semester_list=semester_list, delegated_courses=delegated_courses)\n return render(request, \"contributor_index.html\", template_data)\n\n\n@editor_required\ndef settings_edit(request):\n user = request.user\n form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Successfully updated your settings.\"))\n return redirect('contributor:settings_edit')\n else:\n return render(request, \"contributor_settings.html\", dict(\n form=form,\n delegate_of=user.represented_users.all(),\n cc_users=user.cc_users.all(),\n ccing_users=user.ccing_users.all(),\n ))\n\n\n@editor_or_delegate_required\ndef course_view(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_editor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n if course.is_user_editor_or_delegate(user):\n messages.info(request, _('You cannot edit this course because it has already been approved.'))\n\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n\n form = CourseForm(request.POST or None, instance=course)\n formset = InlineContributionFormset(request.POST or None, instance=course)\n\n # make everything read-only\n for cform in formset.forms + [form]:\n for field in cform.fields.values():\n field.disabled = True\n\n template_data = dict(form=form, formset=formset, course=course, editable=False,\n responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n\n\ndef render_preview(request, formset, course_form, course):\n # open transaction to not let any other requests see anything of what we're doing here\n try:\n with transaction.atomic():\n course_form.save(user=request.user)\n formset.save()\n request.POST = None # this prevents errors rendered in the vote form\n\n preview_response = get_valid_form_groups_or_render_vote_page(request, course, preview=True, for_rendering_in_modal=True)[1].content.decode()\n raise IntegrityError # rollback transaction to discard the database writes\n except IntegrityError:\n pass\n\n return preview_response\n\n\n@editor_or_delegate_required\ndef course_edit(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_editor_or_delegate(user) and course.state == 'prepared'):\n raise PermissionDenied\n\n post_operation = request.POST.get('operation') if request.POST else None\n preview = post_operation == 'preview'\n\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)\n course_form = CourseForm(request.POST or None, instance=course)\n formset = InlineContributionFormset(request.POST or None, instance=course, can_change_responsible=False, form_kwargs={'course': course})\n\n forms_are_valid = course_form.is_valid() and formset.is_valid()\n\n if forms_are_valid and not preview:\n if post_operation not in ('save', 
'approve'):\n raise SuspiciousOperation(\"Invalid POST operation\")\n\n course_form.save(user=user)\n formset.save()\n\n if post_operation == 'approve':\n course.editor_approve()\n course.save()\n messages.success(request, _(\"Successfully updated and approved course.\"))\n else:\n messages.success(request, _(\"Successfully updated course.\"))\n\n return redirect('contributor:index')\n else:\n preview_html = None\n if preview and forms_are_valid:\n preview_html = render_preview(request, formset, course_form, course)\n\n if not forms_are_valid and (course_form.errors or formset.errors):\n if preview:\n messages.error(request, _(\"The preview could not be rendered. Please resolve the errors shown below.\"))\n else:\n messages.error(request, _(\"The form was not saved. Please resolve the errors shown below.\"))\n\n sort_formset(request, formset)\n template_data = dict(form=course_form, formset=formset, course=course, editable=True, preview_html=preview_html,\n responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n\n\n@contributor_or_delegate_required\ndef course_preview(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_contributor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n return get_valid_form_groups_or_render_vote_page(request, course, preview=True)[1]\n", "path": "evap/contributor/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied, SuspiciousOperation\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\nfrom django.db import IntegrityError, transaction\n\nfrom evap.contributor.forms import CourseForm, DelegatesForm, EditorContributionForm\nfrom evap.evaluation.auth import contributor_or_delegate_required, editor_or_delegate_required, editor_required\nfrom evap.evaluation.models import Contribution, Course, Semester\nfrom evap.evaluation.tools import STATES_ORDERED, sort_formset\nfrom evap.results.tools import calculate_average_grades_and_deviation\nfrom evap.staff.forms import ContributionFormSet\nfrom evap.student.views import get_valid_form_groups_or_render_vote_page\n\n\n@contributor_or_delegate_required\ndef index(request):\n user = request.user\n\n contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']\n own_courses = Course.objects.filter(contributions__contributor=user, state__in=contributor_visible_states)\n\n represented_users = user.represented_users.all()\n delegated_courses = Course.objects.exclude(id__in=own_courses).filter(contributions__can_edit=True, contributions__contributor__in=represented_users, state__in=contributor_visible_states)\n\n all_courses = list(own_courses) + list(delegated_courses)\n all_courses.sort(key=lambda course: list(STATES_ORDERED.keys()).index(course.state))\n\n for course in all_courses:\n if course.state == 'published':\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n semesters = Semester.objects.all()\n semester_list = [dict(\n semester_name=semester.name,\n id=semester.id,\n is_active_semester=semester.is_active_semester,\n 
courses=[course for course in all_courses if course.semester_id == semester.id]\n ) for semester in semesters]\n\n template_data = dict(semester_list=semester_list, delegated_courses=delegated_courses)\n return render(request, \"contributor_index.html\", template_data)\n\n\n@editor_required\ndef settings_edit(request):\n user = request.user\n form = DelegatesForm(request.POST or None, request.FILES or None, instance=user)\n\n if form.is_valid():\n form.save()\n\n messages.success(request, _(\"Successfully updated your settings.\"))\n return redirect('contributor:settings_edit')\n else:\n return render(request, \"contributor_settings.html\", dict(\n form=form,\n delegate_of=user.represented_users.all(),\n cc_users=user.cc_users.all(),\n ccing_users=user.ccing_users.all(),\n ))\n\n\n@editor_or_delegate_required\ndef course_view(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not course.is_user_editor_or_delegate(user) or course.state not in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']:\n raise PermissionDenied\n\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=0)\n\n form = CourseForm(request.POST or None, instance=course)\n formset = InlineContributionFormset(request.POST or None, instance=course)\n\n # make everything read-only\n for cform in formset.forms + [form]:\n for field in cform.fields.values():\n field.disabled = True\n\n template_data = dict(form=form, formset=formset, course=course, editable=False,\n responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n\n\ndef render_preview(request, formset, course_form, course):\n # open transaction to not let any other requests see anything of what we're doing here\n try:\n with transaction.atomic():\n course_form.save(user=request.user)\n formset.save()\n request.POST = None # this prevents errors rendered in the vote form\n\n preview_response = get_valid_form_groups_or_render_vote_page(request, course, preview=True, for_rendering_in_modal=True)[1].content.decode()\n raise IntegrityError # rollback transaction to discard the database writes\n except IntegrityError:\n pass\n\n return preview_response\n\n\n@editor_or_delegate_required\ndef course_edit(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_editor_or_delegate(user) and course.state == 'prepared'):\n raise PermissionDenied\n\n post_operation = request.POST.get('operation') if request.POST else None\n preview = post_operation == 'preview'\n\n InlineContributionFormset = inlineformset_factory(Course, Contribution, formset=ContributionFormSet, form=EditorContributionForm, extra=1)\n course_form = CourseForm(request.POST or None, instance=course)\n formset = InlineContributionFormset(request.POST or None, instance=course, can_change_responsible=False, form_kwargs={'course': course})\n\n forms_are_valid = course_form.is_valid() and formset.is_valid()\n\n if forms_are_valid and not preview:\n if post_operation not in ('save', 'approve'):\n raise SuspiciousOperation(\"Invalid POST operation\")\n\n course_form.save(user=user)\n formset.save()\n\n if post_operation == 'approve':\n course.editor_approve()\n course.save()\n messages.success(request, _(\"Successfully updated and approved course.\"))\n 
else:\n messages.success(request, _(\"Successfully updated course.\"))\n\n return redirect('contributor:index')\n else:\n preview_html = None\n if preview and forms_are_valid:\n preview_html = render_preview(request, formset, course_form, course)\n\n if not forms_are_valid and (course_form.errors or formset.errors):\n if preview:\n messages.error(request, _(\"The preview could not be rendered. Please resolve the errors shown below.\"))\n else:\n messages.error(request, _(\"The form was not saved. Please resolve the errors shown below.\"))\n\n sort_formset(request, formset)\n template_data = dict(form=course_form, formset=formset, course=course, editable=True, preview_html=preview_html,\n responsibles=[contributor.username for contributor in course.responsible_contributors])\n return render(request, \"contributor_course_form.html\", template_data)\n\n\n@contributor_or_delegate_required\ndef course_preview(request, course_id):\n user = request.user\n course = get_object_or_404(Course, id=course_id)\n\n # check rights\n if not (course.is_user_contributor_or_delegate(user) and course.state in ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed']):\n raise PermissionDenied\n\n return get_valid_form_groups_or_render_vote_page(request, course, preview=True)[1]\n", "path": "evap/contributor/views.py"}]}
| 2,380 | 333 |
gh_patches_debug_28134
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4701
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should `scrapy.utils.python.WeakKeyCache` be deprecated?
## Summary
I can't find any reference to `scrapy.utils.python.WeakKeyCache` (except in tests) and I think that it should be deprecated.
## Motivation
Keeping unused and undocumented code makes the codebase harder to maintain.
## Describe alternatives you've considered
As it's tested, we could keep it, but I don't think that's a good idea.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/python.py`
Content:
```
1 """
2 This module contains essential stuff that should've come with Python itself ;)
3 """
4 import errno
5 import gc
6 import inspect
7 import re
8 import sys
9 import weakref
10 from functools import partial, wraps
11 from itertools import chain
12
13 from scrapy.utils.decorators import deprecated
14
15
16 def flatten(x):
17 """flatten(sequence) -> list
18
19 Returns a single, flat list which contains all elements retrieved
20 from the sequence and all recursively contained sub-sequences
21 (iterables).
22
23 Examples:
24 >>> [1, 2, [3,4], (5,6)]
25 [1, 2, [3, 4], (5, 6)]
26 >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
27 [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
28 >>> flatten(["foo", "bar"])
29 ['foo', 'bar']
30 >>> flatten(["foo", ["baz", 42], "bar"])
31 ['foo', 'baz', 42, 'bar']
32 """
33 return list(iflatten(x))
34
35
36 def iflatten(x):
37 """iflatten(sequence) -> iterator
38
39 Similar to ``.flatten()``, but returns iterator instead"""
40 for el in x:
41 if is_listlike(el):
42 for el_ in iflatten(el):
43 yield el_
44 else:
45 yield el
46
47
48 def is_listlike(x):
49 """
50 >>> is_listlike("foo")
51 False
52 >>> is_listlike(5)
53 False
54 >>> is_listlike(b"foo")
55 False
56 >>> is_listlike([b"foo"])
57 True
58 >>> is_listlike((b"foo",))
59 True
60 >>> is_listlike({})
61 True
62 >>> is_listlike(set())
63 True
64 >>> is_listlike((x for x in range(3)))
65 True
66 >>> is_listlike(range(5))
67 True
68 """
69 return hasattr(x, "__iter__") and not isinstance(x, (str, bytes))
70
71
72 def unique(list_, key=lambda x: x):
73 """efficient function to uniquify a list preserving item order"""
74 seen = set()
75 result = []
76 for item in list_:
77 seenkey = key(item)
78 if seenkey in seen:
79 continue
80 seen.add(seenkey)
81 result.append(item)
82 return result
83
84
85 def to_unicode(text, encoding=None, errors='strict'):
86 """Return the unicode representation of a bytes object ``text``. If
87 ``text`` is already an unicode object, return it as-is."""
88 if isinstance(text, str):
89 return text
90 if not isinstance(text, (bytes, str)):
91 raise TypeError('to_unicode must receive a bytes or str '
92 'object, got %s' % type(text).__name__)
93 if encoding is None:
94 encoding = 'utf-8'
95 return text.decode(encoding, errors)
96
97
98 def to_bytes(text, encoding=None, errors='strict'):
99 """Return the binary representation of ``text``. If ``text``
100 is already a bytes object, return it as-is."""
101 if isinstance(text, bytes):
102 return text
103 if not isinstance(text, str):
104 raise TypeError('to_bytes must receive a str or bytes '
105 'object, got %s' % type(text).__name__)
106 if encoding is None:
107 encoding = 'utf-8'
108 return text.encode(encoding, errors)
109
110
111 @deprecated('to_unicode')
112 def to_native_str(text, encoding=None, errors='strict'):
113 """ Return str representation of ``text``. """
114 return to_unicode(text, encoding, errors)
115
116
117 def re_rsearch(pattern, text, chunk_size=1024):
118 """
119 This function does a reverse search in a text using a regular expression
120 given in the attribute 'pattern'.
121 Since the re module does not provide this functionality, we have to find for
122 the expression into chunks of text extracted from the end (for the sake of efficiency).
123 At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for
124 the pattern. If the pattern is not found, another chunk is extracted, and another
125 search is performed.
126 This process continues until a match is found, or until the whole file is read.
127 In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
128 the start position of the match, and the ending (regarding the entire text).
129 """
130 def _chunk_iter():
131 offset = len(text)
132 while True:
133 offset -= (chunk_size * 1024)
134 if offset <= 0:
135 break
136 yield (text[offset:], offset)
137 yield (text, 0)
138
139 if isinstance(pattern, str):
140 pattern = re.compile(pattern)
141
142 for chunk, offset in _chunk_iter():
143 matches = [match for match in pattern.finditer(chunk)]
144 if matches:
145 start, end = matches[-1].span()
146 return offset + start, offset + end
147 return None
148
149
150 def memoizemethod_noargs(method):
151 """Decorator to cache the result of a method (without arguments) using a
152 weak reference to its object
153 """
154 cache = weakref.WeakKeyDictionary()
155
156 @wraps(method)
157 def new_method(self, *args, **kwargs):
158 if self not in cache:
159 cache[self] = method(self, *args, **kwargs)
160 return cache[self]
161 return new_method
162
163
164 _BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b"\0", b"\t", b"\n", b"\r"}
165 _BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}
166
167
168 def binary_is_text(data):
169 """ Returns ``True`` if the given ``data`` argument (a ``bytes`` object)
170 does not contain unprintable control characters.
171 """
172 if not isinstance(data, bytes):
173 raise TypeError("data must be bytes, got '%s'" % type(data).__name__)
174 return all(c not in _BINARYCHARS for c in data)
175
176
177 def _getargspec_py23(func):
178 """_getargspec_py23(function) -> named tuple ArgSpec(args, varargs, keywords,
179 defaults)
180
181 Was identical to inspect.getargspec() in python2, but uses
182 inspect.getfullargspec() for python3 behind the scenes to avoid
183 DeprecationWarning.
184
185 >>> def f(a, b=2, *ar, **kw):
186 ... pass
187
188 >>> _getargspec_py23(f)
189 ArgSpec(args=['a', 'b'], varargs='ar', keywords='kw', defaults=(2,))
190 """
191 return inspect.ArgSpec(*inspect.getfullargspec(func)[:4])
192
193
194 def get_func_args(func, stripself=False):
195 """Return the argument name list of a callable"""
196 if inspect.isfunction(func):
197 func_args, _, _, _ = _getargspec_py23(func)
198 elif inspect.isclass(func):
199 return get_func_args(func.__init__, True)
200 elif inspect.ismethod(func):
201 return get_func_args(func.__func__, True)
202 elif inspect.ismethoddescriptor(func):
203 return []
204 elif isinstance(func, partial):
205 return [x for x in get_func_args(func.func)[len(func.args):]
206 if not (func.keywords and x in func.keywords)]
207 elif hasattr(func, '__call__'):
208 if inspect.isroutine(func):
209 return []
210 elif getattr(func, '__name__', None) == '__call__':
211 return []
212 else:
213 return get_func_args(func.__call__, True)
214 else:
215 raise TypeError('%s is not callable' % type(func))
216 if stripself:
217 func_args.pop(0)
218 return func_args
219
220
221 def get_spec(func):
222 """Returns (args, kwargs) tuple for a function
223 >>> import re
224 >>> get_spec(re.match)
225 (['pattern', 'string'], {'flags': 0})
226
227 >>> class Test:
228 ... def __call__(self, val):
229 ... pass
230 ... def method(self, val, flags=0):
231 ... pass
232
233 >>> get_spec(Test)
234 (['self', 'val'], {})
235
236 >>> get_spec(Test.method)
237 (['self', 'val'], {'flags': 0})
238
239 >>> get_spec(Test().method)
240 (['self', 'val'], {'flags': 0})
241 """
242
243 if inspect.isfunction(func) or inspect.ismethod(func):
244 spec = _getargspec_py23(func)
245 elif hasattr(func, '__call__'):
246 spec = _getargspec_py23(func.__call__)
247 else:
248 raise TypeError('%s is not callable' % type(func))
249
250 defaults = spec.defaults or []
251
252 firstdefault = len(spec.args) - len(defaults)
253 args = spec.args[:firstdefault]
254 kwargs = dict(zip(spec.args[firstdefault:], defaults))
255 return args, kwargs
256
257
258 def equal_attributes(obj1, obj2, attributes):
259 """Compare two objects attributes"""
260 # not attributes given return False by default
261 if not attributes:
262 return False
263
264 temp1, temp2 = object(), object()
265 for attr in attributes:
266 # support callables like itemgetter
267 if callable(attr):
268 if attr(obj1) != attr(obj2):
269 return False
270 elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):
271 return False
272 # all attributes equal
273 return True
274
275
276 class WeakKeyCache:
277
278 def __init__(self, default_factory):
279 self.default_factory = default_factory
280 self._weakdict = weakref.WeakKeyDictionary()
281
282 def __getitem__(self, key):
283 if key not in self._weakdict:
284 self._weakdict[key] = self.default_factory(key)
285 return self._weakdict[key]
286
287
288 @deprecated
289 def retry_on_eintr(function, *args, **kw):
290 """Run a function and retry it while getting EINTR errors"""
291 while True:
292 try:
293 return function(*args, **kw)
294 except IOError as e:
295 if e.errno != errno.EINTR:
296 raise
297
298
299 def without_none_values(iterable):
300 """Return a copy of ``iterable`` with all ``None`` entries removed.
301
302 If ``iterable`` is a mapping, return a dictionary where all pairs that have
303 value ``None`` have been removed.
304 """
305 try:
306 return {k: v for k, v in iterable.items() if v is not None}
307 except AttributeError:
308 return type(iterable)((v for v in iterable if v is not None))
309
310
311 def global_object_name(obj):
312 """
313 Return full name of a global object.
314
315 >>> from scrapy import Request
316 >>> global_object_name(Request)
317 'scrapy.http.request.Request'
318 """
319 return "%s.%s" % (obj.__module__, obj.__name__)
320
321
322 if hasattr(sys, "pypy_version_info"):
323 def garbage_collect():
324 # Collecting weakreferences can take two collections on PyPy.
325 gc.collect()
326 gc.collect()
327 else:
328 def garbage_collect():
329 gc.collect()
330
331
332 class MutableChain:
333 """
334 Thin wrapper around itertools.chain, allowing to add iterables "in-place"
335 """
336
337 def __init__(self, *args):
338 self.data = chain.from_iterable(args)
339
340 def extend(self, *iterables):
341 self.data = chain(self.data, chain.from_iterable(iterables))
342
343 def __iter__(self):
344 return self
345
346 def __next__(self):
347 return next(self.data)
348
349 @deprecated("scrapy.utils.python.MutableChain.__next__")
350 def next(self):
351 return self.__next__()
352
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py
--- a/scrapy/utils/python.py
+++ b/scrapy/utils/python.py
@@ -6,10 +6,12 @@
import inspect
import re
import sys
+import warnings
import weakref
from functools import partial, wraps
from itertools import chain
+from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.decorators import deprecated
@@ -127,6 +129,7 @@
In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
the start position of the match, and the ending (regarding the entire text).
"""
+
def _chunk_iter():
offset = len(text)
while True:
@@ -158,6 +161,7 @@
if self not in cache:
cache[self] = method(self, *args, **kwargs)
return cache[self]
+
return new_method
@@ -276,6 +280,7 @@
class WeakKeyCache:
def __init__(self, default_factory):
+ warnings.warn("The WeakKeyCache class is deprecated", category=ScrapyDeprecationWarning, stacklevel=2)
self.default_factory = default_factory
self._weakdict = weakref.WeakKeyDictionary()
|
{"golden_diff": "diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py\n--- a/scrapy/utils/python.py\n+++ b/scrapy/utils/python.py\n@@ -6,10 +6,12 @@\n import inspect\n import re\n import sys\n+import warnings\n import weakref\n from functools import partial, wraps\n from itertools import chain\n \n+from scrapy.exceptions import ScrapyDeprecationWarning\n from scrapy.utils.decorators import deprecated\n \n \n@@ -127,6 +129,7 @@\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n+\n def _chunk_iter():\n offset = len(text)\n while True:\n@@ -158,6 +161,7 @@\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n+\n return new_method\n \n \n@@ -276,6 +280,7 @@\n class WeakKeyCache:\n \n def __init__(self, default_factory):\n+ warnings.warn(\"The WeakKeyCache class is deprecated\", category=ScrapyDeprecationWarning, stacklevel=2)\n self.default_factory = default_factory\n self._weakdict = weakref.WeakKeyDictionary()\n", "issue": "Should `scrapy.utils.python.WeakKeyCache` be deprecated?\n## Summary\r\nI can't find any reference to `scrapy.utils.python.WeakKeyCache` (except in tests) and I think that it should be deprecated.\r\n\r\n## Motivation\r\nKeeping non-used and non-documented code makes the codebase harder to maintain.\r\n\r\n## Describe alternatives you've considered\r\nAs it's tested we could keep it, but I don't think it's a good idea.\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains essential stuff that should've come with Python itself ;)\n\"\"\"\nimport errno\nimport gc\nimport inspect\nimport re\nimport sys\nimport weakref\nfrom functools import partial, wraps\nfrom itertools import chain\n\nfrom scrapy.utils.decorators import deprecated\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\n >>> flatten([\"foo\", \"bar\"])\n ['foo', 'bar']\n >>> flatten([\"foo\", [\"baz\", 42], \"bar\"])\n ['foo', 'baz', 42, 'bar']\n \"\"\"\n return list(iflatten(x))\n\n\ndef iflatten(x):\n \"\"\"iflatten(sequence) -> iterator\n\n Similar to ``.flatten()``, but returns iterator instead\"\"\"\n for el in x:\n if is_listlike(el):\n for el_ in iflatten(el):\n yield el_\n else:\n yield el\n\n\ndef is_listlike(x):\n \"\"\"\n >>> is_listlike(\"foo\")\n False\n >>> is_listlike(5)\n False\n >>> is_listlike(b\"foo\")\n False\n >>> is_listlike([b\"foo\"])\n True\n >>> is_listlike((b\"foo\",))\n True\n >>> is_listlike({})\n True\n >>> is_listlike(set())\n True\n >>> is_listlike((x for x in range(3)))\n True\n >>> is_listlike(range(5))\n True\n \"\"\"\n return hasattr(x, \"__iter__\") and not isinstance(x, (str, bytes))\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef to_unicode(text, encoding=None, errors='strict'):\n \"\"\"Return the unicode representation of a bytes object ``text``. 
If\n ``text`` is already an unicode object, return it as-is.\"\"\"\n if isinstance(text, str):\n return text\n if not isinstance(text, (bytes, str)):\n raise TypeError('to_unicode must receive a bytes or str '\n 'object, got %s' % type(text).__name__)\n if encoding is None:\n encoding = 'utf-8'\n return text.decode(encoding, errors)\n\n\ndef to_bytes(text, encoding=None, errors='strict'):\n \"\"\"Return the binary representation of ``text``. If ``text``\n is already a bytes object, return it as-is.\"\"\"\n if isinstance(text, bytes):\n return text\n if not isinstance(text, str):\n raise TypeError('to_bytes must receive a str or bytes '\n 'object, got %s' % type(text).__name__)\n if encoding is None:\n encoding = 'utf-8'\n return text.encode(encoding, errors)\n\n\n@deprecated('to_unicode')\ndef to_native_str(text, encoding=None, errors='strict'):\n \"\"\" Return str representation of ``text``. \"\"\"\n return to_unicode(text, encoding, errors)\n\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= (chunk_size * 1024)\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n start, end = matches[-1].span()\n return offset + start, offset + end\n return None\n\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n return new_method\n\n\n_BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b\"\\0\", b\"\\t\", b\"\\n\", b\"\\r\"}\n_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}\n\n\ndef binary_is_text(data):\n \"\"\" Returns ``True`` if the given ``data`` argument (a ``bytes`` object)\n does not contain unprintable control characters.\n \"\"\"\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes, got '%s'\" % type(data).__name__)\n return all(c not in _BINARYCHARS for c in data)\n\n\ndef _getargspec_py23(func):\n \"\"\"_getargspec_py23(function) -> named tuple ArgSpec(args, varargs, keywords,\n defaults)\n\n Was identical to inspect.getargspec() in python2, but uses\n inspect.getfullargspec() for python3 behind the scenes to avoid\n DeprecationWarning.\n\n >>> def f(a, b=2, *ar, **kw):\n ... 
pass\n\n >>> _getargspec_py23(f)\n ArgSpec(args=['a', 'b'], varargs='ar', keywords='kw', defaults=(2,))\n \"\"\"\n return inspect.ArgSpec(*inspect.getfullargspec(func)[:4])\n\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable\"\"\"\n if inspect.isfunction(func):\n func_args, _, _, _ = _getargspec_py23(func)\n elif inspect.isclass(func):\n return get_func_args(func.__init__, True)\n elif inspect.ismethod(func):\n return get_func_args(func.__func__, True)\n elif inspect.ismethoddescriptor(func):\n return []\n elif isinstance(func, partial):\n return [x for x in get_func_args(func.func)[len(func.args):]\n if not (func.keywords and x in func.keywords)]\n elif hasattr(func, '__call__'):\n if inspect.isroutine(func):\n return []\n elif getattr(func, '__name__', None) == '__call__':\n return []\n else:\n return get_func_args(func.__call__, True)\n else:\n raise TypeError('%s is not callable' % type(func))\n if stripself:\n func_args.pop(0)\n return func_args\n\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test:\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = _getargspec_py23(func)\n elif hasattr(func, '__call__'):\n spec = _getargspec_py23(func.__call__)\n else:\n raise TypeError('%s is not callable' % type(func))\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n temp1, temp2 = object(), object()\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if attr(obj1) != attr(obj2):\n return False\n elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):\n return False\n # all attributes equal\n return True\n\n\nclass WeakKeyCache:\n\n def __init__(self, default_factory):\n self.default_factory = default_factory\n self._weakdict = weakref.WeakKeyDictionary()\n\n def __getitem__(self, key):\n if key not in self._weakdict:\n self._weakdict[key] = self.default_factory(key)\n return self._weakdict[key]\n\n\n@deprecated\ndef retry_on_eintr(function, *args, **kw):\n \"\"\"Run a function and retry it while getting EINTR errors\"\"\"\n while True:\n try:\n return function(*args, **kw)\n except IOError as e:\n if e.errno != errno.EINTR:\n raise\n\n\ndef without_none_values(iterable):\n \"\"\"Return a copy of ``iterable`` with all ``None`` entries removed.\n\n If ``iterable`` is a mapping, return a dictionary where all pairs that have\n value ``None`` have been removed.\n \"\"\"\n try:\n return {k: v for k, v in iterable.items() if v is not None}\n except AttributeError:\n return type(iterable)((v for v in iterable if v is not None))\n\n\ndef global_object_name(obj):\n \"\"\"\n Return full name of a global object.\n\n >>> from scrapy import Request\n >>> global_object_name(Request)\n 'scrapy.http.request.Request'\n \"\"\"\n return \"%s.%s\" % (obj.__module__, obj.__name__)\n\n\nif 
hasattr(sys, \"pypy_version_info\"):\n def garbage_collect():\n # Collecting weakreferences can take two collections on PyPy.\n gc.collect()\n gc.collect()\nelse:\n def garbage_collect():\n gc.collect()\n\n\nclass MutableChain:\n \"\"\"\n Thin wrapper around itertools.chain, allowing to add iterables \"in-place\"\n \"\"\"\n\n def __init__(self, *args):\n self.data = chain.from_iterable(args)\n\n def extend(self, *iterables):\n self.data = chain(self.data, chain.from_iterable(iterables))\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.data)\n\n @deprecated(\"scrapy.utils.python.MutableChain.__next__\")\n def next(self):\n return self.__next__()\n", "path": "scrapy/utils/python.py"}], "after_files": [{"content": "\"\"\"\nThis module contains essential stuff that should've come with Python itself ;)\n\"\"\"\nimport errno\nimport gc\nimport inspect\nimport re\nimport sys\nimport warnings\nimport weakref\nfrom functools import partial, wraps\nfrom itertools import chain\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.decorators import deprecated\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\n >>> flatten([\"foo\", \"bar\"])\n ['foo', 'bar']\n >>> flatten([\"foo\", [\"baz\", 42], \"bar\"])\n ['foo', 'baz', 42, 'bar']\n \"\"\"\n return list(iflatten(x))\n\n\ndef iflatten(x):\n \"\"\"iflatten(sequence) -> iterator\n\n Similar to ``.flatten()``, but returns iterator instead\"\"\"\n for el in x:\n if is_listlike(el):\n for el_ in iflatten(el):\n yield el_\n else:\n yield el\n\n\ndef is_listlike(x):\n \"\"\"\n >>> is_listlike(\"foo\")\n False\n >>> is_listlike(5)\n False\n >>> is_listlike(b\"foo\")\n False\n >>> is_listlike([b\"foo\"])\n True\n >>> is_listlike((b\"foo\",))\n True\n >>> is_listlike({})\n True\n >>> is_listlike(set())\n True\n >>> is_listlike((x for x in range(3)))\n True\n >>> is_listlike(range(5))\n True\n \"\"\"\n return hasattr(x, \"__iter__\") and not isinstance(x, (str, bytes))\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef to_unicode(text, encoding=None, errors='strict'):\n \"\"\"Return the unicode representation of a bytes object ``text``. If\n ``text`` is already an unicode object, return it as-is.\"\"\"\n if isinstance(text, str):\n return text\n if not isinstance(text, (bytes, str)):\n raise TypeError('to_unicode must receive a bytes or str '\n 'object, got %s' % type(text).__name__)\n if encoding is None:\n encoding = 'utf-8'\n return text.decode(encoding, errors)\n\n\ndef to_bytes(text, encoding=None, errors='strict'):\n \"\"\"Return the binary representation of ``text``. 
If ``text``\n is already a bytes object, return it as-is.\"\"\"\n if isinstance(text, bytes):\n return text\n if not isinstance(text, str):\n raise TypeError('to_bytes must receive a str or bytes '\n 'object, got %s' % type(text).__name__)\n if encoding is None:\n encoding = 'utf-8'\n return text.encode(encoding, errors)\n\n\n@deprecated('to_unicode')\ndef to_native_str(text, encoding=None, errors='strict'):\n \"\"\" Return str representation of ``text``. \"\"\"\n return to_unicode(text, encoding, errors)\n\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= (chunk_size * 1024)\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n if isinstance(pattern, str):\n pattern = re.compile(pattern)\n\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n start, end = matches[-1].span()\n return offset + start, offset + end\n return None\n\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n\n return new_method\n\n\n_BINARYCHARS = {to_bytes(chr(i)) for i in range(32)} - {b\"\\0\", b\"\\t\", b\"\\n\", b\"\\r\"}\n_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}\n\n\ndef binary_is_text(data):\n \"\"\" Returns ``True`` if the given ``data`` argument (a ``bytes`` object)\n does not contain unprintable control characters.\n \"\"\"\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes, got '%s'\" % type(data).__name__)\n return all(c not in _BINARYCHARS for c in data)\n\n\ndef _getargspec_py23(func):\n \"\"\"_getargspec_py23(function) -> named tuple ArgSpec(args, varargs, keywords,\n defaults)\n\n Was identical to inspect.getargspec() in python2, but uses\n inspect.getfullargspec() for python3 behind the scenes to avoid\n DeprecationWarning.\n\n >>> def f(a, b=2, *ar, **kw):\n ... 
pass\n\n >>> _getargspec_py23(f)\n ArgSpec(args=['a', 'b'], varargs='ar', keywords='kw', defaults=(2,))\n \"\"\"\n return inspect.ArgSpec(*inspect.getfullargspec(func)[:4])\n\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable\"\"\"\n if inspect.isfunction(func):\n func_args, _, _, _ = _getargspec_py23(func)\n elif inspect.isclass(func):\n return get_func_args(func.__init__, True)\n elif inspect.ismethod(func):\n return get_func_args(func.__func__, True)\n elif inspect.ismethoddescriptor(func):\n return []\n elif isinstance(func, partial):\n return [x for x in get_func_args(func.func)[len(func.args):]\n if not (func.keywords and x in func.keywords)]\n elif hasattr(func, '__call__'):\n if inspect.isroutine(func):\n return []\n elif getattr(func, '__name__', None) == '__call__':\n return []\n else:\n return get_func_args(func.__call__, True)\n else:\n raise TypeError('%s is not callable' % type(func))\n if stripself:\n func_args.pop(0)\n return func_args\n\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test:\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = _getargspec_py23(func)\n elif hasattr(func, '__call__'):\n spec = _getargspec_py23(func.__call__)\n else:\n raise TypeError('%s is not callable' % type(func))\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n temp1, temp2 = object(), object()\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if attr(obj1) != attr(obj2):\n return False\n elif getattr(obj1, attr, temp1) != getattr(obj2, attr, temp2):\n return False\n # all attributes equal\n return True\n\n\nclass WeakKeyCache:\n\n def __init__(self, default_factory):\n warnings.warn(\"The WeakKeyCache class is deprecated\", category=ScrapyDeprecationWarning, stacklevel=2)\n self.default_factory = default_factory\n self._weakdict = weakref.WeakKeyDictionary()\n\n def __getitem__(self, key):\n if key not in self._weakdict:\n self._weakdict[key] = self.default_factory(key)\n return self._weakdict[key]\n\n\n@deprecated\ndef retry_on_eintr(function, *args, **kw):\n \"\"\"Run a function and retry it while getting EINTR errors\"\"\"\n while True:\n try:\n return function(*args, **kw)\n except IOError as e:\n if e.errno != errno.EINTR:\n raise\n\n\ndef without_none_values(iterable):\n \"\"\"Return a copy of ``iterable`` with all ``None`` entries removed.\n\n If ``iterable`` is a mapping, return a dictionary where all pairs that have\n value ``None`` have been removed.\n \"\"\"\n try:\n return {k: v for k, v in iterable.items() if v is not None}\n except AttributeError:\n return type(iterable)((v for v in iterable if v is not None))\n\n\ndef global_object_name(obj):\n \"\"\"\n Return full name of a global object.\n\n >>> from scrapy import Request\n >>> 
global_object_name(Request)\n 'scrapy.http.request.Request'\n \"\"\"\n return \"%s.%s\" % (obj.__module__, obj.__name__)\n\n\nif hasattr(sys, \"pypy_version_info\"):\n def garbage_collect():\n # Collecting weakreferences can take two collections on PyPy.\n gc.collect()\n gc.collect()\nelse:\n def garbage_collect():\n gc.collect()\n\n\nclass MutableChain:\n \"\"\"\n Thin wrapper around itertools.chain, allowing to add iterables \"in-place\"\n \"\"\"\n\n def __init__(self, *args):\n self.data = chain.from_iterable(args)\n\n def extend(self, *iterables):\n self.data = chain(self.data, chain.from_iterable(iterables))\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return next(self.data)\n\n @deprecated(\"scrapy.utils.python.MutableChain.__next__\")\n def next(self):\n return self.__next__()\n", "path": "scrapy/utils/python.py"}]}
| 3,900 | 289 |
gh_patches_debug_19135
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-903
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unhandled value out of range
## Sentry link
<!-- The public (aka "share") Sentry issue link. -->
https://sentry.io/share/issue/7dd836d27daa41dfa59dfbdf39a40d51/
## Description
<!-- Example: We are trying to access property foo of ImportantClass but the instance is null. -->
<!-- Include any additional information you may have, including potential remedies if any come to mind, and the general context of the code (what causes it to run in the app). -->
We need to capture and handle this error to avoid returning a 500 error in the link below.
> 'l' format requires -2147483648 <= number <= 2147483647
Originates from this fragment of code:
https://github.com/WordPress/openverse-api/blob/81b7860e2662f4b135919da0728eea194f15c260/api/catalog/api/views/image_views.py#L125-L130
<!-- Mention whether this is a known regression, i.e., the feature used to work and now does not. -->
## Reproduction
<!-- Share the steps to reproduce the issue, if you were able to, OR a note sharing that you tried to reproduce but weren’t able to. -->
1. Go to https://api-dev.openverse.engineering/v1/images/6c855680-b426-40f1-adc6-f61f434d0be9/watermark/
## Additional context
Related to #483.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/views/image_views.py`
Content:
```
1 import io
2
3 from django.conf import settings
4 from django.http.response import FileResponse, HttpResponse
5 from django.utils.decorators import method_decorator
6 from rest_framework.decorators import action
7 from rest_framework.exceptions import NotFound
8 from rest_framework.response import Response
9
10 import piexif
11 import requests
12 from drf_yasg.utils import swagger_auto_schema
13 from PIL import Image as PILImage
14
15 from catalog.api.constants.media_types import IMAGE_TYPE
16 from catalog.api.docs.image_docs import (
17 ImageComplain,
18 ImageDetail,
19 ImageOembed,
20 ImageRelated,
21 ImageSearch,
22 ImageStats,
23 ImageThumbnail,
24 )
25 from catalog.api.models import Image
26 from catalog.api.serializers.image_serializers import (
27 ImageReportRequestSerializer,
28 ImageSearchRequestSerializer,
29 ImageSerializer,
30 OembedRequestSerializer,
31 OembedSerializer,
32 WatermarkRequestSerializer,
33 )
34 from catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer
35 from catalog.api.utils.exceptions import get_api_exception
36 from catalog.api.utils.throttle import (
37 AnonThumbnailRateThrottle,
38 OAuth2IdThumbnailRateThrottle,
39 )
40 from catalog.api.utils.watermark import watermark
41 from catalog.api.views.media_views import MediaViewSet
42
43
44 @method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), "list")
45 @method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), "stats")
46 @method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), "retrieve")
47 @method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), "related")
48 @method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), "report")
49 @method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), "oembed")
50 @method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), "thumbnail")
51 @method_decorator(swagger_auto_schema(auto_schema=None), "watermark")
52 class ImageViewSet(MediaViewSet):
53 """
54 Viewset for all endpoints pertaining to images.
55 """
56
57 model_class = Image
58 query_serializer_class = ImageSearchRequestSerializer
59 default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]
60 qa_index = "search-qa-image"
61
62 serializer_class = ImageSerializer
63
64 OEMBED_HEADERS = {
65 "User-Agent": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose="OEmbed"),
66 }
67
68 # Extra actions
69
70 @action(
71 detail=False,
72 url_path="oembed",
73 url_name="oembed",
74 serializer_class=OembedSerializer,
75 )
76 def oembed(self, request, *_, **__):
77 params = OembedRequestSerializer(data=request.query_params)
78 params.is_valid(raise_exception=True)
79
80 context = self.get_serializer_context()
81
82 url = params.validated_data["url"]
83 identifier = url.rsplit("/", 1)[1]
84 try:
85 image = self.get_queryset().get(identifier=identifier)
86 except Image.DoesNotExist:
87 return get_api_exception("Could not find image.", 404)
88 if not (image.height and image.width):
89 image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)
90 width, height = PILImage.open(io.BytesIO(image_file.content)).size
91 context |= {
92 "width": width,
93 "height": height,
94 }
95
96 serializer = self.get_serializer(image, context=context)
97 return Response(data=serializer.data)
98
99 @action(
100 detail=True,
101 url_path="thumb",
102 url_name="thumb",
103 serializer_class=MediaThumbnailRequestSerializer,
104 throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],
105 )
106 def thumbnail(self, request, *_, **__):
107 image = self.get_object()
108
109 image_url = image.url
110 if not image_url:
111 raise get_api_exception("Could not find image.", 404)
112
113 return super().thumbnail(image_url, request)
114
115 @action(detail=True, url_path="watermark", url_name="watermark")
116 def watermark(self, request, *_, **__):
117 if not settings.WATERMARK_ENABLED:
118 raise NotFound("The watermark feature is currently disabled.")
119
120 params = WatermarkRequestSerializer(data=request.query_params)
121 params.is_valid(raise_exception=True)
122
123 image = self.get_object()
124 image_url = image.url
125 image_info = {
126 attr: getattr(image, attr)
127 for attr in ["title", "creator", "license", "license_version"]
128 }
129
130 # Create the actual watermarked image.
131 watermarked, exif = watermark(image_url, image_info, params.data["watermark"])
132 # Re-insert EXIF metadata.
133 if exif:
134 exif_bytes = piexif.dump(exif)
135 else:
136 exif_bytes = None
137 img_bytes = io.BytesIO()
138 self._save_wrapper(watermarked, exif_bytes, img_bytes)
139
140 if params.data["embed_metadata"]:
141 # Embed ccREL metadata with XMP.
142 work_properties = {
143 "creator": image.creator,
144 "license_url": image.license_url,
145 "attribution": image.attribution,
146 "work_landing_page": image.foreign_landing_url,
147 "identifier": str(image.identifier),
148 }
149
150 # Import inside a function to allow server run without Exempi library
151 import libxmp
152
153 from catalog.api.utils import ccrel
154
155 try:
156 with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)
157 return FileResponse(with_xmp, content_type="image/jpeg")
158 except (libxmp.XMPError, AttributeError):
159 # Just send the EXIF-ified file if libxmp fails to add metadata
160 response = HttpResponse(content_type="image/jpeg")
161 self._save_wrapper(watermarked, exif_bytes, response)
162 return response
163 else:
164 response = HttpResponse(img_bytes, content_type="image/jpeg")
165 self._save_wrapper(watermarked, exif_bytes, response)
166 return response
167
168 @action(
169 detail=True,
170 methods=["post"],
171 serializer_class=ImageReportRequestSerializer,
172 )
173 def report(self, *args, **kwargs):
174 return super().report(*args, **kwargs)
175
176 # Helper functions
177
178 @staticmethod
179 def _save_wrapper(pil_img, exif_bytes, destination):
180 """
181 PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering
182 the code with branches.
183 """
184 if exif_bytes:
185 pil_img.save(destination, "jpeg", exif=exif_bytes)
186 else:
187 pil_img.save(destination, "jpeg")
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/catalog/api/views/image_views.py b/api/catalog/api/views/image_views.py
--- a/api/catalog/api/views/image_views.py
+++ b/api/catalog/api/views/image_views.py
@@ -1,4 +1,5 @@
import io
+import struct
from django.conf import settings
from django.http.response import FileResponse, HttpResponse
@@ -131,7 +132,14 @@
watermarked, exif = watermark(image_url, image_info, params.data["watermark"])
# Re-insert EXIF metadata.
if exif:
- exif_bytes = piexif.dump(exif)
+ # piexif dump raises InvalidImageDataError which is a child class
+ # of ValueError, and a struct error when the value is not
+ # between -2147483648 and 2147483647
+ # https://github.com/WordPress/openverse-api/issues/849
+ try:
+ exif_bytes = piexif.dump(exif)
+ except (struct.error, ValueError):
+ exif_bytes = None
else:
exif_bytes = None
img_bytes = io.BytesIO()
|
{"golden_diff": "diff --git a/api/catalog/api/views/image_views.py b/api/catalog/api/views/image_views.py\n--- a/api/catalog/api/views/image_views.py\n+++ b/api/catalog/api/views/image_views.py\n@@ -1,4 +1,5 @@\n import io\n+import struct\n \n from django.conf import settings\n from django.http.response import FileResponse, HttpResponse\n@@ -131,7 +132,14 @@\n watermarked, exif = watermark(image_url, image_info, params.data[\"watermark\"])\n # Re-insert EXIF metadata.\n if exif:\n- exif_bytes = piexif.dump(exif)\n+ # piexif dump raises InvalidImageDataError which is a child class\n+ # of ValueError, and a struct error when the value is not\n+ # between -2147483648 and 2147483647\n+ # https://github.com/WordPress/openverse-api/issues/849\n+ try:\n+ exif_bytes = piexif.dump(exif)\n+ except (struct.error, ValueError):\n+ exif_bytes = None\n else:\n exif_bytes = None\n img_bytes = io.BytesIO()\n", "issue": "Unhandled value out of range\n## Sentry link\r\n<!-- The public (aka \"share\") Sentry issue link. -->\r\nhttps://sentry.io/share/issue/7dd836d27daa41dfa59dfbdf39a40d51/\r\n\r\n## Description\r\n<!-- Example: We are trying to access property foo of ImportantClass but the instance is null. -->\r\n<!-- Include any additional information you may have, including potential remedies if any come to mind, and the general context of the code (what causes it to run in the app). -->\r\n\r\nWe need to capture and handle this error to avoid returning a 500 error in the link below.\r\n\r\n> 'l' format requires -2147483648 <= number <= 2147483647\r\n\r\nOriginates from this fragment of code:\r\n\r\nhttps://github.com/WordPress/openverse-api/blob/81b7860e2662f4b135919da0728eea194f15c260/api/catalog/api/views/image_views.py#L125-L130\r\n\r\n<!-- Mention whether this is a known regression, i.e., the feature used to work and now does not. -->\r\n\r\n## Reproduction\r\n<!-- Share the steps to reproduce the issue, if you were able to, OR a note sharing that you tried to reproduce but weren\u2019t able to. -->\r\n1. 
Go to https://api-dev.openverse.engineering/v1/images/6c855680-b426-40f1-adc6-f61f434d0be9/watermark/\r\n\r\n## Aditional context\r\nRelated to #483.\r\n\n", "before_files": [{"content": "import io\n\nfrom django.conf import settings\nfrom django.http.response import FileResponse, HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\n\nimport piexif\nimport requests\nfrom drf_yasg.utils import swagger_auto_schema\nfrom PIL import Image as PILImage\n\nfrom catalog.api.constants.media_types import IMAGE_TYPE\nfrom catalog.api.docs.image_docs import (\n ImageComplain,\n ImageDetail,\n ImageOembed,\n ImageRelated,\n ImageSearch,\n ImageStats,\n ImageThumbnail,\n)\nfrom catalog.api.models import Image\nfrom catalog.api.serializers.image_serializers import (\n ImageReportRequestSerializer,\n ImageSearchRequestSerializer,\n ImageSerializer,\n OembedRequestSerializer,\n OembedSerializer,\n WatermarkRequestSerializer,\n)\nfrom catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.throttle import (\n AnonThumbnailRateThrottle,\n OAuth2IdThumbnailRateThrottle,\n)\nfrom catalog.api.utils.watermark import watermark\nfrom catalog.api.views.media_views import MediaViewSet\n\n\n@method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), \"list\")\n@method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), \"stats\")\n@method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), \"retrieve\")\n@method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), \"related\")\n@method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), \"report\")\n@method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), \"oembed\")\n@method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), \"thumbnail\")\n@method_decorator(swagger_auto_schema(auto_schema=None), \"watermark\")\nclass ImageViewSet(MediaViewSet):\n \"\"\"\n Viewset for all endpoints pertaining to images.\n \"\"\"\n\n model_class = Image\n query_serializer_class = ImageSearchRequestSerializer\n default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]\n qa_index = \"search-qa-image\"\n\n serializer_class = ImageSerializer\n\n OEMBED_HEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"OEmbed\"),\n }\n\n # Extra actions\n\n @action(\n detail=False,\n url_path=\"oembed\",\n url_name=\"oembed\",\n serializer_class=OembedSerializer,\n )\n def oembed(self, request, *_, **__):\n params = OembedRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n context = self.get_serializer_context()\n\n url = params.validated_data[\"url\"]\n identifier = url.rsplit(\"/\", 1)[1]\n try:\n image = self.get_queryset().get(identifier=identifier)\n except Image.DoesNotExist:\n return get_api_exception(\"Could not find image.\", 404)\n if not (image.height and image.width):\n image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)\n width, height = PILImage.open(io.BytesIO(image_file.content)).size\n context |= {\n \"width\": width,\n \"height\": height,\n }\n\n serializer = self.get_serializer(image, context=context)\n return Response(data=serializer.data)\n\n @action(\n detail=True,\n url_path=\"thumb\",\n url_name=\"thumb\",\n serializer_class=MediaThumbnailRequestSerializer,\n 
throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],\n )\n def thumbnail(self, request, *_, **__):\n image = self.get_object()\n\n image_url = image.url\n if not image_url:\n raise get_api_exception(\"Could not find image.\", 404)\n\n return super().thumbnail(image_url, request)\n\n @action(detail=True, url_path=\"watermark\", url_name=\"watermark\")\n def watermark(self, request, *_, **__):\n if not settings.WATERMARK_ENABLED:\n raise NotFound(\"The watermark feature is currently disabled.\")\n\n params = WatermarkRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n image = self.get_object()\n image_url = image.url\n image_info = {\n attr: getattr(image, attr)\n for attr in [\"title\", \"creator\", \"license\", \"license_version\"]\n }\n\n # Create the actual watermarked image.\n watermarked, exif = watermark(image_url, image_info, params.data[\"watermark\"])\n # Re-insert EXIF metadata.\n if exif:\n exif_bytes = piexif.dump(exif)\n else:\n exif_bytes = None\n img_bytes = io.BytesIO()\n self._save_wrapper(watermarked, exif_bytes, img_bytes)\n\n if params.data[\"embed_metadata\"]:\n # Embed ccREL metadata with XMP.\n work_properties = {\n \"creator\": image.creator,\n \"license_url\": image.license_url,\n \"attribution\": image.attribution,\n \"work_landing_page\": image.foreign_landing_url,\n \"identifier\": str(image.identifier),\n }\n\n # Import inside a function to allow server run without Exempi library\n import libxmp\n\n from catalog.api.utils import ccrel\n\n try:\n with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)\n return FileResponse(with_xmp, content_type=\"image/jpeg\")\n except (libxmp.XMPError, AttributeError):\n # Just send the EXIF-ified file if libxmp fails to add metadata\n response = HttpResponse(content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n else:\n response = HttpResponse(img_bytes, content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n\n @action(\n detail=True,\n methods=[\"post\"],\n serializer_class=ImageReportRequestSerializer,\n )\n def report(self, *args, **kwargs):\n return super().report(*args, **kwargs)\n\n # Helper functions\n\n @staticmethod\n def _save_wrapper(pil_img, exif_bytes, destination):\n \"\"\"\n PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering\n the code with branches.\n \"\"\"\n if exif_bytes:\n pil_img.save(destination, \"jpeg\", exif=exif_bytes)\n else:\n pil_img.save(destination, \"jpeg\")\n", "path": "api/catalog/api/views/image_views.py"}], "after_files": [{"content": "import io\nimport struct\n\nfrom django.conf import settings\nfrom django.http.response import FileResponse, HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\n\nimport piexif\nimport requests\nfrom drf_yasg.utils import swagger_auto_schema\nfrom PIL import Image as PILImage\n\nfrom catalog.api.constants.media_types import IMAGE_TYPE\nfrom catalog.api.docs.image_docs import (\n ImageComplain,\n ImageDetail,\n ImageOembed,\n ImageRelated,\n ImageSearch,\n ImageStats,\n ImageThumbnail,\n)\nfrom catalog.api.models import Image\nfrom catalog.api.serializers.image_serializers import (\n ImageReportRequestSerializer,\n ImageSearchRequestSerializer,\n ImageSerializer,\n OembedRequestSerializer,\n OembedSerializer,\n 
WatermarkRequestSerializer,\n)\nfrom catalog.api.serializers.media_serializers import MediaThumbnailRequestSerializer\nfrom catalog.api.utils.exceptions import get_api_exception\nfrom catalog.api.utils.throttle import (\n AnonThumbnailRateThrottle,\n OAuth2IdThumbnailRateThrottle,\n)\nfrom catalog.api.utils.watermark import watermark\nfrom catalog.api.views.media_views import MediaViewSet\n\n\n@method_decorator(swagger_auto_schema(**ImageSearch.swagger_setup), \"list\")\n@method_decorator(swagger_auto_schema(**ImageStats.swagger_setup), \"stats\")\n@method_decorator(swagger_auto_schema(**ImageDetail.swagger_setup), \"retrieve\")\n@method_decorator(swagger_auto_schema(**ImageRelated.swagger_setup), \"related\")\n@method_decorator(swagger_auto_schema(**ImageComplain.swagger_setup), \"report\")\n@method_decorator(swagger_auto_schema(**ImageOembed.swagger_setup), \"oembed\")\n@method_decorator(swagger_auto_schema(**ImageThumbnail.swagger_setup), \"thumbnail\")\n@method_decorator(swagger_auto_schema(auto_schema=None), \"watermark\")\nclass ImageViewSet(MediaViewSet):\n \"\"\"\n Viewset for all endpoints pertaining to images.\n \"\"\"\n\n model_class = Image\n query_serializer_class = ImageSearchRequestSerializer\n default_index = settings.MEDIA_INDEX_MAPPING[IMAGE_TYPE]\n qa_index = \"search-qa-image\"\n\n serializer_class = ImageSerializer\n\n OEMBED_HEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"OEmbed\"),\n }\n\n # Extra actions\n\n @action(\n detail=False,\n url_path=\"oembed\",\n url_name=\"oembed\",\n serializer_class=OembedSerializer,\n )\n def oembed(self, request, *_, **__):\n params = OembedRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n context = self.get_serializer_context()\n\n url = params.validated_data[\"url\"]\n identifier = url.rsplit(\"/\", 1)[1]\n try:\n image = self.get_queryset().get(identifier=identifier)\n except Image.DoesNotExist:\n return get_api_exception(\"Could not find image.\", 404)\n if not (image.height and image.width):\n image_file = requests.get(image.url, headers=self.OEMBED_HEADERS)\n width, height = PILImage.open(io.BytesIO(image_file.content)).size\n context |= {\n \"width\": width,\n \"height\": height,\n }\n\n serializer = self.get_serializer(image, context=context)\n return Response(data=serializer.data)\n\n @action(\n detail=True,\n url_path=\"thumb\",\n url_name=\"thumb\",\n serializer_class=MediaThumbnailRequestSerializer,\n throttle_classes=[AnonThumbnailRateThrottle, OAuth2IdThumbnailRateThrottle],\n )\n def thumbnail(self, request, *_, **__):\n image = self.get_object()\n\n image_url = image.url\n if not image_url:\n raise get_api_exception(\"Could not find image.\", 404)\n\n return super().thumbnail(image_url, request)\n\n @action(detail=True, url_path=\"watermark\", url_name=\"watermark\")\n def watermark(self, request, *_, **__):\n if not settings.WATERMARK_ENABLED:\n raise NotFound(\"The watermark feature is currently disabled.\")\n\n params = WatermarkRequestSerializer(data=request.query_params)\n params.is_valid(raise_exception=True)\n\n image = self.get_object()\n image_url = image.url\n image_info = {\n attr: getattr(image, attr)\n for attr in [\"title\", \"creator\", \"license\", \"license_version\"]\n }\n\n # Create the actual watermarked image.\n watermarked, exif = watermark(image_url, image_info, params.data[\"watermark\"])\n # Re-insert EXIF metadata.\n if exif:\n # piexif dump raises InvalidImageDataError which is a child class\n # of ValueError, and a 
struct error when the value is not\n # between -2147483648 and 2147483647\n # https://github.com/WordPress/openverse-api/issues/849\n try:\n exif_bytes = piexif.dump(exif)\n except (struct.error, ValueError):\n exif_bytes = None\n else:\n exif_bytes = None\n img_bytes = io.BytesIO()\n self._save_wrapper(watermarked, exif_bytes, img_bytes)\n\n if params.data[\"embed_metadata\"]:\n # Embed ccREL metadata with XMP.\n work_properties = {\n \"creator\": image.creator,\n \"license_url\": image.license_url,\n \"attribution\": image.attribution,\n \"work_landing_page\": image.foreign_landing_url,\n \"identifier\": str(image.identifier),\n }\n\n # Import inside a function to allow server run without Exempi library\n import libxmp\n\n from catalog.api.utils import ccrel\n\n try:\n with_xmp = ccrel.embed_xmp_bytes(img_bytes, work_properties)\n return FileResponse(with_xmp, content_type=\"image/jpeg\")\n except (libxmp.XMPError, AttributeError):\n # Just send the EXIF-ified file if libxmp fails to add metadata\n response = HttpResponse(content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n else:\n response = HttpResponse(img_bytes, content_type=\"image/jpeg\")\n self._save_wrapper(watermarked, exif_bytes, response)\n return response\n\n @action(\n detail=True,\n methods=[\"post\"],\n serializer_class=ImageReportRequestSerializer,\n )\n def report(self, *args, **kwargs):\n return super().report(*args, **kwargs)\n\n # Helper functions\n\n @staticmethod\n def _save_wrapper(pil_img, exif_bytes, destination):\n \"\"\"\n PIL crashes if exif_bytes=None, so we have to wrap it to avoid littering\n the code with branches.\n \"\"\"\n if exif_bytes:\n pil_img.save(destination, \"jpeg\", exif=exif_bytes)\n else:\n pil_img.save(destination, \"jpeg\")\n", "path": "api/catalog/api/views/image_views.py"}]}
| 2,480 | 265 |
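The record above ends with a fix that wraps `piexif.dump` in a try/except so EXIF values outside the signed 32-bit range no longer surface as a 500. A minimal, self-contained sketch of that guard follows; the helper name and return convention are mine, only the exception handling mirrors the golden diff.

```python
# Hedged sketch of the guard from the diff above: piexif.dump raises
# InvalidImageDataError (a ValueError subclass) or struct.error when an EXIF
# value does not fit the signed 32-bit 'l' format, so we fall back to writing
# the JPEG without EXIF metadata instead of failing the request.
import struct
from typing import Optional

import piexif


def safe_exif_bytes(exif: dict) -> Optional[bytes]:
    try:
        return piexif.dump(exif)
    except (struct.error, ValueError):
        return None
```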
gh_patches_debug_1132 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1790 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Guard SCHEMA_VERSION from version bumps
I don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version` so we might need to look for a replacement for `bump2version` that gives guard support.
This is going to be a problem when
https://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2
hits `1.0.0` and conflicts with
https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13
and also has to properly pick up the multiple correct instances in
https://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145
_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789
7 'tensorflow-probability>=0.11.0', # c.f. PR #1657
8 ],
9 'torch': ['torch>=1.10.0'], # c.f. PR #1657
10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501
11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567
12 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
23 extras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'scikit-hep-testdata>=0.4.11',
33 'pytest>=6.0',
34 'pytest-cov>=2.5.1',
35 'pytest-mock',
36 'requests-mock>=1.9.0',
37 'pytest-benchmark[histogram]',
38 'pytest-console-scripts',
39 'pytest-mpl',
40 'pydocstyle',
41 'papermill~=2.0',
42 'nteract-scrapbook~=0.2',
43 'jupyter',
44 'graphviz',
45 ]
46 )
47 )
48 extras_require['docs'] = sorted(
49 set(
50 extras_require['xmlio']
51 + extras_require['contrib']
52 + [
53 'sphinx>=4.0.0',
54 'sphinxcontrib-bibtex~=2.1',
55 'sphinx-click',
56 'sphinx_rtd_theme',
57 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620
58 'ipywidgets',
59 'sphinx-issues',
60 'sphinx-copybutton>=0.3.2',
61 ]
62 )
63 )
64 extras_require['develop'] = sorted(
65 set(
66 extras_require['docs']
67 + extras_require['lint']
68 + extras_require['test']
69 + [
70 'nbdime',
71 'bump2version',
72 'ipython',
73 'pre-commit',
74 'check-manifest',
75 'codemetapy>=0.3.4',
76 'twine',
77 ]
78 )
79 )
80 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
81
82
83 setup(
84 extras_require=extras_require,
85 use_scm_version=lambda: {'local_scheme': lambda version: ''},
86 )
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,7 +68,7 @@
+ extras_require['test']
+ [
'nbdime',
- 'bump2version',
+ 'tbump>=6.7.0',
'ipython',
'pre-commit',
'check-manifest',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,7 +68,7 @@\n + extras_require['test']\n + [\n 'nbdime',\n- 'bump2version',\n+ 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n", "issue": "Guard SCHEMA_VERSION from version bumps\nI don't think it is going to be possible to guard the `SCHEMA_VERSION` from `bump2version` so we might need to look for a replacement for `bump2version` that gives guard support.\r\n\r\nThis is going to be a problem when \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/6b0a9317b14da2a452f51d089cb9e493c8f19347/.bumpversion.cfg#L1-L2\r\n\r\nhits `1.0.0` and conflicts with \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L13\r\n\r\nand also has to properly pick up the multiple correct instances in \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/f824afe77d9e48e90651931700ccfc3d3c268c18/src/pyhf/utils.py#L145\r\n\r\n_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/1218#issuecomment-744590434_\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.3.1', # c.f. https://github.com/tensorflow/tensorflow/pull/40789\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.60,!=0.1.68'], # c.f. 
Issue 1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.4.0'], # c.f. PR #1306\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black>=22.1.0'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=4.0.0',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,436 | 84 |
gh_patches_debug_15998 | rasdani/github-patches | git_diff | sktime__sktime-2168 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] forecasting API docs page is broken
**Describe the bug**
<!--
No ARIMA, BATS and Prophet documentation in the API References
-->
Hello,
In your API documentation (https://www.sktime.org/en/v0.10.0/api_reference/forecasting.html) there is no references for prophet, ARIMA and BATS/TBATS

</details>
<!-- Thanks for contributing! -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/forecasting/fbprophet.py`
Content:
```
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Implements Prophet forecaster by wrapping fbprophet."""
5
6 __author__ = ["aiwalter"]
7 __all__ = ["Prophet"]
8
9
10 from sktime.forecasting.base._base import DEFAULT_ALPHA
11 from sktime.forecasting.base.adapters import _ProphetAdapter
12 from sktime.utils.validation._dependencies import _check_soft_dependencies
13
14 _check_soft_dependencies("prophet", severity="warning")
15
16
17 class Prophet(_ProphetAdapter):
18 """Prophet forecaster by wrapping Facebook's prophet algorithm [1]_.
19
20 Parameters
21 ----------
22 freq: String of DatetimeIndex frequency. Refer [2]_ for possible values:
23 add_seasonality: dict or None, default=None
24 Dict with args for Prophet.add_seasonality().
25 Dict can have the following keys/values:
26 * name: string name of the seasonality component.
27 * period: float number of days in one period.
28 * fourier_order: int number of Fourier components to use.
29 * prior_scale: optional float prior scale for this component.
30 * mode: optional 'additive' or 'multiplicative'
31 * condition_name: string name of the seasonality condition.
32 add_country_holidays: dict or None, default=None
33 Dict with args for Prophet.add_country_holidays().
34 Dict can have the following keys/values:
35 country_name: Name of the country, like 'UnitedStates' or 'US'
36 growth: str, default="linear"
37 String 'linear' or 'logistic' to specify a linear or logistic
38 trend.
39 changepoints: list or None, default=None
40 List of dates at which to include potential changepoints. If
41 not specified, potential changepoints are selected automatically.
42 n_changepoints: int, default=25
43 Number of potential changepoints to include. Not used
44 if input `changepoints` is supplied. If `changepoints` is not supplied,
45 then n_changepoints potential changepoints are selected uniformly from
46 the first `changepoint_range` proportion of the history.
47 changepoint_range: float, default=0.8
48 Proportion of history in which trend changepoints will
49 be estimated. Defaults to 0.8 for the first 80%. Not used if
50 `changepoints` is specified.
51 yearly_seasonality: str or bool or int, default="auto"
52 Fit yearly seasonality.
53 Can be 'auto', True, False, or a number of Fourier terms to generate.
54 weekly_seasonality: str or bool or int, default="auto"
55 Fit weekly seasonality.
56 Can be 'auto', True, False, or a number of Fourier terms to generate.
57 daily_seasonality: str or bool or int, default="auto"
58 Fit daily seasonality.
59 Can be 'auto', True, False, or a number of Fourier terms to generate.
60 holidays: pd.DataFrame or None, default=None
61 pd.DataFrame with columns holiday (string) and ds (date type)
62 and optionally columns lower_window and upper_window which specify a
63 range of days around the date to be included as holidays.
64 lower_window=-2 will include 2 days prior to the date as holidays. Also
65 optionally can have a column prior_scale specifying the prior scale for
66 that holiday.
67 seasonality_mode: str, default='additive'
68 Take one of 'additive' or 'multiplicative'.
69 seasonality_prior_scale: float, default=10.0
70 Parameter modulating the strength of the seasonality model.
71 Larger values allow the model to fit larger seasonal
72 fluctuations, smaller values dampen the seasonality. Can be specified
73 for individual seasonalities using add_seasonality.
74 holidays_prior_scale: float, default=10.0
75 Parameter modulating the strength of the holiday
76 components model, unless overridden in the holidays input.
77 changepoint_prior_scale: float, default=0.05
78 Parameter modulating the flexibility of the
79 automatic changepoint selection. Large values will allow many
80 changepoints, small values will allow few changepoints.
81 mcmc_samples: int, default=0
82 If greater than 0, will do full Bayesian inference
83 with the specified number of MCMC samples. If 0, will do MAP
84 estimation.
85 alpha: float, default=0.05
86 Width of the uncertainty intervals provided
87 for the forecast. If mcmc_samples=0, this will be only the uncertainty
88 in the trend using the MAP estimate of the extrapolated generative
89 model. If mcmc.samples>0, this will be integrated over all model
90 parameters, which will include uncertainty in seasonality.
91 uncertainty_samples: int, default=1000
92 Number of simulated draws used to estimate uncertainty intervals.
93 Settings this value to 0 or False will disable
94 uncertainty estimation and speed up the calculation.
95 stan_backend: str or None, default=None
96 str as defined in StanBackendEnum. If None, will try to
97 iterate over all available backends and find the working one.
98
99 References
100 ----------
101 .. [1] https://facebook.github.io/prophet
102 .. [2] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
103
104 Examples
105 --------
106 >>> from sktime.datasets import load_airline
107 >>> from sktime.forecasting.fbprophet import Prophet
108 >>> # Prophet requires to have data with a pandas.DatetimeIndex
109 >>> y = load_airline().to_timestamp(freq='M')
110 >>> forecaster = Prophet(
111 ... seasonality_mode='multiplicative',
112 ... n_changepoints=int(len(y) / 12),
113 ... add_country_holidays={'country_name': 'Germany'},
114 ... yearly_seasonality=True)
115 >>> forecaster.fit(y)
116 Prophet(...)
117 >>> y_pred = forecaster.predict(fh=[1,2,3])
118 """
119
120 def __init__(
121 self,
122 # Args due to wrapping
123 freq=None,
124 add_seasonality=None,
125 add_country_holidays=None,
126 # Args of fbprophet
127 growth="linear",
128 changepoints=None,
129 n_changepoints=25,
130 changepoint_range=0.8,
131 yearly_seasonality="auto",
132 weekly_seasonality="auto",
133 daily_seasonality="auto",
134 holidays=None,
135 seasonality_mode="additive",
136 seasonality_prior_scale=10.0,
137 holidays_prior_scale=10.0,
138 changepoint_prior_scale=0.05,
139 mcmc_samples=0,
140 alpha=DEFAULT_ALPHA,
141 uncertainty_samples=1000,
142 stan_backend=None,
143 verbose=0,
144 interval_width=0,
145 ):
146 _check_soft_dependencies("prophet", severity="error", object=self)
147
148 self.freq = freq
149 self.add_seasonality = add_seasonality
150 self.add_country_holidays = add_country_holidays
151
152 self.growth = growth
153 self.changepoints = changepoints
154 self.n_changepoints = n_changepoints
155 self.changepoint_range = changepoint_range
156 self.yearly_seasonality = yearly_seasonality
157 self.weekly_seasonality = weekly_seasonality
158 self.daily_seasonality = daily_seasonality
159 self.holidays = holidays
160 self.seasonality_mode = seasonality_mode
161 self.seasonality_prior_scale = seasonality_prior_scale
162 self.changepoint_prior_scale = changepoint_prior_scale
163 self.holidays_prior_scale = holidays_prior_scale
164 self.mcmc_samples = mcmc_samples
165 self.alpha = alpha
166 self.uncertainty_samples = uncertainty_samples
167 self.stan_backend = stan_backend
168 self.verbose = verbose
169 self.interval_width = interval_width
170
171 # import inside method to avoid hard dependency
172 from prophet.forecaster import Prophet as _Prophet
173
174 self._ModelClass = _Prophet
175
176 super(Prophet, self).__init__()
177
178 def _instantiate_model(self):
179 self._forecaster = self._ModelClass(
180 growth=self.growth,
181 changepoints=self.changepoints,
182 n_changepoints=self.n_changepoints,
183 changepoint_range=self.changepoint_range,
184 yearly_seasonality=self.yearly_seasonality,
185 weekly_seasonality=self.weekly_seasonality,
186 daily_seasonality=self.daily_seasonality,
187 holidays=self.holidays,
188 seasonality_mode=self.seasonality_mode,
189 seasonality_prior_scale=float(self.seasonality_prior_scale),
190 holidays_prior_scale=float(self.holidays_prior_scale),
191 changepoint_prior_scale=float(self.changepoint_prior_scale),
192 mcmc_samples=self.mcmc_samples,
193 interval_width=1 - self.alpha,
194 uncertainty_samples=self.uncertainty_samples,
195 stan_backend=self.stan_backend,
196 )
197 return self
198
199 @classmethod
200 def get_test_params(cls):
201 """Return testing parameter settings for the estimator.
202
203 Returns
204 -------
205 params : dict or list of dict
206 """
207 params = {
208 "n_changepoints": 0,
209 "yearly_seasonality": False,
210 "weekly_seasonality": False,
211 "daily_seasonality": False,
212 "uncertainty_samples": 1000,
213 "verbose": False,
214 }
215 return params
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sktime/forecasting/fbprophet.py b/sktime/forecasting/fbprophet.py
--- a/sktime/forecasting/fbprophet.py
+++ b/sktime/forecasting/fbprophet.py
@@ -19,7 +19,9 @@
Parameters
----------
- freq: String of DatetimeIndex frequency. Refer [2]_ for possible values:
+ freq: str, default=None
+ A DatetimeIndex frequency. For possible values see
+ https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
add_seasonality: dict or None, default=None
Dict with args for Prophet.add_seasonality().
Dict can have the following keys/values:
@@ -99,7 +101,6 @@
References
----------
.. [1] https://facebook.github.io/prophet
- .. [2] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
Examples
--------
|
{"golden_diff": "diff --git a/sktime/forecasting/fbprophet.py b/sktime/forecasting/fbprophet.py\n--- a/sktime/forecasting/fbprophet.py\n+++ b/sktime/forecasting/fbprophet.py\n@@ -19,7 +19,9 @@\n \n Parameters\n ----------\n- freq: String of DatetimeIndex frequency. Refer [2]_ for possible values:\n+ freq: str, default=None\n+ A DatetimeIndex frequency. For possible values see\n+ https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n add_seasonality: dict or None, default=None\n Dict with args for Prophet.add_seasonality().\n Dict can have the following keys/values:\n@@ -99,7 +101,6 @@\n References\n ----------\n .. [1] https://facebook.github.io/prophet\n- .. [2] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n \n Examples\n --------\n", "issue": "[BUG] forecasting API docs page is broken\n**Describe the bug**\r\n<!--\r\nNo ARIMA, BATS and Prophet documentation in the API References\r\n-->\r\n\r\n\r\nHello, \r\nIn your API documentation (https://www.sktime.org/en/v0.10.0/api_reference/forecasting.html) there is no references for prophet, ARIMA and BATS/TBATS\r\n\r\n\r\n\r\n\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! -->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements Prophet forecaster by wrapping fbprophet.\"\"\"\n\n__author__ = [\"aiwalter\"]\n__all__ = [\"Prophet\"]\n\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base.adapters import _ProphetAdapter\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\n_check_soft_dependencies(\"prophet\", severity=\"warning\")\n\n\nclass Prophet(_ProphetAdapter):\n \"\"\"Prophet forecaster by wrapping Facebook's prophet algorithm [1]_.\n\n Parameters\n ----------\n freq: String of DatetimeIndex frequency. Refer [2]_ for possible values:\n add_seasonality: dict or None, default=None\n Dict with args for Prophet.add_seasonality().\n Dict can have the following keys/values:\n * name: string name of the seasonality component.\n * period: float number of days in one period.\n * fourier_order: int number of Fourier components to use.\n * prior_scale: optional float prior scale for this component.\n * mode: optional 'additive' or 'multiplicative'\n * condition_name: string name of the seasonality condition.\n add_country_holidays: dict or None, default=None\n Dict with args for Prophet.add_country_holidays().\n Dict can have the following keys/values:\n country_name: Name of the country, like 'UnitedStates' or 'US'\n growth: str, default=\"linear\"\n String 'linear' or 'logistic' to specify a linear or logistic\n trend.\n changepoints: list or None, default=None\n List of dates at which to include potential changepoints. If\n not specified, potential changepoints are selected automatically.\n n_changepoints: int, default=25\n Number of potential changepoints to include. Not used\n if input `changepoints` is supplied. If `changepoints` is not supplied,\n then n_changepoints potential changepoints are selected uniformly from\n the first `changepoint_range` proportion of the history.\n changepoint_range: float, default=0.8\n Proportion of history in which trend changepoints will\n be estimated. Defaults to 0.8 for the first 80%. 
Not used if\n `changepoints` is specified.\n yearly_seasonality: str or bool or int, default=\"auto\"\n Fit yearly seasonality.\n Can be 'auto', True, False, or a number of Fourier terms to generate.\n weekly_seasonality: str or bool or int, default=\"auto\"\n Fit weekly seasonality.\n Can be 'auto', True, False, or a number of Fourier terms to generate.\n daily_seasonality: str or bool or int, default=\"auto\"\n Fit daily seasonality.\n Can be 'auto', True, False, or a number of Fourier terms to generate.\n holidays: pd.DataFrame or None, default=None\n pd.DataFrame with columns holiday (string) and ds (date type)\n and optionally columns lower_window and upper_window which specify a\n range of days around the date to be included as holidays.\n lower_window=-2 will include 2 days prior to the date as holidays. Also\n optionally can have a column prior_scale specifying the prior scale for\n that holiday.\n seasonality_mode: str, default='additive'\n Take one of 'additive' or 'multiplicative'.\n seasonality_prior_scale: float, default=10.0\n Parameter modulating the strength of the seasonality model.\n Larger values allow the model to fit larger seasonal\n fluctuations, smaller values dampen the seasonality. Can be specified\n for individual seasonalities using add_seasonality.\n holidays_prior_scale: float, default=10.0\n Parameter modulating the strength of the holiday\n components model, unless overridden in the holidays input.\n changepoint_prior_scale: float, default=0.05\n Parameter modulating the flexibility of the\n automatic changepoint selection. Large values will allow many\n changepoints, small values will allow few changepoints.\n mcmc_samples: int, default=0\n If greater than 0, will do full Bayesian inference\n with the specified number of MCMC samples. If 0, will do MAP\n estimation.\n alpha: float, default=0.05\n Width of the uncertainty intervals provided\n for the forecast. If mcmc_samples=0, this will be only the uncertainty\n in the trend using the MAP estimate of the extrapolated generative\n model. If mcmc.samples>0, this will be integrated over all model\n parameters, which will include uncertainty in seasonality.\n uncertainty_samples: int, default=1000\n Number of simulated draws used to estimate uncertainty intervals.\n Settings this value to 0 or False will disable\n uncertainty estimation and speed up the calculation.\n stan_backend: str or None, default=None\n str as defined in StanBackendEnum. If None, will try to\n iterate over all available backends and find the working one.\n\n References\n ----------\n .. [1] https://facebook.github.io/prophet\n .. [2] https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.fbprophet import Prophet\n >>> # Prophet requires to have data with a pandas.DatetimeIndex\n >>> y = load_airline().to_timestamp(freq='M')\n >>> forecaster = Prophet(\n ... seasonality_mode='multiplicative',\n ... n_changepoints=int(len(y) / 12),\n ... add_country_holidays={'country_name': 'Germany'},\n ... 
yearly_seasonality=True)\n >>> forecaster.fit(y)\n Prophet(...)\n >>> y_pred = forecaster.predict(fh=[1,2,3])\n \"\"\"\n\n def __init__(\n self,\n # Args due to wrapping\n freq=None,\n add_seasonality=None,\n add_country_holidays=None,\n # Args of fbprophet\n growth=\"linear\",\n changepoints=None,\n n_changepoints=25,\n changepoint_range=0.8,\n yearly_seasonality=\"auto\",\n weekly_seasonality=\"auto\",\n daily_seasonality=\"auto\",\n holidays=None,\n seasonality_mode=\"additive\",\n seasonality_prior_scale=10.0,\n holidays_prior_scale=10.0,\n changepoint_prior_scale=0.05,\n mcmc_samples=0,\n alpha=DEFAULT_ALPHA,\n uncertainty_samples=1000,\n stan_backend=None,\n verbose=0,\n interval_width=0,\n ):\n _check_soft_dependencies(\"prophet\", severity=\"error\", object=self)\n\n self.freq = freq\n self.add_seasonality = add_seasonality\n self.add_country_holidays = add_country_holidays\n\n self.growth = growth\n self.changepoints = changepoints\n self.n_changepoints = n_changepoints\n self.changepoint_range = changepoint_range\n self.yearly_seasonality = yearly_seasonality\n self.weekly_seasonality = weekly_seasonality\n self.daily_seasonality = daily_seasonality\n self.holidays = holidays\n self.seasonality_mode = seasonality_mode\n self.seasonality_prior_scale = seasonality_prior_scale\n self.changepoint_prior_scale = changepoint_prior_scale\n self.holidays_prior_scale = holidays_prior_scale\n self.mcmc_samples = mcmc_samples\n self.alpha = alpha\n self.uncertainty_samples = uncertainty_samples\n self.stan_backend = stan_backend\n self.verbose = verbose\n self.interval_width = interval_width\n\n # import inside method to avoid hard dependency\n from prophet.forecaster import Prophet as _Prophet\n\n self._ModelClass = _Prophet\n\n super(Prophet, self).__init__()\n\n def _instantiate_model(self):\n self._forecaster = self._ModelClass(\n growth=self.growth,\n changepoints=self.changepoints,\n n_changepoints=self.n_changepoints,\n changepoint_range=self.changepoint_range,\n yearly_seasonality=self.yearly_seasonality,\n weekly_seasonality=self.weekly_seasonality,\n daily_seasonality=self.daily_seasonality,\n holidays=self.holidays,\n seasonality_mode=self.seasonality_mode,\n seasonality_prior_scale=float(self.seasonality_prior_scale),\n holidays_prior_scale=float(self.holidays_prior_scale),\n changepoint_prior_scale=float(self.changepoint_prior_scale),\n mcmc_samples=self.mcmc_samples,\n interval_width=1 - self.alpha,\n uncertainty_samples=self.uncertainty_samples,\n stan_backend=self.stan_backend,\n )\n return self\n\n @classmethod\n def get_test_params(cls):\n \"\"\"Return testing parameter settings for the estimator.\n\n Returns\n -------\n params : dict or list of dict\n \"\"\"\n params = {\n \"n_changepoints\": 0,\n \"yearly_seasonality\": False,\n \"weekly_seasonality\": False,\n \"daily_seasonality\": False,\n \"uncertainty_samples\": 1000,\n \"verbose\": False,\n }\n return params\n", "path": "sktime/forecasting/fbprophet.py"}], "after_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements Prophet forecaster by wrapping fbprophet.\"\"\"\n\n__author__ = [\"aiwalter\"]\n__all__ = [\"Prophet\"]\n\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base.adapters import _ProphetAdapter\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\n_check_soft_dependencies(\"prophet\", severity=\"warning\")\n\n\nclass 
Prophet(_ProphetAdapter):\n \"\"\"Prophet forecaster by wrapping Facebook's prophet algorithm [1]_.\n\n Parameters\n ----------\n freq: str, default=None\n A DatetimeIndex frequency. For possible values see\n https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html\n add_seasonality: dict or None, default=None\n Dict with args for Prophet.add_seasonality().\n Dict can have the following keys/values:\n * name: string name of the seasonality component.\n * period: float number of days in one period.\n * fourier_order: int number of Fourier components to use.\n * prior_scale: optional float prior scale for this component.\n * mode: optional 'additive' or 'multiplicative'\n * condition_name: string name of the seasonality condition.\n add_country_holidays: dict or None, default=None\n Dict with args for Prophet.add_country_holidays().\n Dict can have the following keys/values:\n country_name: Name of the country, like 'UnitedStates' or 'US'\n growth: str, default=\"linear\"\n String 'linear' or 'logistic' to specify a linear or logistic\n trend.\n changepoints: list or None, default=None\n List of dates at which to include potential changepoints. If\n not specified, potential changepoints are selected automatically.\n n_changepoints: int, default=25\n Number of potential changepoints to include. Not used\n if input `changepoints` is supplied. If `changepoints` is not supplied,\n then n_changepoints potential changepoints are selected uniformly from\n the first `changepoint_range` proportion of the history.\n changepoint_range: float, default=0.8\n Proportion of history in which trend changepoints will\n be estimated. Defaults to 0.8 for the first 80%. Not used if\n `changepoints` is specified.\n yearly_seasonality: str or bool or int, default=\"auto\"\n Fit yearly seasonality.\n Can be 'auto', True, False, or a number of Fourier terms to generate.\n weekly_seasonality: str or bool or int, default=\"auto\"\n Fit weekly seasonality.\n Can be 'auto', True, False, or a number of Fourier terms to generate.\n daily_seasonality: str or bool or int, default=\"auto\"\n Fit daily seasonality.\n Can be 'auto', True, False, or a number of Fourier terms to generate.\n holidays: pd.DataFrame or None, default=None\n pd.DataFrame with columns holiday (string) and ds (date type)\n and optionally columns lower_window and upper_window which specify a\n range of days around the date to be included as holidays.\n lower_window=-2 will include 2 days prior to the date as holidays. Also\n optionally can have a column prior_scale specifying the prior scale for\n that holiday.\n seasonality_mode: str, default='additive'\n Take one of 'additive' or 'multiplicative'.\n seasonality_prior_scale: float, default=10.0\n Parameter modulating the strength of the seasonality model.\n Larger values allow the model to fit larger seasonal\n fluctuations, smaller values dampen the seasonality. Can be specified\n for individual seasonalities using add_seasonality.\n holidays_prior_scale: float, default=10.0\n Parameter modulating the strength of the holiday\n components model, unless overridden in the holidays input.\n changepoint_prior_scale: float, default=0.05\n Parameter modulating the flexibility of the\n automatic changepoint selection. Large values will allow many\n changepoints, small values will allow few changepoints.\n mcmc_samples: int, default=0\n If greater than 0, will do full Bayesian inference\n with the specified number of MCMC samples. 
If 0, will do MAP\n estimation.\n alpha: float, default=0.05\n Width of the uncertainty intervals provided\n for the forecast. If mcmc_samples=0, this will be only the uncertainty\n in the trend using the MAP estimate of the extrapolated generative\n model. If mcmc.samples>0, this will be integrated over all model\n parameters, which will include uncertainty in seasonality.\n uncertainty_samples: int, default=1000\n Number of simulated draws used to estimate uncertainty intervals.\n Settings this value to 0 or False will disable\n uncertainty estimation and speed up the calculation.\n stan_backend: str or None, default=None\n str as defined in StanBackendEnum. If None, will try to\n iterate over all available backends and find the working one.\n\n References\n ----------\n .. [1] https://facebook.github.io/prophet\n\n Examples\n --------\n >>> from sktime.datasets import load_airline\n >>> from sktime.forecasting.fbprophet import Prophet\n >>> # Prophet requires to have data with a pandas.DatetimeIndex\n >>> y = load_airline().to_timestamp(freq='M')\n >>> forecaster = Prophet(\n ... seasonality_mode='multiplicative',\n ... n_changepoints=int(len(y) / 12),\n ... add_country_holidays={'country_name': 'Germany'},\n ... yearly_seasonality=True)\n >>> forecaster.fit(y)\n Prophet(...)\n >>> y_pred = forecaster.predict(fh=[1,2,3])\n \"\"\"\n\n def __init__(\n self,\n # Args due to wrapping\n freq=None,\n add_seasonality=None,\n add_country_holidays=None,\n # Args of fbprophet\n growth=\"linear\",\n changepoints=None,\n n_changepoints=25,\n changepoint_range=0.8,\n yearly_seasonality=\"auto\",\n weekly_seasonality=\"auto\",\n daily_seasonality=\"auto\",\n holidays=None,\n seasonality_mode=\"additive\",\n seasonality_prior_scale=10.0,\n holidays_prior_scale=10.0,\n changepoint_prior_scale=0.05,\n mcmc_samples=0,\n alpha=DEFAULT_ALPHA,\n uncertainty_samples=1000,\n stan_backend=None,\n verbose=0,\n interval_width=0,\n ):\n _check_soft_dependencies(\"prophet\", severity=\"error\", object=self)\n\n self.freq = freq\n self.add_seasonality = add_seasonality\n self.add_country_holidays = add_country_holidays\n\n self.growth = growth\n self.changepoints = changepoints\n self.n_changepoints = n_changepoints\n self.changepoint_range = changepoint_range\n self.yearly_seasonality = yearly_seasonality\n self.weekly_seasonality = weekly_seasonality\n self.daily_seasonality = daily_seasonality\n self.holidays = holidays\n self.seasonality_mode = seasonality_mode\n self.seasonality_prior_scale = seasonality_prior_scale\n self.changepoint_prior_scale = changepoint_prior_scale\n self.holidays_prior_scale = holidays_prior_scale\n self.mcmc_samples = mcmc_samples\n self.alpha = alpha\n self.uncertainty_samples = uncertainty_samples\n self.stan_backend = stan_backend\n self.verbose = verbose\n self.interval_width = interval_width\n\n # import inside method to avoid hard dependency\n from prophet.forecaster import Prophet as _Prophet\n\n self._ModelClass = _Prophet\n\n super(Prophet, self).__init__()\n\n def _instantiate_model(self):\n self._forecaster = self._ModelClass(\n growth=self.growth,\n changepoints=self.changepoints,\n n_changepoints=self.n_changepoints,\n changepoint_range=self.changepoint_range,\n yearly_seasonality=self.yearly_seasonality,\n weekly_seasonality=self.weekly_seasonality,\n daily_seasonality=self.daily_seasonality,\n holidays=self.holidays,\n seasonality_mode=self.seasonality_mode,\n seasonality_prior_scale=float(self.seasonality_prior_scale),\n 
holidays_prior_scale=float(self.holidays_prior_scale),\n changepoint_prior_scale=float(self.changepoint_prior_scale),\n mcmc_samples=self.mcmc_samples,\n interval_width=1 - self.alpha,\n uncertainty_samples=self.uncertainty_samples,\n stan_backend=self.stan_backend,\n )\n return self\n\n @classmethod\n def get_test_params(cls):\n \"\"\"Return testing parameter settings for the estimator.\n\n Returns\n -------\n params : dict or list of dict\n \"\"\"\n params = {\n \"n_changepoints\": 0,\n \"yearly_seasonality\": False,\n \"weekly_seasonality\": False,\n \"daily_seasonality\": False,\n \"uncertainty_samples\": 1000,\n \"verbose\": False,\n }\n return params\n", "path": "sktime/forecasting/fbprophet.py"}]}
| 3,000 | 231 |
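One more sketch before the next record: the sktime golden diff above only touches the Prophet docstring, rewriting the `freq` entry into standard numpydoc form with the pandas URL inlined and removing the `[2]` footnote from References. A stripped-down example of the resulting docstring shape; the class and its parameter set are invented for illustration.

```python
# Minimal sketch of the numpydoc layout the patch above moves to; only the
# formatting of the `freq` parameter and the References block matters here.
class ExampleForecaster:
    """Toy forecaster illustrating the docstring style.

    Parameters
    ----------
    freq : str, default=None
        A DatetimeIndex frequency. For possible values see
        https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html

    References
    ----------
    .. [1] https://facebook.github.io/prophet
    """

    def __init__(self, freq=None):
        self.freq = freq
```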
gh_patches_debug_33489 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1386 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error message when `filter_valid_files` finds invalid files.
Currently if you have invalid files they are silently dropped. If all are dropped then this will trigger a cryptic error. Instead we should give a good error message like:
```
Found invalid files: ..., ..., ..... The supported file extensions are: ...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/core/data/utilities/paths.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Any, Callable, cast, List, Optional, Tuple, TypeVar, Union
16
17 from pytorch_lightning.utilities.exceptions import MisconfigurationException
18
19 from flash.core.data.utilities.sort import sorted_alphanumeric
20
21 PATH_TYPE = Union[str, bytes, os.PathLike]
22
23 T = TypeVar("T")
24
25
26 # adapted from torchvision:
27 # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L10
28 def has_file_allowed_extension(filename: PATH_TYPE, extensions: Tuple[str, ...]) -> bool:
29 """Checks if a file is an allowed extension.
30
31 Args:
32 filename (string): path to a file
33 extensions (tuple of strings): extensions to consider (lowercase)
34
35 Returns:
36 bool: True if the filename ends with one of given extensions
37 """
38 return str(filename).lower().endswith(extensions)
39
40
41 # Adapted from torchvision:
42 # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L48
43 def make_dataset(
44 directory: PATH_TYPE,
45 extensions: Optional[Tuple[str, ...]] = None,
46 is_valid_file: Optional[Callable[[str], bool]] = None,
47 ) -> Tuple[List[PATH_TYPE], Optional[List[PATH_TYPE]]]:
48 """Generates a list of samples of a form (path_to_sample, class).
49
50 Args:
51 directory (str): root dataset directory
52 extensions (optional): A list of allowed extensions.
53 Either extensions or is_valid_file should be passed. Defaults to None.
54 is_valid_file (optional): A function that takes path of a file
55 and checks if the file is a valid file
56 (used to check of corrupt files) both extensions and
57 is_valid_file should not be passed. Defaults to None.
58
59 Raises:
60 ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
61
62 Returns:
63 (files, targets) Tuple containing the list of files and corresponding list of targets.
64 """
65 files, targets = [], []
66 directory = os.path.expanduser(str(directory))
67 both_none = extensions is None and is_valid_file is None
68 both_something = extensions is not None and is_valid_file is not None
69 if both_none or both_something:
70 raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
71 if extensions is not None:
72
73 def is_valid_file(x: str) -> bool:
74 return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
75
76 is_valid_file = cast(Callable[[str], bool], is_valid_file)
77 subdirs = list_subdirs(directory)
78 if len(subdirs) > 0:
79 for target_class in subdirs:
80 target_dir = os.path.join(directory, target_class)
81 if not os.path.isdir(target_dir):
82 continue
83 for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
84 for fname in sorted(fnames):
85 path = os.path.join(root, fname)
86 if is_valid_file(path):
87 files.append(path)
88 targets.append(target_class)
89 return files, targets
90 return list_valid_files(directory), None
91
92
93 def isdir(path: Any) -> bool:
94 try:
95 return os.path.isdir(path)
96 except TypeError:
97 # data is not path-like (e.g. it may be a list of paths)
98 return False
99
100
101 def list_subdirs(folder: PATH_TYPE) -> List[str]:
102 """List the subdirectories of a given directory.
103
104 Args:
105 folder: The directory to scan.
106
107 Returns:
108 The list of subdirectories.
109 """
110 return list(sorted_alphanumeric(d.name for d in os.scandir(str(folder)) if d.is_dir()))
111
112
113 def list_valid_files(
114 paths: Union[PATH_TYPE, List[PATH_TYPE]], valid_extensions: Optional[Tuple[str, ...]] = None
115 ) -> List[PATH_TYPE]:
116 """List the files with a valid extension present in: a single file, a list of files, or a directory.
117
118 Args:
119 paths: A single file, a list of files, or a directory.
120 valid_extensions: The tuple of valid file extensions.
121
122 Returns:
123 The list of files present in ``paths`` that have a valid extension.
124 """
125 if isdir(paths):
126 paths = [os.path.join(paths, file) for file in os.listdir(paths)]
127
128 if not isinstance(paths, list):
129 paths = [paths]
130
131 if valid_extensions is None:
132 return paths
133 return [path for path in paths if has_file_allowed_extension(path, valid_extensions)]
134
135
136 def filter_valid_files(
137 files: Union[PATH_TYPE, List[PATH_TYPE]],
138 *additional_lists: List[Any],
139 valid_extensions: Optional[Tuple[str, ...]] = None,
140 ) -> Union[List[Any], Tuple[List[Any], ...]]:
141 """Filter the given list of files and any additional lists to include only the entries that contain a file with
142 a valid extension.
143
144 Args:
145 files: The list of files to filter by.
146 additional_lists: Any additional lists to be filtered together with files.
147 valid_extensions: The tuple of valid file extensions.
148
149 Returns:
150 The filtered lists.
151 """
152 if not isinstance(files, List):
153 files = [files]
154
155 additional_lists = tuple([a] if not isinstance(a, List) else a for a in additional_lists)
156
157 if not all(len(a) == len(files) for a in additional_lists):
158 raise MisconfigurationException(
159 f"The number of files ({len(files)}) and the number of items in any additional lists must be the same."
160 )
161
162 if valid_extensions is None:
163 return (files,) + additional_lists
164 filtered = list(
165 filter(lambda sample: has_file_allowed_extension(sample[0], valid_extensions), zip(files, *additional_lists))
166 )
167 if len(additional_lists) > 0:
168 return tuple(zip(*filtered))
169 return [f[0] for f in filtered]
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash/core/data/utilities/paths.py b/flash/core/data/utilities/paths.py
--- a/flash/core/data/utilities/paths.py
+++ b/flash/core/data/utilities/paths.py
@@ -14,6 +14,7 @@
import os
from typing import Any, Callable, cast, List, Optional, Tuple, TypeVar, Union
+from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from flash.core.data.utilities.sort import sorted_alphanumeric
@@ -152,6 +153,12 @@
if not isinstance(files, List):
files = [files]
+ if valid_extensions is None:
+ return (files,) + additional_lists
+
+ if not isinstance(valid_extensions, tuple):
+ valid_extensions = tuple(valid_extensions)
+
additional_lists = tuple([a] if not isinstance(a, List) else a for a in additional_lists)
if not all(len(a) == len(files) for a in additional_lists):
@@ -159,11 +166,23 @@
f"The number of files ({len(files)}) and the number of items in any additional lists must be the same."
)
- if valid_extensions is None:
- return (files,) + additional_lists
filtered = list(
filter(lambda sample: has_file_allowed_extension(sample[0], valid_extensions), zip(files, *additional_lists))
)
- if len(additional_lists) > 0:
+
+ filtered_files = [f[0] for f in filtered]
+
+ invalid = [f for f in files if f not in filtered_files]
+
+ if invalid:
+ invalid_extensions = list({"." + f.split(".")[-1] for f in invalid})
+ rank_zero_warn(
+ f"Found invalid file extensions: {', '.join(invalid_extensions)}. "
+ "Files with these extensions will be ignored. "
+ f"The supported file extensions are: {', '.join(valid_extensions)}."
+ )
+
+ if additional_lists:
return tuple(zip(*filtered))
- return [f[0] for f in filtered]
+
+ return filtered_files
|
{"golden_diff": "diff --git a/flash/core/data/utilities/paths.py b/flash/core/data/utilities/paths.py\n--- a/flash/core/data/utilities/paths.py\n+++ b/flash/core/data/utilities/paths.py\n@@ -14,6 +14,7 @@\n import os\n from typing import Any, Callable, cast, List, Optional, Tuple, TypeVar, Union\n \n+from pytorch_lightning.utilities import rank_zero_warn\n from pytorch_lightning.utilities.exceptions import MisconfigurationException\n \n from flash.core.data.utilities.sort import sorted_alphanumeric\n@@ -152,6 +153,12 @@\n if not isinstance(files, List):\n files = [files]\n \n+ if valid_extensions is None:\n+ return (files,) + additional_lists\n+\n+ if not isinstance(valid_extensions, tuple):\n+ valid_extensions = tuple(valid_extensions)\n+\n additional_lists = tuple([a] if not isinstance(a, List) else a for a in additional_lists)\n \n if not all(len(a) == len(files) for a in additional_lists):\n@@ -159,11 +166,23 @@\n f\"The number of files ({len(files)}) and the number of items in any additional lists must be the same.\"\n )\n \n- if valid_extensions is None:\n- return (files,) + additional_lists\n filtered = list(\n filter(lambda sample: has_file_allowed_extension(sample[0], valid_extensions), zip(files, *additional_lists))\n )\n- if len(additional_lists) > 0:\n+\n+ filtered_files = [f[0] for f in filtered]\n+\n+ invalid = [f for f in files if f not in filtered_files]\n+\n+ if invalid:\n+ invalid_extensions = list({\".\" + f.split(\".\")[-1] for f in invalid})\n+ rank_zero_warn(\n+ f\"Found invalid file extensions: {', '.join(invalid_extensions)}. \"\n+ \"Files with these extensions will be ignored. \"\n+ f\"The supported file extensions are: {', '.join(valid_extensions)}.\"\n+ )\n+\n+ if additional_lists:\n return tuple(zip(*filtered))\n- return [f[0] for f in filtered]\n+\n+ return filtered_files\n", "issue": "Error message when `filter_valid_files` finds invalid files.\nCurrently if you have invalid files they are silently dropped. If all are dropped then this will trigger a cryptic error. Instead we should give a good error message like:\r\n```\r\nFound invalid files: ..., ..., ..... 
The supported file extensions are: ...\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Callable, cast, List, Optional, Tuple, TypeVar, Union\n\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\nfrom flash.core.data.utilities.sort import sorted_alphanumeric\n\nPATH_TYPE = Union[str, bytes, os.PathLike]\n\nT = TypeVar(\"T\")\n\n\n# adapted from torchvision:\n# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L10\ndef has_file_allowed_extension(filename: PATH_TYPE, extensions: Tuple[str, ...]) -> bool:\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return str(filename).lower().endswith(extensions)\n\n\n# Adapted from torchvision:\n# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L48\ndef make_dataset(\n directory: PATH_TYPE,\n extensions: Optional[Tuple[str, ...]] = None,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n) -> Tuple[List[PATH_TYPE], Optional[List[PATH_TYPE]]]:\n \"\"\"Generates a list of samples of a form (path_to_sample, class).\n\n Args:\n directory (str): root dataset directory\n extensions (optional): A list of allowed extensions.\n Either extensions or is_valid_file should be passed. Defaults to None.\n is_valid_file (optional): A function that takes path of a file\n and checks if the file is a valid file\n (used to check of corrupt files) both extensions and\n is_valid_file should not be passed. 
Defaults to None.\n\n Raises:\n ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.\n\n Returns:\n (files, targets) Tuple containing the list of files and corresponding list of targets.\n \"\"\"\n files, targets = [], []\n directory = os.path.expanduser(str(directory))\n both_none = extensions is None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n\n def is_valid_file(x: str) -> bool:\n return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))\n\n is_valid_file = cast(Callable[[str], bool], is_valid_file)\n subdirs = list_subdirs(directory)\n if len(subdirs) > 0:\n for target_class in subdirs:\n target_dir = os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n files.append(path)\n targets.append(target_class)\n return files, targets\n return list_valid_files(directory), None\n\n\ndef isdir(path: Any) -> bool:\n try:\n return os.path.isdir(path)\n except TypeError:\n # data is not path-like (e.g. it may be a list of paths)\n return False\n\n\ndef list_subdirs(folder: PATH_TYPE) -> List[str]:\n \"\"\"List the subdirectories of a given directory.\n\n Args:\n folder: The directory to scan.\n\n Returns:\n The list of subdirectories.\n \"\"\"\n return list(sorted_alphanumeric(d.name for d in os.scandir(str(folder)) if d.is_dir()))\n\n\ndef list_valid_files(\n paths: Union[PATH_TYPE, List[PATH_TYPE]], valid_extensions: Optional[Tuple[str, ...]] = None\n) -> List[PATH_TYPE]:\n \"\"\"List the files with a valid extension present in: a single file, a list of files, or a directory.\n\n Args:\n paths: A single file, a list of files, or a directory.\n valid_extensions: The tuple of valid file extensions.\n\n Returns:\n The list of files present in ``paths`` that have a valid extension.\n \"\"\"\n if isdir(paths):\n paths = [os.path.join(paths, file) for file in os.listdir(paths)]\n\n if not isinstance(paths, list):\n paths = [paths]\n\n if valid_extensions is None:\n return paths\n return [path for path in paths if has_file_allowed_extension(path, valid_extensions)]\n\n\ndef filter_valid_files(\n files: Union[PATH_TYPE, List[PATH_TYPE]],\n *additional_lists: List[Any],\n valid_extensions: Optional[Tuple[str, ...]] = None,\n) -> Union[List[Any], Tuple[List[Any], ...]]:\n \"\"\"Filter the given list of files and any additional lists to include only the entries that contain a file with\n a valid extension.\n\n Args:\n files: The list of files to filter by.\n additional_lists: Any additional lists to be filtered together with files.\n valid_extensions: The tuple of valid file extensions.\n\n Returns:\n The filtered lists.\n \"\"\"\n if not isinstance(files, List):\n files = [files]\n\n additional_lists = tuple([a] if not isinstance(a, List) else a for a in additional_lists)\n\n if not all(len(a) == len(files) for a in additional_lists):\n raise MisconfigurationException(\n f\"The number of files ({len(files)}) and the number of items in any additional lists must be the same.\"\n )\n\n if valid_extensions is None:\n return (files,) + additional_lists\n filtered = list(\n filter(lambda sample: has_file_allowed_extension(sample[0], valid_extensions), 
zip(files, *additional_lists))\n )\n if len(additional_lists) > 0:\n return tuple(zip(*filtered))\n return [f[0] for f in filtered]\n", "path": "flash/core/data/utilities/paths.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Callable, cast, List, Optional, Tuple, TypeVar, Union\n\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\nfrom flash.core.data.utilities.sort import sorted_alphanumeric\n\nPATH_TYPE = Union[str, bytes, os.PathLike]\n\nT = TypeVar(\"T\")\n\n\n# adapted from torchvision:\n# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L10\ndef has_file_allowed_extension(filename: PATH_TYPE, extensions: Tuple[str, ...]) -> bool:\n \"\"\"Checks if a file is an allowed extension.\n\n Args:\n filename (string): path to a file\n extensions (tuple of strings): extensions to consider (lowercase)\n\n Returns:\n bool: True if the filename ends with one of given extensions\n \"\"\"\n return str(filename).lower().endswith(extensions)\n\n\n# Adapted from torchvision:\n# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py#L48\ndef make_dataset(\n directory: PATH_TYPE,\n extensions: Optional[Tuple[str, ...]] = None,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n) -> Tuple[List[PATH_TYPE], Optional[List[PATH_TYPE]]]:\n \"\"\"Generates a list of samples of a form (path_to_sample, class).\n\n Args:\n directory (str): root dataset directory\n extensions (optional): A list of allowed extensions.\n Either extensions or is_valid_file should be passed. Defaults to None.\n is_valid_file (optional): A function that takes path of a file\n and checks if the file is a valid file\n (used to check of corrupt files) both extensions and\n is_valid_file should not be passed. 
Defaults to None.\n\n Raises:\n ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.\n\n Returns:\n (files, targets) Tuple containing the list of files and corresponding list of targets.\n \"\"\"\n files, targets = [], []\n directory = os.path.expanduser(str(directory))\n both_none = extensions is None and is_valid_file is None\n both_something = extensions is not None and is_valid_file is not None\n if both_none or both_something:\n raise ValueError(\"Both extensions and is_valid_file cannot be None or not None at the same time\")\n if extensions is not None:\n\n def is_valid_file(x: str) -> bool:\n return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))\n\n is_valid_file = cast(Callable[[str], bool], is_valid_file)\n subdirs = list_subdirs(directory)\n if len(subdirs) > 0:\n for target_class in subdirs:\n target_dir = os.path.join(directory, target_class)\n if not os.path.isdir(target_dir):\n continue\n for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):\n for fname in sorted(fnames):\n path = os.path.join(root, fname)\n if is_valid_file(path):\n files.append(path)\n targets.append(target_class)\n return files, targets\n return list_valid_files(directory), None\n\n\ndef isdir(path: Any) -> bool:\n try:\n return os.path.isdir(path)\n except TypeError:\n # data is not path-like (e.g. it may be a list of paths)\n return False\n\n\ndef list_subdirs(folder: PATH_TYPE) -> List[str]:\n \"\"\"List the subdirectories of a given directory.\n\n Args:\n folder: The directory to scan.\n\n Returns:\n The list of subdirectories.\n \"\"\"\n return list(sorted_alphanumeric(d.name for d in os.scandir(str(folder)) if d.is_dir()))\n\n\ndef list_valid_files(\n paths: Union[PATH_TYPE, List[PATH_TYPE]], valid_extensions: Optional[Tuple[str, ...]] = None\n) -> List[PATH_TYPE]:\n \"\"\"List the files with a valid extension present in: a single file, a list of files, or a directory.\n\n Args:\n paths: A single file, a list of files, or a directory.\n valid_extensions: The tuple of valid file extensions.\n\n Returns:\n The list of files present in ``paths`` that have a valid extension.\n \"\"\"\n if isdir(paths):\n paths = [os.path.join(paths, file) for file in os.listdir(paths)]\n\n if not isinstance(paths, list):\n paths = [paths]\n\n if valid_extensions is None:\n return paths\n return [path for path in paths if has_file_allowed_extension(path, valid_extensions)]\n\n\ndef filter_valid_files(\n files: Union[PATH_TYPE, List[PATH_TYPE]],\n *additional_lists: List[Any],\n valid_extensions: Optional[Tuple[str, ...]] = None,\n) -> Union[List[Any], Tuple[List[Any], ...]]:\n \"\"\"Filter the given list of files and any additional lists to include only the entries that contain a file with\n a valid extension.\n\n Args:\n files: The list of files to filter by.\n additional_lists: Any additional lists to be filtered together with files.\n valid_extensions: The tuple of valid file extensions.\n\n Returns:\n The filtered lists.\n \"\"\"\n if not isinstance(files, List):\n files = [files]\n\n if valid_extensions is None:\n return (files,) + additional_lists\n\n if not isinstance(valid_extensions, tuple):\n valid_extensions = tuple(valid_extensions)\n\n additional_lists = tuple([a] if not isinstance(a, List) else a for a in additional_lists)\n\n if not all(len(a) == len(files) for a in additional_lists):\n raise MisconfigurationException(\n f\"The number of files ({len(files)}) and the number of items in any additional lists must be the same.\"\n )\n\n 
filtered = list(\n filter(lambda sample: has_file_allowed_extension(sample[0], valid_extensions), zip(files, *additional_lists))\n )\n\n filtered_files = [f[0] for f in filtered]\n\n invalid = [f for f in files if f not in filtered_files]\n\n if invalid:\n invalid_extensions = list({\".\" + f.split(\".\")[-1] for f in invalid})\n rank_zero_warn(\n f\"Found invalid file extensions: {', '.join(invalid_extensions)}. \"\n \"Files with these extensions will be ignored. \"\n f\"The supported file extensions are: {', '.join(valid_extensions)}.\"\n )\n\n if additional_lists:\n return tuple(zip(*filtered))\n\n return filtered_files\n", "path": "flash/core/data/utilities/paths.py"}]}
| 2,156 | 481 |
gh_patches_debug_2914
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2792
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
broken facebook link when no user_name
If all we have is a user_id, we construct the URL improperly. In that case we need:
`http://facebook.com/profile.php?id=$ID`
But we have:
`http://facebook.com/None`
--- END ISSUE ---
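To make the failure mode concrete, here is a small self-contained sketch (not taken from the gratipay codebase) of how a `str.format`-style account URL template behaves when only a numeric user id is known:

```python
# Hypothetical templates mirroring the two URL shapes discussed in the issue.
NAME_TEMPLATE = 'https://www.facebook.com/{user_name}'
ID_TEMPLATE = 'https://www.facebook.com/profile.php?id={user_id}'


def build_url(template, user_id=None, user_name=None):
    # str.format happily interpolates None, which is how ".../None" links appear.
    return template.format(user_id=user_id, user_name=user_name)


print(build_url(NAME_TEMPLATE, user_id='1234'))  # https://www.facebook.com/None
print(build_url(ID_TEMPLATE, user_id='1234'))    # https://www.facebook.com/profile.php?id=1234
```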
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/elsewhere/facebook.py`
Content:
```
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from gratipay.elsewhere import PlatformOAuth2
4 from gratipay.elsewhere._extractors import key
5
6
7 class Facebook(PlatformOAuth2):
8
9 # Platform attributes
10 name = 'facebook'
11 display_name = 'Facebook'
12 account_url = 'https://www.facebook.com/{user_name}'
13
14 # Auth attributes
15 auth_url = 'https://www.facebook.com/dialog/oauth'
16 access_token_url = 'https://graph.facebook.com/oauth/access_token'
17 oauth_default_scope = ['public_profile,email']
18
19 # API attributes
20 api_format = 'json'
21 api_url = 'https://graph.facebook.com'
22 api_user_info_path = '/{user_name}'
23 api_user_self_info_path = '/me'
24
25 # User info extractors
26 x_user_id = key('id')
27 x_user_name = key('username')
28 x_display_name = key('name')
29 x_email = key('email')
30
31 def x_avatar_url(self, extracted, info, default):
32 return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py
--- a/gratipay/elsewhere/facebook.py
+++ b/gratipay/elsewhere/facebook.py
@@ -9,7 +9,7 @@
# Platform attributes
name = 'facebook'
display_name = 'Facebook'
- account_url = 'https://www.facebook.com/{user_name}'
+ account_url = 'https://www.facebook.com/profile.php?id={user_id}'
# Auth attributes
auth_url = 'https://www.facebook.com/dialog/oauth'
|
{"golden_diff": "diff --git a/gratipay/elsewhere/facebook.py b/gratipay/elsewhere/facebook.py\n--- a/gratipay/elsewhere/facebook.py\n+++ b/gratipay/elsewhere/facebook.py\n@@ -9,7 +9,7 @@\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n- account_url = 'https://www.facebook.com/{user_name}'\n+ account_url = 'https://www.facebook.com/profile.php?id={user_id}'\n \n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n", "issue": "broken facebook link when no user_name\nIf all we have is a user_id, we construct the URL improperly. In that case we need:\n\n`http://facebook.com/profile.php?id=$ID`\n\nBut we have:\n\n`http://facebook.com/None`\n\nbroken facebook link when no user_name\nIf all we have is a user_id, we construct the URL improperly. In that case we need:\n\n`http://facebook.com/profile.php?id=$ID`\n\nBut we have:\n\n`http://facebook.com/None`\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n", "path": "gratipay/elsewhere/facebook.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth2\nfrom gratipay.elsewhere._extractors import key\n\n\nclass Facebook(PlatformOAuth2):\n\n # Platform attributes\n name = 'facebook'\n display_name = 'Facebook'\n account_url = 'https://www.facebook.com/profile.php?id={user_id}'\n\n # Auth attributes\n auth_url = 'https://www.facebook.com/dialog/oauth'\n access_token_url = 'https://graph.facebook.com/oauth/access_token'\n oauth_default_scope = ['public_profile,email']\n\n # API attributes\n api_format = 'json'\n api_url = 'https://graph.facebook.com'\n api_user_info_path = '/{user_name}'\n api_user_self_info_path = '/me'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('username')\n x_display_name = key('name')\n x_email = key('email')\n\n def x_avatar_url(self, extracted, info, default):\n return 'https://graph.facebook.com/' + extracted.user_id + '/picture?width=256&height=256'\n", "path": "gratipay/elsewhere/facebook.py"}]}
| 681 | 124 |
gh_patches_debug_6473
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-2102
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Updating "My profile" does not save all profile fields
## What kind of an issue is this?
- [x] Bug report
- [ ] Feature request
## What is the expected behaviour?
Seeing my postal code at my [profile overview page](https://online.ntnu.no/profile/overview/)
## What is the current behaviour?
Seeing `None Ugyldig postnummer` (Norwegian for "invalid postal code") at my [profile overview page](https://online.ntnu.no/profile/overview/)
<!-- if this is a bug report -->
## How do you reproduce this problem?
- Go to the [profile edit page](https://online.ntnu.no/profile/edit/), write `7050` (or another postal code) into the _Postnummer_ (postal code) field
- Press _Lagre brukerinformasjon_ ("Save user information"). No visible errors on the page or in the browser console; the green status box at the top says _"Brukerprofilen din ble endret"_ ("Your user profile was changed")
- Go to the [profile overview page](https://online.ntnu.no/profile/overview/), look for the _Adresse_ (address). You should now see `None Ugyldig postnummer`.
<!-- if this is a bug report -->
<!-- provide steps to reproduce this problem, preferably in a bullet point list -->
## Other information
<!-- include screenshots if appropriate -->
<!-- add labels you see fit. please do not exaggerate. -->
### Screenshots
[Profile edit page:](https://online.ntnu.no/profile/edit/)

[Profile overview page:](https://online.ntnu.no/profile/overview/)

--- END ISSUE ---
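Two properties of the reported behaviour are worth spelling out before looking at the code: a Django `clean_<field>()` hook must return the cleaned value (whatever it returns is what ends up in `cleaned_data`, so falling through returns `None`), and `re.match(r'\d{4}', ...)` is unanchored, so longer or partly non-numeric input with four leading digits still passes. A simplified standalone sketch (not the project's actual form code):

```python
import re

# Unanchored vs. anchored validation of a four-digit postal code.
print(bool(re.match(r'\d{4}', '70503abc')))    # True  -- junk after four digits still passes
print(bool(re.match(r'^\d{4}$', '70503abc')))  # False
print(bool(re.match(r'^\d{4}$', '7050')))      # True


def clean_zip_code(zip_code):
    """Mimics a Django clean_<field>() hook: the return value is what gets saved."""
    if not zip_code or not re.match(r'^\d{4}$', zip_code):
        raise ValueError('Postnummer må bestå av fire siffer.')
    return zip_code  # omitting this return is how a valid value silently becomes None


print(clean_zip_code('7050'))  # 7050
```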
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/profiles/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import re
3
4 from django import forms
5 from django.contrib import auth
6 from django.utils.translation import ugettext as _
7
8 from apps.authentication.models import OnlineUser, Position
9 from apps.profiles.models import Privacy
10
11 ZIP_CODE_VALIDATION_ERROR = "Postnummer må bestå av fire siffer."
12
13
14 class ProfileForm(forms.ModelForm):
15 class Meta(object):
16 model = OnlineUser
17
18 fields = [
19 'nickname',
20 'website',
21 'phone_number',
22 'address',
23 'zip_code',
24 'allergies',
25 'compiled',
26 'bio',
27 'gender',
28 'github',
29 'linkedin'
30 ]
31 widgets = {
32 'allergies': forms.Textarea(attrs={'id': 'allergies'}),
33 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),
34 'bio': forms.Textarea(attrs={'id': 'bio'}),
35 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),
36 }
37
38 def clean_zip_code(self):
39 zip_code = self.cleaned_data['zip_code']
40 if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
41 self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)
42
43
44 class PrivacyForm(forms.ModelForm):
45 class Meta(object):
46 model = Privacy
47 exclude = ['user', 'expose_nickname']
48
49
50 class MailSettingsForm(forms.ModelForm):
51 class Meta(object):
52 model = OnlineUser
53 fields = ['infomail', ]
54
55
56 class PositionForm(forms.ModelForm):
57 class Meta(object):
58 model = Position
59 exclude = ['user']
60 widgets = {
61 'committee': forms.Select(attrs={'class': 'form-control'}),
62 'position': forms.Select(attrs={'class': 'form-control'}),
63 }
64
65 def clean(self):
66 super(PositionForm, self).clean()
67
68 range_compiler = re.compile(r'\d{4}-\d{4}')
69 year_range = self.cleaned_data['period']
70
71 # If it doesn't match the format YYYY-YYYY
72 if not range_compiler.match(year_range):
73 self._errors['period'] = self.error_class(
74 [_('Feil format. Dobbelsjekk at input er på formatet YYYY-YYYY.')]
75 )
76 return self.cleaned_data
77
78 years = year_range.split('-')
79
80 # If somewhat they fucked up input, we don't want None-shit after the split.
81 if not years[0] or not years[1]:
82 self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])
83 return self.cleaned_data
84
85 # If first year is larger than latter, or the diff is more than one, fail.
86 if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:
87 self._errors['period'] = self.error_class([_('Ikke gyldig års-intervall. Bare ett år er tillat.')])
88
89 return self.cleaned_data
90
91
92 class MembershipSettingsForm(forms.ModelForm):
93 def __init__(self, *args, **kwargs):
94 super(MembershipSettingsForm, self).__init__(*args, **kwargs)
95 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'
96
97 class Meta(object):
98 model = OnlineUser
99 fields = ['field_of_study', 'started_date']
100
101 widgets = {
102 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),
103 }
104
105
106 class InternalServicesForm(forms.Form):
107 ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Online passord"))
108 services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u"Ønsket service passord"))
109 current_user = None
110
111 def clean(self):
112 super(InternalServicesForm, self).clean()
113 if self.is_valid():
114 cleaned_data = self.cleaned_data
115
116 # User object relation here
117 user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])
118
119 if user is None or user.id != self.current_user.id:
120 self._errors['ow4_password'] = self.error_class([_(u"Passordet er ikke korrekt.")])
121
122 return cleaned_data
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -37,9 +37,11 @@
def clean_zip_code(self):
zip_code = self.cleaned_data['zip_code']
- if zip_code and len(zip_code) != 0 and not re.match(r'\d{4}', zip_code):
+ if not zip_code or not re.match(r'^\d{4}$', zip_code):
self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)
+ return zip_code
+
class PrivacyForm(forms.ModelForm):
class Meta(object):
|
{"golden_diff": "diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -37,9 +37,11 @@\n \n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n- if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n+ if not zip_code or not re.match(r'^\\d{4}$', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n \n+ return zip_code\n+\n \n class PrivacyForm(forms.ModelForm):\n class Meta(object):\n", "issue": "Updating \"My profile\" does not save all profile fields\n## What kind of an issue is this?\r\n\r\n- [x] Bug report\r\n- [ ] Feature request\r\n\r\n\r\n## What is the expected behaviour?\r\nSeeing my post number at my [profile overview page](https://online.ntnu.no/profile/overview/)\r\n\r\n\r\n## What is the current behaviour?\r\nSeeing `None Ugyldig postnummer` at my [profile overview page](https://online.ntnu.no/profile/overview/)\r\n\r\n<!-- if this is a bug report -->\r\n\r\n\r\n## How do you reproduce this problem? \r\n\r\n- Go to the [profile edit page](https://online.ntnu.no/profile/edit/), write `7050` (or another post number) into the _Postnummer_ field\r\n- Press _Lagre brukerinformasjon_. No visible errors on page or in browser console, green status box at the top says _\"Brukerprofilen din ble endret\"_\r\n- Go to the [profile overview page](https://online.ntnu.no/profile/overview/), look for the _Adresse_. You should now see `None Ugyldig postnummer`.\r\n\r\n<!-- if this is a bug report -->\r\n<!-- provide steps to reproduce this problem, preferably in a bullet point list -->\r\n\r\n\r\n## Other information\r\n\r\n<!-- include screenshots if appropriate -->\r\n<!-- add labels you see fit. please do not exaggerate. -->\r\n### Screenshots\r\n[Profile edit page:](https://online.ntnu.no/profile/edit/)\r\n\r\n[Profile overview page:](https://online.ntnu.no/profile/overview/)\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\nZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n if zip_code and len(zip_code) != 0 and not re.match(r'\\d{4}', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, 
self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport re\n\nfrom django import forms\nfrom django.contrib import auth\nfrom django.utils.translation import ugettext as _\n\nfrom apps.authentication.models import OnlineUser, Position\nfrom apps.profiles.models import Privacy\n\nZIP_CODE_VALIDATION_ERROR = \"Postnummer m\u00e5 best\u00e5 av fire siffer.\"\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n\n fields = [\n 'nickname',\n 'website',\n 'phone_number',\n 'address',\n 'zip_code',\n 'allergies',\n 'compiled',\n 'bio',\n 'gender',\n 'github',\n 'linkedin'\n ]\n widgets = {\n 'allergies': forms.Textarea(attrs={'id': 'allergies'}),\n 'gender': forms.Select(attrs={'class': 'form-control', 'id': 'gender'}),\n 'bio': forms.Textarea(attrs={'id': 'bio'}),\n 'compiled': forms.CheckboxInput(attrs={'id': 'compiled'}),\n }\n\n def clean_zip_code(self):\n zip_code = self.cleaned_data['zip_code']\n if not zip_code or not re.match(r'^\\d{4}$', zip_code):\n self.add_error('zip_code', ZIP_CODE_VALIDATION_ERROR)\n\n return zip_code\n\n\nclass PrivacyForm(forms.ModelForm):\n class Meta(object):\n model = Privacy\n exclude = ['user', 'expose_nickname']\n\n\nclass MailSettingsForm(forms.ModelForm):\n class Meta(object):\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass PositionForm(forms.ModelForm):\n class Meta(object):\n model = Position\n exclude = ['user']\n widgets = {\n 'committee': forms.Select(attrs={'class': 'form-control'}),\n 
'position': forms.Select(attrs={'class': 'form-control'}),\n }\n\n def clean(self):\n super(PositionForm, self).clean()\n\n range_compiler = re.compile(r'\\d{4}-\\d{4}')\n year_range = self.cleaned_data['period']\n\n # If it doesn't match the format YYYY-YYYY\n if not range_compiler.match(year_range):\n self._errors['period'] = self.error_class(\n [_('Feil format. Dobbelsjekk at input er p\u00e5 formatet YYYY-YYYY.')]\n )\n return self.cleaned_data\n\n years = year_range.split('-')\n\n # If somewhat they fucked up input, we don't want None-shit after the split.\n if not years[0] or not years[1]:\n self._errors['period'] = self.error_class([_('Feil format. Dobbelsjekk input.')])\n return self.cleaned_data\n\n # If first year is larger than latter, or the diff is more than one, fail.\n if (int(years[0]) > int(years[1])) or (int(years[1]) - int(years[0])) > 1:\n self._errors['period'] = self.error_class([_('Ikke gyldig \u00e5rs-intervall. Bare ett \u00e5r er tillat.')])\n\n return self.cleaned_data\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta(object):\n model = OnlineUser\n fields = ['field_of_study', 'started_date']\n\n widgets = {\n 'started_date': forms.TextInput(attrs={'placeholder': 'YYYY-MM-DD'}),\n }\n\n\nclass InternalServicesForm(forms.Form):\n ow4_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"Online passord\"))\n services_password = forms.CharField(widget=forms.PasswordInput(), label=_(u\"\u00d8nsket service passord\"))\n current_user = None\n\n def clean(self):\n super(InternalServicesForm, self).clean()\n if self.is_valid():\n cleaned_data = self.cleaned_data\n\n # User object relation here\n user = auth.authenticate(username=self.current_user.username, password=cleaned_data['ow4_password'])\n\n if user is None or user.id != self.current_user.id:\n self._errors['ow4_password'] = self.error_class([_(u\"Passordet er ikke korrekt.\")])\n\n return cleaned_data\n", "path": "apps/profiles/forms.py"}]}
| 1,908 | 145 |
gh_patches_debug_4927
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1997
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Code listing for app.py in "look" Tutorial has a bug
In the [Serving images](https://falcon.readthedocs.io/en/stable/user/tutorial.html#serving-images) section, the code listing for `app.py` tries to import the `images` module as `import images`. I believe this should be `from look import images` or depending on one's preferences `import look.images` and refactor references to `images` as `look.images`. I prefer the former:
```python
import os
import falcon
from look import images
def create_app(image_store):
api = falcon.API()
api.add_route('/images', images.Collection(image_store))
api.add_route('/images/{name}', images.Item(image_store))
return api
def get_app():
storage_path = os.environ.get('LOOK_STORAGE_PATH', '.')
image_store = images.ImageStore(storage_path)
return create_app(image_store)
```
--- END ISSUE ---
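For comparison, the `import look.images` variant that the reporter mentions but does not spell out would read roughly as follows (a sketch of that alternative, not the listing proposed in the issue):

```python
import os

import falcon
import look.images


def create_app(image_store):
    api = falcon.API()
    api.add_route('/images', look.images.Collection(image_store))
    api.add_route('/images/{name}', look.images.Item(image_store))
    return api


def get_app():
    storage_path = os.environ.get('LOOK_STORAGE_PATH', '.')
    image_store = look.images.ImageStore(storage_path)
    return create_app(image_store)
```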
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/media/validators/jsonschema.py`
Content:
```
1 from functools import wraps
2 from inspect import iscoroutinefunction
3
4 import falcon
5
6 try:
7 import jsonschema
8 except ImportError: # pragma: nocover
9 pass
10
11
12 def validate(req_schema=None, resp_schema=None, is_async=False):
13 """Validate ``req.media`` using JSON Schema.
14
15 This decorator provides standard JSON Schema validation via the
16 ``jsonschema`` package available from PyPI. Semantic validation via
17 the *format* keyword is enabled for the default checkers implemented
18 by ``jsonschema.FormatChecker``.
19
20 Note:
21 The `jsonschema`` package must be installed separately in order to use
22 this decorator, as Falcon does not install it by default.
23
24 See `json-schema.org <http://json-schema.org/>`_ for more
25 information on defining a compatible dictionary.
26
27 Keyword Args:
28 req_schema (dict): A dictionary that follows the JSON
29 Schema specification. The request will be validated against this
30 schema.
31 resp_schema (dict): A dictionary that follows the JSON
32 Schema specification. The response will be validated against this
33 schema.
34 is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
35 the decorated responder is a coroutine function (i.e., that it
36 is defined with ``async def``) or that it returns an awaitable
37 coroutine object.
38
39 Normally, when the function source is declared using ``async def``,
40 the resulting function object is flagged to indicate it returns a
41 coroutine when invoked, and this can be automatically detected.
42 However, it is possible to use a regular function to return an
43 awaitable coroutine object, in which case a hint is required to let
44 the framework know what to expect. Also, a hint is always required
45 when using a cythonized coroutine function, since Cython does not
46 flag them in a way that can be detected in advance, even when the
47 function is declared using ``async def``.
48
49 Example:
50
51 .. tabs::
52
53 .. tab:: WSGI
54
55 .. code:: python
56
57 from falcon.media.validators import jsonschema
58
59 # -- snip --
60
61 @jsonschema.validate(my_post_schema)
62 def on_post(self, req, resp):
63
64 # -- snip --
65
66 .. tab:: ASGI
67
68 .. code:: python
69
70 from falcon.media.validators import jsonschema
71
72 # -- snip --
73
74 @jsonschema.validate(my_post_schema)
75 async def on_post(self, req, resp):
76
77 # -- snip --
78
79 .. tab:: ASGI (Cythonized App)
80
81 .. code:: python
82
83 from falcon.media.validators import jsonschema
84
85 # -- snip --
86
87 @jsonschema.validate(my_post_schema, is_async=True)
88 async def on_post(self, req, resp):
89
90 # -- snip --
91
92 """
93
94 def decorator(func):
95 if iscoroutinefunction(func) or is_async:
96 return _validate_async(func, req_schema, resp_schema)
97
98 return _validate(func, req_schema, resp_schema)
99
100 return decorator
101
102
103 def _validate(func, req_schema=None, resp_schema=None):
104 @wraps(func)
105 def wrapper(self, req, resp, *args, **kwargs):
106 if req_schema is not None:
107 try:
108 jsonschema.validate(
109 req.media, req_schema, format_checker=jsonschema.FormatChecker()
110 )
111 except jsonschema.ValidationError as e:
112 raise falcon.HTTPBadRequest(
113 title='Request data failed validation', description=e.message
114 )
115
116 result = func(self, req, resp, *args, **kwargs)
117
118 if resp_schema is not None:
119 try:
120 jsonschema.validate(
121 resp.media, resp_schema, format_checker=jsonschema.FormatChecker()
122 )
123 except jsonschema.ValidationError:
124 raise falcon.HTTPInternalServerError(
125 title='Response data failed validation'
126 # Do not return 'e.message' in the response to
127 # prevent info about possible internal response
128 # formatting bugs from leaking out to users.
129 )
130
131 return result
132
133 return wrapper
134
135
136 def _validate_async(func, req_schema=None, resp_schema=None):
137 @wraps(func)
138 async def wrapper(self, req, resp, *args, **kwargs):
139 if req_schema is not None:
140 m = await req.get_media()
141
142 try:
143 jsonschema.validate(
144 m, req_schema, format_checker=jsonschema.FormatChecker()
145 )
146 except jsonschema.ValidationError as e:
147 raise falcon.HTTPBadRequest(
148 title='Request data failed validation', description=e.message
149 )
150
151 result = await func(self, req, resp, *args, **kwargs)
152
153 if resp_schema is not None:
154 try:
155 jsonschema.validate(
156 resp.media, resp_schema, format_checker=jsonschema.FormatChecker()
157 )
158 except jsonschema.ValidationError:
159 raise falcon.HTTPInternalServerError(
160 title='Response data failed validation'
161 # Do not return 'e.message' in the response to
162 # prevent info about possible internal response
163 # formatting bugs from leaking out to users.
164 )
165
166 return result
167
168 return wrapper
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/media/validators/jsonschema.py b/falcon/media/validators/jsonschema.py
--- a/falcon/media/validators/jsonschema.py
+++ b/falcon/media/validators/jsonschema.py
@@ -18,7 +18,7 @@
by ``jsonschema.FormatChecker``.
Note:
- The `jsonschema`` package must be installed separately in order to use
+ The ``jsonschema`` package must be installed separately in order to use
this decorator, as Falcon does not install it by default.
See `json-schema.org <http://json-schema.org/>`_ for more
|
{"golden_diff": "diff --git a/falcon/media/validators/jsonschema.py b/falcon/media/validators/jsonschema.py\n--- a/falcon/media/validators/jsonschema.py\n+++ b/falcon/media/validators/jsonschema.py\n@@ -18,7 +18,7 @@\n by ``jsonschema.FormatChecker``.\n \n Note:\n- The `jsonschema`` package must be installed separately in order to use\n+ The ``jsonschema`` package must be installed separately in order to use\n this decorator, as Falcon does not install it by default.\n \n See `json-schema.org <http://json-schema.org/>`_ for more\n", "issue": "Code listing for app.py in \"look\u201d Tutorial has a bug\nIn the [Serving images](https://falcon.readthedocs.io/en/stable/user/tutorial.html#serving-images) section, the code listing for `app.py` tries to import the `images` module as `import images`. I believe this should be `from look import images` or depending on one's preferences `import look.images` and refactor references to `images` as `look.images`. I prefer the former:\r\n\r\n```python\r\nimport os\r\n\r\nimport falcon\r\n\r\nfrom look import images\r\n\r\n\r\ndef create_app(image_store):\r\n api = falcon.API()\r\n api.add_route('/images', images.Collection(image_store))\r\n api.add_route('/images/{name}', images.Item(image_store))\r\n return api\r\n\r\n\r\ndef get_app():\r\n storage_path = os.environ.get('LOOK_STORAGE_PATH', '.')\r\n image_store = images.ImageStore(storage_path)\r\n return create_app(image_store)\r\n```\r\n\n", "before_files": [{"content": "from functools import wraps\nfrom inspect import iscoroutinefunction\n\nimport falcon\n\ntry:\n import jsonschema\nexcept ImportError: # pragma: nocover\n pass\n\n\ndef validate(req_schema=None, resp_schema=None, is_async=False):\n \"\"\"Validate ``req.media`` using JSON Schema.\n\n This decorator provides standard JSON Schema validation via the\n ``jsonschema`` package available from PyPI. Semantic validation via\n the *format* keyword is enabled for the default checkers implemented\n by ``jsonschema.FormatChecker``.\n\n Note:\n The `jsonschema`` package must be installed separately in order to use\n this decorator, as Falcon does not install it by default.\n\n See `json-schema.org <http://json-schema.org/>`_ for more\n information on defining a compatible dictionary.\n\n Keyword Args:\n req_schema (dict): A dictionary that follows the JSON\n Schema specification. The request will be validated against this\n schema.\n resp_schema (dict): A dictionary that follows the JSON\n Schema specification. The response will be validated against this\n schema.\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n Example:\n\n .. tabs::\n\n .. tab:: WSGI\n\n .. 
code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI (Cythonized App)\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema, is_async=True)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n \"\"\"\n\n def decorator(func):\n if iscoroutinefunction(func) or is_async:\n return _validate_async(func, req_schema, resp_schema)\n\n return _validate(func, req_schema, resp_schema)\n\n return decorator\n\n\ndef _validate(func, req_schema=None, resp_schema=None):\n @wraps(func)\n def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n try:\n jsonschema.validate(\n req.media, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n\n\ndef _validate_async(func, req_schema=None, resp_schema=None):\n @wraps(func)\n async def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n m = await req.get_media()\n\n try:\n jsonschema.validate(\n m, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = await func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n", "path": "falcon/media/validators/jsonschema.py"}], "after_files": [{"content": "from functools import wraps\nfrom inspect import iscoroutinefunction\n\nimport falcon\n\ntry:\n import jsonschema\nexcept ImportError: # pragma: nocover\n pass\n\n\ndef validate(req_schema=None, resp_schema=None, is_async=False):\n \"\"\"Validate ``req.media`` using JSON Schema.\n\n This decorator provides standard JSON Schema validation via the\n ``jsonschema`` package available from PyPI. 
Semantic validation via\n the *format* keyword is enabled for the default checkers implemented\n by ``jsonschema.FormatChecker``.\n\n Note:\n The ``jsonschema`` package must be installed separately in order to use\n this decorator, as Falcon does not install it by default.\n\n See `json-schema.org <http://json-schema.org/>`_ for more\n information on defining a compatible dictionary.\n\n Keyword Args:\n req_schema (dict): A dictionary that follows the JSON\n Schema specification. The request will be validated against this\n schema.\n resp_schema (dict): A dictionary that follows the JSON\n Schema specification. The response will be validated against this\n schema.\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n Example:\n\n .. tabs::\n\n .. tab:: WSGI\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI\n\n .. code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n .. tab:: ASGI (Cythonized App)\n\n .. 
code:: python\n\n from falcon.media.validators import jsonschema\n\n # -- snip --\n\n @jsonschema.validate(my_post_schema, is_async=True)\n async def on_post(self, req, resp):\n\n # -- snip --\n\n \"\"\"\n\n def decorator(func):\n if iscoroutinefunction(func) or is_async:\n return _validate_async(func, req_schema, resp_schema)\n\n return _validate(func, req_schema, resp_schema)\n\n return decorator\n\n\ndef _validate(func, req_schema=None, resp_schema=None):\n @wraps(func)\n def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n try:\n jsonschema.validate(\n req.media, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n\n\ndef _validate_async(func, req_schema=None, resp_schema=None):\n @wraps(func)\n async def wrapper(self, req, resp, *args, **kwargs):\n if req_schema is not None:\n m = await req.get_media()\n\n try:\n jsonschema.validate(\n m, req_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Request data failed validation', description=e.message\n )\n\n result = await func(self, req, resp, *args, **kwargs)\n\n if resp_schema is not None:\n try:\n jsonschema.validate(\n resp.media, resp_schema, format_checker=jsonschema.FormatChecker()\n )\n except jsonschema.ValidationError:\n raise falcon.HTTPInternalServerError(\n title='Response data failed validation'\n # Do not return 'e.message' in the response to\n # prevent info about possible internal response\n # formatting bugs from leaking out to users.\n )\n\n return result\n\n return wrapper\n", "path": "falcon/media/validators/jsonschema.py"}]}
| 1,975 | 135 |
gh_patches_debug_690
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-28952
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add instructions to download .zuliprc file
https://zulip.com/api/configuring-python-bindings describes .zuliprc files, but does not give instructions for where to download them. We should fix this.
- [ ] Add instructions for downloading a bot's .zuliprc file and your .zuliprc file to https://zulip.com/api/configuring-python-bindings. We might want to add some section headings to this page as part of this change. The instructions should have tabs for downloading the file for a bot vs. for yourself.
- [ ] Your own .zuliprc file is downloaded via the "Show/change your API key" button on SETTINGS / ACCOUNT & PRIVACY. While we're here, let's rename that button to "Manage your API key".
--- END ISSUE ---
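Since the requested instructions are meant to be tabbed, note that the file below already parses `{start_tabs}` / `{tab|...}` / `{end_tabs}` markers in the docs Markdown, and `bot` and `user` are existing keys in its `TAB_SECTION_LABELS`. A rough sketch of how such a source block is picked up (illustrative only, not the actual documentation change):

```python
import re

# Hypothetical Markdown source for "download as a bot" vs. "download as yourself" tabs.
doc = """\
{start_tabs}
{tab|bot}
1. Download the bot's .zuliprc file.
{tab|user}
1. Download your own .zuliprc file.
{end_tabs}
"""

# Same pattern as TAB_CONTENT_REGEX in zerver/lib/markdown/tabbed_sections.py.
TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$")

tab_keys = [m.group(1) for line in doc.splitlines() if (m := TAB_CONTENT_REGEX.match(line))]
print(tab_keys)  # ['bot', 'user']
```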
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/markdown/tabbed_sections.py`
Content:
```
1 import re
2 from typing import Any, Dict, List, Mapping, Optional
3
4 import markdown
5 from markdown.extensions import Extension
6 from markdown.preprocessors import Preprocessor
7 from typing_extensions import override
8
9 from zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES
10
11 START_TABBED_SECTION_REGEX = re.compile(r"^\{start_tabs\}$")
12 END_TABBED_SECTION_REGEX = re.compile(r"^\{end_tabs\}$")
13 TAB_CONTENT_REGEX = re.compile(r"^\{tab\|([^}]+)\}$")
14
15 TABBED_SECTION_TEMPLATE = """
16 <div class="tabbed-section {tab_class}" markdown="1">
17 {nav_bar}
18 <div class="blocks">
19 {blocks}
20 </div>
21 </div>
22 """.strip()
23
24 NAV_BAR_TEMPLATE = """
25 <ul class="nav">
26 {tabs}
27 </ul>
28 """.strip()
29
30 NAV_LIST_ITEM_TEMPLATE = """
31 <li data-tab-key="{data_tab_key}" tabindex="0">{label}</li>
32 """.strip()
33
34 DIV_TAB_CONTENT_TEMPLATE = """
35 <div data-tab-key="{data_tab_key}" markdown="1">
36 {content}
37 </div>
38 """.strip()
39
40 # If adding new entries here, also check if you need to update
41 # tabbed-instructions.js
42 TAB_SECTION_LABELS = {
43 "desktop-web": "Desktop/Web",
44 "ios": "iOS",
45 "android": "Android",
46 "mac": "macOS",
47 "windows": "Windows",
48 "linux": "Linux",
49 "python": "Python",
50 "js": "JavaScript",
51 "curl": "curl",
52 "zulip-send": "zulip-send",
53 "web": "Web",
54 "desktop": "Desktop",
55 "mobile": "Mobile",
56 "mm-default": "Default installation",
57 "mm-cloud": "Cloud instance",
58 "mm-docker": "Docker",
59 "mm-gitlab-omnibus": "GitLab Omnibus",
60 "mm-self-hosting-cloud-export": "Self hosting (cloud export)",
61 "require-invitations": "Require invitations",
62 "allow-anyone-to-join": "Allow anyone to join",
63 "restrict-by-email-domain": "Restrict by email domain",
64 "zoom": "Zoom",
65 "jitsi-meet": "Jitsi Meet",
66 "bigbluebutton": "BigBlueButton",
67 "disable": "Disabled",
68 "chrome": "Chrome",
69 "firefox": "Firefox",
70 "desktop-app": "Desktop app",
71 "system-proxy-settings": "System proxy settings",
72 "custom-proxy-settings": "Custom proxy settings",
73 "stream": "From a stream view",
74 "not-stream": "From other views",
75 "via-recent-conversations": "Via recent conversations",
76 "via-inbox-view": "Via inbox view",
77 "via-left-sidebar": "Via left sidebar",
78 "instructions-for-all-platforms": "Instructions for all platforms",
79 "public-streams": "Public streams",
80 "private-streams": "Private streams",
81 "web-public-streams": "Web-public streams",
82 "via-user-card": "Via user card",
83 "via-user-profile": "Via user profile",
84 "via-organization-settings": "Via organization settings",
85 "via-personal-settings": "Via personal settings",
86 "via-stream-settings": "Via stream settings",
87 "default-subdomain": "Default subdomain",
88 "custom-subdomain": "Custom subdomain",
89 "zulip-cloud-standard": "Zulip Cloud Standard",
90 "zulip-cloud-plus": "Zulip Cloud Plus",
91 "request-sponsorship": "Request sponsorship",
92 "request-education-pricing": "Request education pricing",
93 "zulip-cloud": "Zulip Cloud",
94 "self-hosting": "Self hosting",
95 "okta": "Okta",
96 "onelogin": "OneLogin",
97 "azuread": "AzureAD",
98 "keycloak": "Keycloak",
99 "auth0": "Auth0",
100 "logged-in": "If you are logged in",
101 "logged-out": "If you are logged out",
102 "user": "User",
103 "bot": "Bot",
104 "on-sign-up": "On sign-up",
105 "via-paste": "Via paste",
106 "via-drag-and-drop": "Via drag-and-drop",
107 "via-markdown": "Via Markdown",
108 "via-compose-box-buttons": "Via compose box buttons",
109 "stream-compose": "Compose to a stream",
110 "dm-compose": "Compose a DM",
111 "v8": "Zulip Server 8.0+",
112 "v6": "Zulip Server 6.0+",
113 "v4": "Zulip Server 4.0+",
114 "all-versions": "All versions",
115 }
116
117
118 class TabbedSectionsGenerator(Extension):
119 @override
120 def extendMarkdown(self, md: markdown.Markdown) -> None:
121 md.preprocessors.register(
122 TabbedSectionsPreprocessor(md, self.getConfigs()),
123 "tabbed_sections",
124 PREPROCESSOR_PRIORITES["tabbed_sections"],
125 )
126
127
128 class TabbedSectionsPreprocessor(Preprocessor):
129 def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:
130 super().__init__(md)
131
132 @override
133 def run(self, lines: List[str]) -> List[str]:
134 tab_section = self.parse_tabs(lines)
135 while tab_section:
136 if "tabs" in tab_section:
137 tab_class = "has-tabs"
138 else:
139 tab_class = "no-tabs"
140 tab_section["tabs"] = [
141 {
142 "tab_key": "instructions-for-all-platforms",
143 "start": tab_section["start_tabs_index"],
144 }
145 ]
146 nav_bar = self.generate_nav_bar(tab_section)
147 content_blocks = self.generate_content_blocks(tab_section, lines)
148 rendered_tabs = TABBED_SECTION_TEMPLATE.format(
149 tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks
150 )
151
152 start = tab_section["start_tabs_index"]
153 end = tab_section["end_tabs_index"] + 1
154 lines = [*lines[:start], rendered_tabs, *lines[end:]]
155 tab_section = self.parse_tabs(lines)
156 return lines
157
158 def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:
159 tab_content_blocks = []
160 for index, tab in enumerate(tab_section["tabs"]):
161 start_index = tab["start"] + 1
162 try:
163 # If there are more tabs, we can use the starting index
164 # of the next tab as the ending index of the previous one
165 end_index = tab_section["tabs"][index + 1]["start"]
166 except IndexError:
167 # Otherwise, just use the end of the entire section
168 end_index = tab_section["end_tabs_index"]
169
170 content = "\n".join(lines[start_index:end_index]).strip()
171 tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(
172 data_tab_key=tab["tab_key"],
173 # Wrapping the content in two newlines is necessary here.
174 # If we don't do this, the inner Markdown does not get
175 # rendered properly.
176 content=f"\n{content}\n",
177 )
178 tab_content_blocks.append(tab_content_block)
179 return "\n".join(tab_content_blocks)
180
181 def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:
182 li_elements = []
183 for tab in tab_section["tabs"]:
184 tab_key = tab.get("tab_key")
185 tab_label = TAB_SECTION_LABELS.get(tab_key)
186 if tab_label is None:
187 raise ValueError(
188 f"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py"
189 )
190
191 li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)
192 li_elements.append(li)
193
194 return NAV_BAR_TEMPLATE.format(tabs="\n".join(li_elements))
195
196 def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:
197 block: Dict[str, Any] = {}
198 for index, line in enumerate(lines):
199 start_match = START_TABBED_SECTION_REGEX.search(line)
200 if start_match:
201 block["start_tabs_index"] = index
202
203 tab_content_match = TAB_CONTENT_REGEX.search(line)
204 if tab_content_match:
205 block.setdefault("tabs", [])
206 tab = {"start": index, "tab_key": tab_content_match.group(1)}
207 block["tabs"].append(tab)
208
209 end_match = END_TABBED_SECTION_REGEX.search(line)
210 if end_match:
211 block["end_tabs_index"] = index
212 break
213 return block
214
215
216 def makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:
217 return TabbedSectionsGenerator(**kwargs)
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py
--- a/zerver/lib/markdown/tabbed_sections.py
+++ b/zerver/lib/markdown/tabbed_sections.py
@@ -112,6 +112,8 @@
"v6": "Zulip Server 6.0+",
"v4": "Zulip Server 4.0+",
"all-versions": "All versions",
+ "for-a-bot": "For a bot",
+ "for-yourself": "For yourself",
}
|
{"golden_diff": "diff --git a/zerver/lib/markdown/tabbed_sections.py b/zerver/lib/markdown/tabbed_sections.py\n--- a/zerver/lib/markdown/tabbed_sections.py\n+++ b/zerver/lib/markdown/tabbed_sections.py\n@@ -112,6 +112,8 @@\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n+ \"for-a-bot\": \"For a bot\",\n+ \"for-yourself\": \"For yourself\",\n }\n", "issue": "Add instructions to download .zuliprc file\nhttps://zulip.com/api/configuring-python-bindings describes .zuliprc files, but does not give instructions for where download them. We should fix this.\r\n\r\n- [ ] Add instructions for downloading a bot's .zuliprc file and your .zuliprc file to https://zulip.com/api/configuring-python-bindings. We'll might want to add some section headings to this page as part of this change. The instructions should have tabs for downloading the file for a bot vs. for yourself.\r\n\r\n- [ ] Your own .zuliprc file is downloaded via the \"Show/change your API key\" on SETTINGS / ACCOUNT & PRIVACY. While we're here, let's rename that button to \"Manage your API key\".\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n \"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n 
\"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud-standard\": \"Zulip Cloud Standard\",\n \"zulip-cloud-plus\": \"Zulip Cloud Plus\",\n \"request-sponsorship\": \"Request sponsorship\",\n \"request-education-pricing\": \"Request education pricing\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v8\": \"Zulip Server 8.0+\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # 
rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, List, Mapping, Optional\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.preprocessors import Preprocessor\nfrom typing_extensions import override\n\nfrom zerver.lib.markdown.priorities import PREPROCESSOR_PRIORITES\n\nSTART_TABBED_SECTION_REGEX = re.compile(r\"^\\{start_tabs\\}$\")\nEND_TABBED_SECTION_REGEX = re.compile(r\"^\\{end_tabs\\}$\")\nTAB_CONTENT_REGEX = re.compile(r\"^\\{tab\\|([^}]+)\\}$\")\n\nTABBED_SECTION_TEMPLATE = \"\"\"\n<div class=\"tabbed-section {tab_class}\" markdown=\"1\">\n{nav_bar}\n<div class=\"blocks\">\n{blocks}\n</div>\n</div>\n\"\"\".strip()\n\nNAV_BAR_TEMPLATE = \"\"\"\n<ul class=\"nav\">\n{tabs}\n</ul>\n\"\"\".strip()\n\nNAV_LIST_ITEM_TEMPLATE = \"\"\"\n<li data-tab-key=\"{data_tab_key}\" tabindex=\"0\">{label}</li>\n\"\"\".strip()\n\nDIV_TAB_CONTENT_TEMPLATE = \"\"\"\n<div data-tab-key=\"{data_tab_key}\" markdown=\"1\">\n{content}\n</div>\n\"\"\".strip()\n\n# If adding new entries here, also check if you need to update\n# tabbed-instructions.js\nTAB_SECTION_LABELS = {\n \"desktop-web\": \"Desktop/Web\",\n \"ios\": \"iOS\",\n \"android\": \"Android\",\n \"mac\": \"macOS\",\n \"windows\": \"Windows\",\n \"linux\": \"Linux\",\n \"python\": \"Python\",\n \"js\": \"JavaScript\",\n \"curl\": \"curl\",\n \"zulip-send\": \"zulip-send\",\n \"web\": \"Web\",\n \"desktop\": \"Desktop\",\n \"mobile\": \"Mobile\",\n \"mm-default\": \"Default installation\",\n \"mm-cloud\": \"Cloud instance\",\n \"mm-docker\": \"Docker\",\n \"mm-gitlab-omnibus\": \"GitLab Omnibus\",\n \"mm-self-hosting-cloud-export\": \"Self hosting (cloud export)\",\n \"require-invitations\": \"Require invitations\",\n \"allow-anyone-to-join\": \"Allow anyone to join\",\n \"restrict-by-email-domain\": \"Restrict by email domain\",\n \"zoom\": \"Zoom\",\n \"jitsi-meet\": \"Jitsi Meet\",\n \"bigbluebutton\": \"BigBlueButton\",\n \"disable\": \"Disabled\",\n \"chrome\": \"Chrome\",\n \"firefox\": \"Firefox\",\n \"desktop-app\": \"Desktop app\",\n \"system-proxy-settings\": \"System proxy settings\",\n 
\"custom-proxy-settings\": \"Custom proxy settings\",\n \"stream\": \"From a stream view\",\n \"not-stream\": \"From other views\",\n \"via-recent-conversations\": \"Via recent conversations\",\n \"via-inbox-view\": \"Via inbox view\",\n \"via-left-sidebar\": \"Via left sidebar\",\n \"instructions-for-all-platforms\": \"Instructions for all platforms\",\n \"public-streams\": \"Public streams\",\n \"private-streams\": \"Private streams\",\n \"web-public-streams\": \"Web-public streams\",\n \"via-user-card\": \"Via user card\",\n \"via-user-profile\": \"Via user profile\",\n \"via-organization-settings\": \"Via organization settings\",\n \"via-personal-settings\": \"Via personal settings\",\n \"via-stream-settings\": \"Via stream settings\",\n \"default-subdomain\": \"Default subdomain\",\n \"custom-subdomain\": \"Custom subdomain\",\n \"zulip-cloud-standard\": \"Zulip Cloud Standard\",\n \"zulip-cloud-plus\": \"Zulip Cloud Plus\",\n \"request-sponsorship\": \"Request sponsorship\",\n \"request-education-pricing\": \"Request education pricing\",\n \"zulip-cloud\": \"Zulip Cloud\",\n \"self-hosting\": \"Self hosting\",\n \"okta\": \"Okta\",\n \"onelogin\": \"OneLogin\",\n \"azuread\": \"AzureAD\",\n \"keycloak\": \"Keycloak\",\n \"auth0\": \"Auth0\",\n \"logged-in\": \"If you are logged in\",\n \"logged-out\": \"If you are logged out\",\n \"user\": \"User\",\n \"bot\": \"Bot\",\n \"on-sign-up\": \"On sign-up\",\n \"via-paste\": \"Via paste\",\n \"via-drag-and-drop\": \"Via drag-and-drop\",\n \"via-markdown\": \"Via Markdown\",\n \"via-compose-box-buttons\": \"Via compose box buttons\",\n \"stream-compose\": \"Compose to a stream\",\n \"dm-compose\": \"Compose a DM\",\n \"v8\": \"Zulip Server 8.0+\",\n \"v6\": \"Zulip Server 6.0+\",\n \"v4\": \"Zulip Server 4.0+\",\n \"all-versions\": \"All versions\",\n \"for-a-bot\": \"For a bot\",\n \"for-yourself\": \"For yourself\",\n}\n\n\nclass TabbedSectionsGenerator(Extension):\n @override\n def extendMarkdown(self, md: markdown.Markdown) -> None:\n md.preprocessors.register(\n TabbedSectionsPreprocessor(md, self.getConfigs()),\n \"tabbed_sections\",\n PREPROCESSOR_PRIORITES[\"tabbed_sections\"],\n )\n\n\nclass TabbedSectionsPreprocessor(Preprocessor):\n def __init__(self, md: markdown.Markdown, config: Mapping[str, Any]) -> None:\n super().__init__(md)\n\n @override\n def run(self, lines: List[str]) -> List[str]:\n tab_section = self.parse_tabs(lines)\n while tab_section:\n if \"tabs\" in tab_section:\n tab_class = \"has-tabs\"\n else:\n tab_class = \"no-tabs\"\n tab_section[\"tabs\"] = [\n {\n \"tab_key\": \"instructions-for-all-platforms\",\n \"start\": tab_section[\"start_tabs_index\"],\n }\n ]\n nav_bar = self.generate_nav_bar(tab_section)\n content_blocks = self.generate_content_blocks(tab_section, lines)\n rendered_tabs = TABBED_SECTION_TEMPLATE.format(\n tab_class=tab_class, nav_bar=nav_bar, blocks=content_blocks\n )\n\n start = tab_section[\"start_tabs_index\"]\n end = tab_section[\"end_tabs_index\"] + 1\n lines = [*lines[:start], rendered_tabs, *lines[end:]]\n tab_section = self.parse_tabs(lines)\n return lines\n\n def generate_content_blocks(self, tab_section: Dict[str, Any], lines: List[str]) -> str:\n tab_content_blocks = []\n for index, tab in enumerate(tab_section[\"tabs\"]):\n start_index = tab[\"start\"] + 1\n try:\n # If there are more tabs, we can use the starting index\n # of the next tab as the ending index of the previous one\n end_index = tab_section[\"tabs\"][index + 1][\"start\"]\n except IndexError:\n # Otherwise, just use 
the end of the entire section\n end_index = tab_section[\"end_tabs_index\"]\n\n content = \"\\n\".join(lines[start_index:end_index]).strip()\n tab_content_block = DIV_TAB_CONTENT_TEMPLATE.format(\n data_tab_key=tab[\"tab_key\"],\n # Wrapping the content in two newlines is necessary here.\n # If we don't do this, the inner Markdown does not get\n # rendered properly.\n content=f\"\\n{content}\\n\",\n )\n tab_content_blocks.append(tab_content_block)\n return \"\\n\".join(tab_content_blocks)\n\n def generate_nav_bar(self, tab_section: Dict[str, Any]) -> str:\n li_elements = []\n for tab in tab_section[\"tabs\"]:\n tab_key = tab.get(\"tab_key\")\n tab_label = TAB_SECTION_LABELS.get(tab_key)\n if tab_label is None:\n raise ValueError(\n f\"Tab '{tab_key}' is not present in TAB_SECTION_LABELS in zerver/lib/markdown/tabbed_sections.py\"\n )\n\n li = NAV_LIST_ITEM_TEMPLATE.format(data_tab_key=tab_key, label=tab_label)\n li_elements.append(li)\n\n return NAV_BAR_TEMPLATE.format(tabs=\"\\n\".join(li_elements))\n\n def parse_tabs(self, lines: List[str]) -> Optional[Dict[str, Any]]:\n block: Dict[str, Any] = {}\n for index, line in enumerate(lines):\n start_match = START_TABBED_SECTION_REGEX.search(line)\n if start_match:\n block[\"start_tabs_index\"] = index\n\n tab_content_match = TAB_CONTENT_REGEX.search(line)\n if tab_content_match:\n block.setdefault(\"tabs\", [])\n tab = {\"start\": index, \"tab_key\": tab_content_match.group(1)}\n block[\"tabs\"].append(tab)\n\n end_match = END_TABBED_SECTION_REGEX.search(line)\n if end_match:\n block[\"end_tabs_index\"] = index\n break\n return block\n\n\ndef makeExtension(*args: Any, **kwargs: str) -> TabbedSectionsGenerator:\n return TabbedSectionsGenerator(**kwargs)\n", "path": "zerver/lib/markdown/tabbed_sections.py"}]}
| 2,875 | 128 |
gh_patches_debug_37994
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-656
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Heartbeat max timeout
Let's make sure the heartbeat times out before 30 seconds. Otherwise uWSGI hara kiris the web head and we don't have a chance to send back the result of the heartbeat where it tells us which backend fails.
I suggest 15 seconds max by default + an option.
--- END ISSUE ---
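
For readers skimming the issue, here is a minimal sketch of the kind of bounded wait being requested, using `concurrent.futures`; the 15-second default, the helper name, and its arguments are illustrative assumptions drawn from the issue's suggestion, not the project's actual API:

```python
from concurrent.futures import ThreadPoolExecutor, wait


def run_heartbeats_with_timeout(heartbeats, request, timeout_seconds=15.0):
    # Run every registered heartbeat callable concurrently and stop waiting
    # after `timeout_seconds`, so a single stuck backend cannot hold the
    # request long enough for uWSGI to kill the worker.
    status = {name: False for name in heartbeats}

    def check(name, func):
        status[name] = func(request)

    pool = ThreadPoolExecutor(max_workers=max(1, len(heartbeats)))
    futures = [pool.submit(check, name, func) for name, func in heartbeats.items()]
    wait(futures, timeout=timeout_seconds)
    # Do not block on stragglers; anything still running is reported as failed.
    pool.shutdown(wait=False)
    return status
```

The executor is shut down with `wait=False` deliberately: using it as a context manager would block on exit until every heartbeat finished, defeating the timeout.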
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/__init__.py`
Content:
```
1 """Main entry point
2 """
3 import pkg_resources
4
5 from cornice import Service as CorniceService
6 from pyramid.settings import aslist
7
8 from kinto.core import authentication
9 from kinto.core import errors
10 from kinto.core import events
11 from kinto.core.initialization import ( # NOQA
12 initialize, install_middlewares,
13 load_default_settings)
14 from kinto.core.utils import (
15 follow_subrequest, current_service, current_resource_name)
16 from kinto.core.logs import logger
17
18
19 # Module version, as defined in PEP-0396.
20 __version__ = pkg_resources.get_distribution('kinto').version # FIXME?
21
22
23 DEFAULT_SETTINGS = {
24 'backoff': None,
25 'batch_max_requests': 25,
26 'cache_backend': '',
27 'cache_url': '',
28 'cache_pool_size': 25,
29 'cache_prefix': '',
30 'cors_origins': '*',
31 'cors_max_age_seconds': 3600,
32 'eos': None,
33 'eos_message': None,
34 'eos_url': None,
35 'error_info_link': 'https://github.com/Kinto/kinto/issues/',
36 'http_host': None,
37 'http_scheme': None,
38 'id_generator': 'kinto.core.storage.generators.UUID4',
39 'includes': '',
40 'initialization_sequence': (
41 'kinto.core.initialization.setup_request_bound_data',
42 'kinto.core.initialization.setup_json_serializer',
43 'kinto.core.initialization.setup_logging',
44 'kinto.core.initialization.setup_storage',
45 'kinto.core.initialization.setup_permission',
46 'kinto.core.initialization.setup_cache',
47 'kinto.core.initialization.setup_requests_scheme',
48 'kinto.core.initialization.setup_version_redirection',
49 'kinto.core.initialization.setup_deprecation',
50 'kinto.core.initialization.setup_authentication',
51 'kinto.core.initialization.setup_backoff',
52 'kinto.core.initialization.setup_statsd',
53 'kinto.core.initialization.setup_listeners',
54 'kinto.core.events.setup_transaction_hook',
55 ),
56 'event_listeners': '',
57 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',
58 'newrelic_config': None,
59 'newrelic_env': 'dev',
60 'paginate_by': None,
61 'permission_backend': '',
62 'permission_url': '',
63 'permission_pool_size': 25,
64 'profiler_dir': '/tmp',
65 'profiler_enabled': False,
66 'project_docs': '',
67 'project_name': '',
68 'project_version': '',
69 'readonly': False,
70 'retry_after_seconds': 30,
71 'statsd_prefix': 'kinto.core',
72 'statsd_url': None,
73 'storage_backend': '',
74 'storage_url': '',
75 'storage_max_fetch_size': 10000,
76 'storage_pool_size': 25,
77 'tm.annotate_user': False, # Do annotate transactions with the user-id.
78 'transaction_per_request': True,
79 'userid_hmac_secret': '',
80 'version_prefix_redirect_enabled': True,
81 'trailing_slash_redirect_enabled': True,
82 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',
83 'multiauth.policies': 'basicauth',
84 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'
85 'BasicAuthAuthenticationPolicy'),
86 'multiauth.authorization_policy': ('kinto.core.authorization.'
87 'AuthorizationPolicy')
88 }
89
90
91 class Service(CorniceService):
92 """Subclass of the default cornice service.
93
94 This is useful in order to attach specific behaviours without monkey
95 patching the default cornice service (which would impact other uses of it)
96 """
97 default_cors_headers = ('Backoff', 'Retry-After', 'Alert',
98 'Content-Length')
99
100 def error_handler(self, error):
101 return errors.json_error_handler(error)
102
103 @classmethod
104 def init_from_settings(cls, settings):
105 cls.cors_origins = tuple(aslist(settings['cors_origins']))
106 cors_max_age = settings['cors_max_age_seconds']
107 cls.cors_max_age = int(cors_max_age) if cors_max_age else None
108
109
110 def includeme(config):
111 settings = config.get_settings()
112
113 # Heartbeat registry.
114 config.registry.heartbeats = {}
115
116 # Public settings registry.
117 config.registry.public_settings = {'batch_max_requests', 'readonly'}
118
119 # Directive to declare arbitrary API capabilities.
120 def add_api_capability(config, identifier, description="", url="", **kw):
121 existing = config.registry.api_capabilities.get(identifier)
122 if existing:
123 error_msg = "The '%s' API capability was already registered (%s)."
124 raise ValueError(error_msg % (identifier, existing))
125
126 capability = dict(description=description, url=url, **kw)
127 config.registry.api_capabilities[identifier] = capability
128
129 config.add_directive('add_api_capability', add_api_capability)
130 config.registry.api_capabilities = {}
131
132 # Resource events helpers.
133 config.add_request_method(events.get_resource_events,
134 name='get_resource_events')
135 config.add_request_method(events.notify_resource_event,
136 name='notify_resource_event')
137
138 # Setup cornice.
139 config.include("cornice")
140
141 # Per-request transaction.
142 config.include("pyramid_tm")
143
144 # Add CORS settings to the base kinto.core Service class.
145 Service.init_from_settings(settings)
146
147 # Setup components.
148 for step in aslist(settings['initialization_sequence']):
149 step_func = config.maybe_dotted(step)
150 step_func(config)
151
152 # Custom helpers.
153 config.add_request_method(follow_subrequest)
154 config.add_request_method(authentication.prefixed_userid, property=True)
155 config.add_request_method(lambda r: {'id': r.prefixed_userid},
156 name='get_user_info')
157 config.add_request_method(current_resource_name, reify=True)
158 config.add_request_method(current_service, reify=True)
159 config.commit()
160
161 # Include plugins after init, unlike pyramid includes.
162 includes = aslist(settings['includes'])
163 for app in includes:
164 config.include(app)
165
166 # # Show settings to output.
167 # for key, value in settings.items():
168 # logger.info('Using %s = %s' % (key, value))
169
170 # Scan views.
171 config.scan("kinto.core.views")
172
173 # Give sign of life.
174 msg = "%(project_name)s %(project_version)s starting."
175 logger.info(msg % settings)
176
```
Path: `setup.py`
Content:
```
1 import platform
2 import codecs
3 import os
4 from setuptools import setup, find_packages
5
6 here = os.path.abspath(os.path.dirname(__file__))
7
8 def read_file(filename):
9 """Open a related file and return its content."""
10 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
11 content = f.read()
12 return content
13
14 README = read_file('README.rst')
15 CHANGELOG = read_file('CHANGELOG.rst')
16 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
17
18 installed_with_pypy = platform.python_implementation() == 'PyPy'
19
20 REQUIREMENTS = [
21 'colander',
22 'colorama',
23 'cornice >= 1.1', # Fix cache CORS
24 'jsonschema',
25 'python-dateutil',
26 'pyramid_multiauth >= 0.8', # User on policy selected event.
27 'pyramid_tm',
28 'redis', # Default backend
29 'requests',
30 'six',
31 'structlog >= 16.1.0',
32 'enum34',
33 'waitress',
34 ]
35
36 if installed_with_pypy:
37 # We install psycopg2cffi instead of psycopg2 when dealing with pypy
38 # Note: JSONB support landed after psycopg2cffi 2.7.0
39 POSTGRESQL_REQUIRES = [
40 'SQLAlchemy',
41 'psycopg2cffi>2.7.0',
42 'zope.sqlalchemy',
43 ]
44 else:
45 # ujson is not pypy compliant, as it uses the CPython C API
46 REQUIREMENTS.append('ujson >= 1.35')
47 POSTGRESQL_REQUIRES = [
48 'SQLAlchemy',
49 'psycopg2>2.5',
50 'zope.sqlalchemy',
51 ]
52
53 DEPENDENCY_LINKS = [
54 ]
55
56 MONITORING_REQUIRES = [
57 'raven',
58 'statsd',
59 'newrelic',
60 'werkzeug',
61 ]
62
63 ENTRY_POINTS = {
64 'paste.app_factory': [
65 'main = kinto:main',
66 ],
67 'console_scripts': [
68 'kinto = kinto.__main__:main'
69 ],
70 }
71
72
73 setup(name='kinto',
74 version='3.1.1.dev0',
75 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
76 long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
77 license='Apache License (2.0)',
78 classifiers=[
79 "Programming Language :: Python",
80 "Programming Language :: Python :: 2",
81 "Programming Language :: Python :: 2.7",
82 "Programming Language :: Python :: 3",
83 "Programming Language :: Python :: 3.4",
84 "Programming Language :: Python :: 3.5",
85 "Programming Language :: Python :: Implementation :: CPython",
86 "Programming Language :: Python :: Implementation :: PyPy",
87 "Topic :: Internet :: WWW/HTTP",
88 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
89 "License :: OSI Approved :: Apache Software License"
90 ],
91 keywords="web sync json storage services",
92 author='Mozilla Services',
93 author_email='[email protected]',
94 url='https://github.com/Kinto/kinto',
95 packages=find_packages(),
96 package_data={'': ['*.rst', '*.py']},
97 include_package_data=True,
98 zip_safe=False,
99 install_requires=REQUIREMENTS,
100 extras_require={
101 'postgresql': POSTGRESQL_REQUIRES,
102 'monitoring': MONITORING_REQUIRES,
103 ":python_version=='2.7'": ["functools32"],
104 },
105 test_suite="kinto.tests",
106 dependency_links=DEPENDENCY_LINKS,
107 entry_points=ENTRY_POINTS)
108
```
Path: `kinto/core/views/heartbeat.py`
Content:
```
1 from pyramid.security import NO_PERMISSION_REQUIRED
2
3 from kinto.core import Service
4
5 heartbeat = Service(name="heartbeat", path='/__heartbeat__',
6 description="Server health")
7
8
9 @heartbeat.get(permission=NO_PERMISSION_REQUIRED)
10 def get_heartbeat(request):
11 """Return information about server health."""
12 status = {}
13
14 heartbeats = request.registry.heartbeats
15 for name, callable in heartbeats.items():
16 status[name] = callable(request)
17
18 has_error = not all([v or v is None for v in status.values()])
19 if has_error:
20 request.response.status = 503
21
22 return status
23
24
25 lbheartbeat = Service(name="lbheartbeat", path='/__lbheartbeat__',
26 description="Web head health")
27
28
29 @lbheartbeat.get(permission=NO_PERMISSION_REQUIRED)
30 def get_lbheartbeat(request):
31 """Return successful healthy response.
32
33 If the load-balancer tries to access this URL and fails, this means the
34 Web head is not operational and should be dropped.
35 """
36 status = {}
37 return status
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py
--- a/kinto/core/__init__.py
+++ b/kinto/core/__init__.py
@@ -54,6 +54,7 @@
'kinto.core.events.setup_transaction_hook',
),
'event_listeners': '',
+ 'heartbeat_timeout_seconds': 10,
'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',
'newrelic_config': None,
'newrelic_env': 'dev',
diff --git a/kinto/core/views/heartbeat.py b/kinto/core/views/heartbeat.py
--- a/kinto/core/views/heartbeat.py
+++ b/kinto/core/views/heartbeat.py
@@ -1,7 +1,10 @@
+from concurrent.futures import ThreadPoolExecutor, wait
from pyramid.security import NO_PERMISSION_REQUIRED
+from kinto import logger
from kinto.core import Service
+
heartbeat = Service(name="heartbeat", path='/__heartbeat__',
description="Server health")
@@ -11,10 +14,38 @@
"""Return information about server health."""
status = {}
+ def heartbeat_check(name, func):
+ status[name] = False
+ status[name] = func(request)
+
+ # Start executing heartbeats concurrently.
heartbeats = request.registry.heartbeats
- for name, callable in heartbeats.items():
- status[name] = callable(request)
+ pool = ThreadPoolExecutor(max_workers=max(1, len(heartbeats.keys())))
+ futures = []
+ for name, func in heartbeats.items():
+ future = pool.submit(heartbeat_check, name, func)
+ future.__heartbeat_name = name # For logging purposes.
+ futures.append(future)
+
+ # Wait for the results, with timeout.
+ seconds = float(request.registry.settings['heartbeat_timeout_seconds'])
+ done, not_done = wait(futures, timeout=seconds)
+
+ # A heartbeat is supposed to return True or False, and never raise.
+ # Just in case, go though results to spot any potential exception.
+ for future in done:
+ exc = future.exception()
+ if exc is not None:
+ logger.error("%r heartbeat failed." % future.__heartbeat_name)
+ logger.error(exc)
+
+ # Log timed-out heartbeats.
+ for future in not_done:
+ name = future.__heartbeat_name
+ error_msg = "%r heartbeat has exceeded timeout of %s seconds."
+ logger.error(error_msg % (name, seconds))
+ # If any has failed, return a 503 error response.
has_error = not all([v or v is None for v in status.values()])
if has_error:
request.response.status = 503
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -100,7 +100,7 @@
extras_require={
'postgresql': POSTGRESQL_REQUIRES,
'monitoring': MONITORING_REQUIRES,
- ":python_version=='2.7'": ["functools32"],
+ ":python_version=='2.7'": ["functools32", "futures"],
},
test_suite="kinto.tests",
dependency_links=DEPENDENCY_LINKS,
|
{"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -54,6 +54,7 @@\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n+ 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\ndiff --git a/kinto/core/views/heartbeat.py b/kinto/core/views/heartbeat.py\n--- a/kinto/core/views/heartbeat.py\n+++ b/kinto/core/views/heartbeat.py\n@@ -1,7 +1,10 @@\n+from concurrent.futures import ThreadPoolExecutor, wait\n from pyramid.security import NO_PERMISSION_REQUIRED\n \n+from kinto import logger\n from kinto.core import Service\n \n+\n heartbeat = Service(name=\"heartbeat\", path='/__heartbeat__',\n description=\"Server health\")\n \n@@ -11,10 +14,38 @@\n \"\"\"Return information about server health.\"\"\"\n status = {}\n \n+ def heartbeat_check(name, func):\n+ status[name] = False\n+ status[name] = func(request)\n+\n+ # Start executing heartbeats concurrently.\n heartbeats = request.registry.heartbeats\n- for name, callable in heartbeats.items():\n- status[name] = callable(request)\n+ pool = ThreadPoolExecutor(max_workers=max(1, len(heartbeats.keys())))\n+ futures = []\n+ for name, func in heartbeats.items():\n+ future = pool.submit(heartbeat_check, name, func)\n+ future.__heartbeat_name = name # For logging purposes.\n+ futures.append(future)\n+\n+ # Wait for the results, with timeout.\n+ seconds = float(request.registry.settings['heartbeat_timeout_seconds'])\n+ done, not_done = wait(futures, timeout=seconds)\n+\n+ # A heartbeat is supposed to return True or False, and never raise.\n+ # Just in case, go though results to spot any potential exception.\n+ for future in done:\n+ exc = future.exception()\n+ if exc is not None:\n+ logger.error(\"%r heartbeat failed.\" % future.__heartbeat_name)\n+ logger.error(exc)\n+\n+ # Log timed-out heartbeats.\n+ for future in not_done:\n+ name = future.__heartbeat_name\n+ error_msg = \"%r heartbeat has exceeded timeout of %s seconds.\"\n+ logger.error(error_msg % (name, seconds))\n \n+ # If any has failed, return a 503 error response.\n has_error = not all([v or v is None for v in status.values()])\n if has_error:\n request.response.status = 503\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -100,7 +100,7 @@\n extras_require={\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n- \":python_version=='2.7'\": [\"functools32\"],\n+ \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"kinto.tests\",\n dependency_links=DEPENDENCY_LINKS,\n", "issue": "Heartbeat max timeout\nLet's make sure the heartbeat times out before 30 seconds. 
Otherwise uWSGI hara kiris the web head and we don't have a chance to send back the result of the heartbeat where it tells us which backend fails.\n\nI suggest 15 seconds max by defaut + an option.\n\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import authentication\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': '/tmp',\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy')\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, error):\n return errors.json_error_handler(error)\n\n @classmethod\n def 
init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '%s' API capability was already registered (%s).\"\n raise ValueError(error_msg % (identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(authentication.prefixed_userid, property=True)\n config.add_request_method(lambda r: {'id': r.prefixed_userid},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using %s = %s' % (key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"%(project_name)s %(project_version)s starting.\"\n logger.info(msg % settings)\n", "path": "kinto/core/__init__.py"}, {"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 1.1', # Fix cache CORS\n 'jsonschema',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'pyramid_tm',\n 'redis', # Default backend\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it 
uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='3.1.1.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\"],\n },\n test_suite=\"kinto.tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}, {"content": "from pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto.core import Service\n\nheartbeat = Service(name=\"heartbeat\", path='/__heartbeat__',\n description=\"Server health\")\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef get_heartbeat(request):\n \"\"\"Return information about server health.\"\"\"\n status = {}\n\n heartbeats = request.registry.heartbeats\n for name, callable in heartbeats.items():\n status[name] = callable(request)\n\n has_error = not all([v or v is None for v in status.values()])\n if has_error:\n request.response.status = 503\n\n return status\n\n\nlbheartbeat = Service(name=\"lbheartbeat\", path='/__lbheartbeat__',\n description=\"Web head health\")\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef get_lbheartbeat(request):\n \"\"\"Return successful healthy response.\n\n If the load-balancer tries to access this URL and fails, this means the\n Web head is not operational and should be dropped.\n \"\"\"\n status = {}\n return status\n", "path": "kinto/core/views/heartbeat.py"}], "after_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import authentication\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 
'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': '/tmp',\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy')\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, error):\n return errors.json_error_handler(error)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '%s' API capability was already registered (%s).\"\n raise ValueError(error_msg % (identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n 
config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(authentication.prefixed_userid, property=True)\n config.add_request_method(lambda r: {'id': r.prefixed_userid},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using %s = %s' % (key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"%(project_name)s %(project_version)s starting.\"\n logger.info(msg % settings)\n", "path": "kinto/core/__init__.py"}, {"content": "import platform\nimport codecs\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\ninstalled_with_pypy = platform.python_implementation() == 'PyPy'\n\nREQUIREMENTS = [\n 'colander',\n 'colorama',\n 'cornice >= 1.1', # Fix cache CORS\n 'jsonschema',\n 'python-dateutil',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'pyramid_tm',\n 'redis', # Default backend\n 'requests',\n 'six',\n 'structlog >= 16.1.0',\n 'enum34',\n 'waitress',\n]\n\nif installed_with_pypy:\n # We install psycopg2cffi instead of psycopg2 when dealing with pypy\n # Note: JSONB support landed after psycopg2cffi 2.7.0\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2cffi>2.7.0',\n 'zope.sqlalchemy',\n ]\nelse:\n # ujson is not pypy compliant, as it uses the CPython C API\n REQUIREMENTS.append('ujson >= 1.35')\n POSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2>2.5',\n 'zope.sqlalchemy',\n ]\n\nDEPENDENCY_LINKS = [\n]\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='3.1.1.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n 
\"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web sync json storage services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py']},\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n \":python_version=='2.7'\": [\"functools32\", \"futures\"],\n },\n test_suite=\"kinto.tests\",\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}, {"content": "from concurrent.futures import ThreadPoolExecutor, wait\nfrom pyramid.security import NO_PERMISSION_REQUIRED\n\nfrom kinto import logger\nfrom kinto.core import Service\n\n\nheartbeat = Service(name=\"heartbeat\", path='/__heartbeat__',\n description=\"Server health\")\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef get_heartbeat(request):\n \"\"\"Return information about server health.\"\"\"\n status = {}\n\n def heartbeat_check(name, func):\n status[name] = False\n status[name] = func(request)\n\n # Start executing heartbeats concurrently.\n heartbeats = request.registry.heartbeats\n pool = ThreadPoolExecutor(max_workers=max(1, len(heartbeats.keys())))\n futures = []\n for name, func in heartbeats.items():\n future = pool.submit(heartbeat_check, name, func)\n future.__heartbeat_name = name # For logging purposes.\n futures.append(future)\n\n # Wait for the results, with timeout.\n seconds = float(request.registry.settings['heartbeat_timeout_seconds'])\n done, not_done = wait(futures, timeout=seconds)\n\n # A heartbeat is supposed to return True or False, and never raise.\n # Just in case, go though results to spot any potential exception.\n for future in done:\n exc = future.exception()\n if exc is not None:\n logger.error(\"%r heartbeat failed.\" % future.__heartbeat_name)\n logger.error(exc)\n\n # Log timed-out heartbeats.\n for future in not_done:\n name = future.__heartbeat_name\n error_msg = \"%r heartbeat has exceeded timeout of %s seconds.\"\n logger.error(error_msg % (name, seconds))\n\n # If any has failed, return a 503 error response.\n has_error = not all([v or v is None for v in status.values()])\n if has_error:\n request.response.status = 503\n\n return status\n\n\nlbheartbeat = Service(name=\"lbheartbeat\", path='/__lbheartbeat__',\n description=\"Web head health\")\n\n\[email protected](permission=NO_PERMISSION_REQUIRED)\ndef get_lbheartbeat(request):\n \"\"\"Return successful healthy response.\n\n If the load-balancer tries to access this URL and fails, this means the\n Web head is not operational and should be dropped.\n \"\"\"\n status = {}\n return status\n", "path": "kinto/core/views/heartbeat.py"}]}
| 3,481 | 731 |
gh_patches_debug_28821
|
rasdani/github-patches
|
git_diff
|
huggingface__text-generation-inference-114
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add typical sampling
HF has TypicalWarper and `typical_p`. Would be nice to be able to pass that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/text_generation_server/utils/tokens.py`
Content:
```
1 import re
2 import torch
3
4 from transformers import (
5 LogitsProcessorList,
6 TemperatureLogitsWarper,
7 TopKLogitsWarper,
8 TopPLogitsWarper,
9 RepetitionPenaltyLogitsProcessor,
10 PreTrainedTokenizerBase,
11 )
12 from typing import List, Tuple, Optional
13
14 from text_generation_server.pb import generate_pb2
15 from text_generation_server.pb.generate_pb2 import FinishReason
16 from text_generation_server.utils.watermark import WatermarkLogitsProcessor
17
18
19 class Sampling:
20 def __init__(self, seed: int, device: str = "cpu"):
21 self.generator = torch.Generator(device)
22 self.generator.manual_seed(seed)
23 self.seed = seed
24
25 def __call__(self, logits):
26 probs = torch.nn.functional.softmax(logits)
27 next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator)
28 return next_tokens
29
30
31 class Greedy:
32 def __call__(self, logits):
33 return logits.argmax()
34
35
36 class NextTokenChooser:
37 def __init__(
38 self,
39 watermark=False,
40 temperature=1.0,
41 repetition_penalty=1.0,
42 top_k=None,
43 top_p=None,
44 do_sample=False,
45 seed=0,
46 device="cpu",
47 ):
48 warpers = LogitsProcessorList()
49 # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
50 # all samplers can be found in `generation_utils_samplers.py`
51 sampling = do_sample
52
53 if watermark:
54 warpers.append(WatermarkLogitsProcessor(device=device))
55 if repetition_penalty is not None and repetition_penalty != 1.0:
56 warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))
57 if temperature is not None and temperature != 1.0:
58 temperature = float(temperature)
59 warpers.append(TemperatureLogitsWarper(temperature))
60 sampling = True
61 if top_k is not None and top_k != 0:
62 warpers.append(TopKLogitsWarper(top_k=top_k))
63 sampling = True
64 if top_p is not None and top_p < 1.0:
65 warpers.append(TopPLogitsWarper(top_p=top_p))
66 sampling = True
67
68 self.warpers = warpers
69 self.choice = Sampling(seed, device) if sampling else Greedy()
70
71 def __call__(self, input_ids, scores):
72 # Warp logits
73 scores = self.warpers(input_ids, scores)
74
75 # Compute logprobs
76 logprobs = torch.log_softmax(scores, -1)
77
78 # Choose tokens
79 next_id = self.choice(scores[-1])
80
81 return next_id.view(1, 1), logprobs
82
83 @classmethod
84 def from_pb(
85 cls,
86 pb: generate_pb2.NextTokenChooserParameters,
87 device: torch.device,
88 ) -> "NextTokenChooser":
89 return NextTokenChooser(
90 watermark=pb.watermark,
91 temperature=pb.temperature,
92 repetition_penalty=pb.repetition_penalty,
93 top_k=pb.top_k,
94 top_p=pb.top_p,
95 do_sample=pb.do_sample,
96 seed=pb.seed,
97 device=device,
98 )
99
100
101 class StopSequenceCriteria:
102 def __init__(self, stop_sequence: str):
103 self.regex = re.compile(f".*{stop_sequence}$")
104
105 def __call__(self, output: str) -> bool:
106 if self.regex.findall(output):
107 return True
108 return False
109
110
111 class StoppingCriteria:
112 def __init__(
113 self,
114 eos_token_id: int,
115 stop_sequence_criterias: List[StopSequenceCriteria],
116 max_new_tokens=20,
117 ):
118 self.eos_token_id = eos_token_id
119 self.stop_sequence_criterias = stop_sequence_criterias
120 self.max_new_tokens = max_new_tokens
121 self.current_tokens = 0
122 self.current_output = ""
123
124 def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:
125 self.current_tokens += 1
126 if self.current_tokens >= self.max_new_tokens:
127 return True, FinishReason.FINISH_REASON_LENGTH
128
129 if last_token == self.eos_token_id:
130 return True, FinishReason.FINISH_REASON_EOS_TOKEN
131
132 self.current_output += last_output
133 for stop_sequence_criteria in self.stop_sequence_criterias:
134 if stop_sequence_criteria(self.current_output):
135 return True, FinishReason.FINISH_REASON_STOP_SEQUENCE
136
137 return False, None
138
139 @classmethod
140 def from_pb(
141 cls,
142 pb: generate_pb2.StoppingCriteriaParameters,
143 tokenizer: PreTrainedTokenizerBase,
144 ) -> "StoppingCriteria":
145 stop_sequence_criterias = [
146 StopSequenceCriteria(sequence) for sequence in pb.stop_sequences
147 ]
148 return StoppingCriteria(
149 tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens
150 )
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -6,6 +6,7 @@
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
+ TypicalLogitsWarper,
RepetitionPenaltyLogitsProcessor,
PreTrainedTokenizerBase,
)
@@ -41,6 +42,7 @@
repetition_penalty=1.0,
top_k=None,
top_p=None,
+ typical_p=None,
do_sample=False,
seed=0,
device="cpu",
@@ -64,6 +66,9 @@
if top_p is not None and top_p < 1.0:
warpers.append(TopPLogitsWarper(top_p=top_p))
sampling = True
+ if typical_p is not None and typical_p < 1.0:
+ warpers.append(TypicalLogitsWarper(mass=typical_p))
+ sampling = True
self.warpers = warpers
self.choice = Sampling(seed, device) if sampling else Greedy()
@@ -92,6 +97,7 @@
repetition_penalty=pb.repetition_penalty,
top_k=pb.top_k,
top_p=pb.top_p,
+ typical_p=pb.typical_p,
do_sample=pb.do_sample,
seed=pb.seed,
device=device,
|
{"golden_diff": "diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py\n--- a/server/text_generation_server/utils/tokens.py\n+++ b/server/text_generation_server/utils/tokens.py\n@@ -6,6 +6,7 @@\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n+ TypicalLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n )\n@@ -41,6 +42,7 @@\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n+ typical_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n@@ -64,6 +66,9 @@\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n+ if typical_p is not None and typical_p < 1.0:\n+ warpers.append(TypicalLogitsWarper(mass=typical_p))\n+ sampling = True\n \n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n@@ -92,6 +97,7 @@\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n+ typical_p=pb.typical_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n", "issue": "Add typical sampling\nHF has TypicalWarper and `typical_p`. Would be nice to be able to pass that.\n", "before_files": [{"content": "import re\nimport torch\n\nfrom transformers import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n)\nfrom typing import List, Tuple, Optional\n\nfrom text_generation_server.pb import generate_pb2\nfrom text_generation_server.pb.generate_pb2 import FinishReason\nfrom text_generation_server.utils.watermark import WatermarkLogitsProcessor\n\n\nclass Sampling:\n def __init__(self, seed: int, device: str = \"cpu\"):\n self.generator = torch.Generator(device)\n self.generator.manual_seed(seed)\n self.seed = seed\n\n def __call__(self, logits):\n probs = torch.nn.functional.softmax(logits)\n next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator)\n return next_tokens\n\n\nclass Greedy:\n def __call__(self, logits):\n return logits.argmax()\n\n\nclass NextTokenChooser:\n def __init__(\n self,\n watermark=False,\n temperature=1.0,\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n ):\n warpers = LogitsProcessorList()\n # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files\n # all samplers can be found in `generation_utils_samplers.py`\n sampling = do_sample\n\n if watermark:\n warpers.append(WatermarkLogitsProcessor(device=device))\n if repetition_penalty is not None and repetition_penalty != 1.0:\n warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))\n if temperature is not None and temperature != 1.0:\n temperature = float(temperature)\n warpers.append(TemperatureLogitsWarper(temperature))\n sampling = True\n if top_k is not None and top_k != 0:\n warpers.append(TopKLogitsWarper(top_k=top_k))\n sampling = True\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n\n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n\n def __call__(self, input_ids, scores):\n # Warp logits\n scores = self.warpers(input_ids, scores)\n\n # Compute logprobs\n logprobs = torch.log_softmax(scores, -1)\n\n # Choose tokens\n next_id = self.choice(scores[-1])\n\n return next_id.view(1, 1), logprobs\n\n @classmethod\n def from_pb(\n cls,\n pb: 
generate_pb2.NextTokenChooserParameters,\n device: torch.device,\n ) -> \"NextTokenChooser\":\n return NextTokenChooser(\n watermark=pb.watermark,\n temperature=pb.temperature,\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n )\n\n\nclass StopSequenceCriteria:\n def __init__(self, stop_sequence: str):\n self.regex = re.compile(f\".*{stop_sequence}$\")\n\n def __call__(self, output: str) -> bool:\n if self.regex.findall(output):\n return True\n return False\n\n\nclass StoppingCriteria:\n def __init__(\n self,\n eos_token_id: int,\n stop_sequence_criterias: List[StopSequenceCriteria],\n max_new_tokens=20,\n ):\n self.eos_token_id = eos_token_id\n self.stop_sequence_criterias = stop_sequence_criterias\n self.max_new_tokens = max_new_tokens\n self.current_tokens = 0\n self.current_output = \"\"\n\n def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:\n self.current_tokens += 1\n if self.current_tokens >= self.max_new_tokens:\n return True, FinishReason.FINISH_REASON_LENGTH\n\n if last_token == self.eos_token_id:\n return True, FinishReason.FINISH_REASON_EOS_TOKEN\n\n self.current_output += last_output\n for stop_sequence_criteria in self.stop_sequence_criterias:\n if stop_sequence_criteria(self.current_output):\n return True, FinishReason.FINISH_REASON_STOP_SEQUENCE\n\n return False, None\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.StoppingCriteriaParameters,\n tokenizer: PreTrainedTokenizerBase,\n ) -> \"StoppingCriteria\":\n stop_sequence_criterias = [\n StopSequenceCriteria(sequence) for sequence in pb.stop_sequences\n ]\n return StoppingCriteria(\n tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens\n )\n", "path": "server/text_generation_server/utils/tokens.py"}], "after_files": [{"content": "import re\nimport torch\n\nfrom transformers import (\n LogitsProcessorList,\n TemperatureLogitsWarper,\n TopKLogitsWarper,\n TopPLogitsWarper,\n TypicalLogitsWarper,\n RepetitionPenaltyLogitsProcessor,\n PreTrainedTokenizerBase,\n)\nfrom typing import List, Tuple, Optional\n\nfrom text_generation_server.pb import generate_pb2\nfrom text_generation_server.pb.generate_pb2 import FinishReason\nfrom text_generation_server.utils.watermark import WatermarkLogitsProcessor\n\n\nclass Sampling:\n def __init__(self, seed: int, device: str = \"cpu\"):\n self.generator = torch.Generator(device)\n self.generator.manual_seed(seed)\n self.seed = seed\n\n def __call__(self, logits):\n probs = torch.nn.functional.softmax(logits)\n next_tokens = torch.multinomial(probs, num_samples=1, generator=self.generator)\n return next_tokens\n\n\nclass Greedy:\n def __call__(self, logits):\n return logits.argmax()\n\n\nclass NextTokenChooser:\n def __init__(\n self,\n watermark=False,\n temperature=1.0,\n repetition_penalty=1.0,\n top_k=None,\n top_p=None,\n typical_p=None,\n do_sample=False,\n seed=0,\n device=\"cpu\",\n ):\n warpers = LogitsProcessorList()\n # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files\n # all samplers can be found in `generation_utils_samplers.py`\n sampling = do_sample\n\n if watermark:\n warpers.append(WatermarkLogitsProcessor(device=device))\n if repetition_penalty is not None and repetition_penalty != 1.0:\n warpers.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty))\n if temperature is not None and temperature != 1.0:\n temperature = float(temperature)\n 
warpers.append(TemperatureLogitsWarper(temperature))\n sampling = True\n if top_k is not None and top_k != 0:\n warpers.append(TopKLogitsWarper(top_k=top_k))\n sampling = True\n if top_p is not None and top_p < 1.0:\n warpers.append(TopPLogitsWarper(top_p=top_p))\n sampling = True\n if typical_p is not None and typical_p < 1.0:\n warpers.append(TypicalLogitsWarper(mass=typical_p))\n sampling = True\n\n self.warpers = warpers\n self.choice = Sampling(seed, device) if sampling else Greedy()\n\n def __call__(self, input_ids, scores):\n # Warp logits\n scores = self.warpers(input_ids, scores)\n\n # Compute logprobs\n logprobs = torch.log_softmax(scores, -1)\n\n # Choose tokens\n next_id = self.choice(scores[-1])\n\n return next_id.view(1, 1), logprobs\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.NextTokenChooserParameters,\n device: torch.device,\n ) -> \"NextTokenChooser\":\n return NextTokenChooser(\n watermark=pb.watermark,\n temperature=pb.temperature,\n repetition_penalty=pb.repetition_penalty,\n top_k=pb.top_k,\n top_p=pb.top_p,\n typical_p=pb.typical_p,\n do_sample=pb.do_sample,\n seed=pb.seed,\n device=device,\n )\n\n\nclass StopSequenceCriteria:\n def __init__(self, stop_sequence: str):\n self.regex = re.compile(f\".*{stop_sequence}$\")\n\n def __call__(self, output: str) -> bool:\n if self.regex.findall(output):\n return True\n return False\n\n\nclass StoppingCriteria:\n def __init__(\n self,\n eos_token_id: int,\n stop_sequence_criterias: List[StopSequenceCriteria],\n max_new_tokens=20,\n ):\n self.eos_token_id = eos_token_id\n self.stop_sequence_criterias = stop_sequence_criterias\n self.max_new_tokens = max_new_tokens\n self.current_tokens = 0\n self.current_output = \"\"\n\n def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]:\n self.current_tokens += 1\n if self.current_tokens >= self.max_new_tokens:\n return True, FinishReason.FINISH_REASON_LENGTH\n\n if last_token == self.eos_token_id:\n return True, FinishReason.FINISH_REASON_EOS_TOKEN\n\n self.current_output += last_output\n for stop_sequence_criteria in self.stop_sequence_criterias:\n if stop_sequence_criteria(self.current_output):\n return True, FinishReason.FINISH_REASON_STOP_SEQUENCE\n\n return False, None\n\n @classmethod\n def from_pb(\n cls,\n pb: generate_pb2.StoppingCriteriaParameters,\n tokenizer: PreTrainedTokenizerBase,\n ) -> \"StoppingCriteria\":\n stop_sequence_criterias = [\n StopSequenceCriteria(sequence) for sequence in pb.stop_sequences\n ]\n return StoppingCriteria(\n tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens\n )\n", "path": "server/text_generation_server/utils/tokens.py"}]}
| 1,723 | 333 |
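
As a rough, self-contained sketch of what the `TypicalLogitsWarper(mass=typical_p)` added in the record above actually does — this is not the transformers implementation, and the vocabulary size and `mass=0.5` value in the demo are illustrative assumptions — the filter keeps the tokens whose surprisal is closest to the distribution's entropy until `mass` probability is covered, then samples from the survivors:

```python
import torch


def typical_filter(logits: torch.Tensor, mass: float = 0.9) -> torch.Tensor:
    """Mask out tokens that are not 'locally typical' for this distribution."""
    log_probs = torch.log_softmax(logits, dim=-1)
    probs = log_probs.exp()
    # Entropy of the full next-token distribution.
    entropy = -(probs * log_probs).sum()
    # Distance of each token's surprisal (-log p) from that entropy.
    shifted = ((-log_probs) - entropy).abs()
    order = torch.argsort(shifted)            # most typical tokens first
    cumulative = probs[order].cumsum(dim=-1)  # probability mass covered so far
    cutoff = int((cumulative < mass).sum().item()) + 1  # keep at least one token
    keep = order[:cutoff]
    filtered = torch.full_like(logits, float("-inf"))
    filtered[keep] = logits[keep]
    return filtered


if __name__ == "__main__":
    torch.manual_seed(0)
    fake_logits = torch.randn(50)             # pretend vocabulary of 50 tokens
    filtered = typical_filter(fake_logits, mass=0.5)
    probs = torch.softmax(filtered, dim=-1)   # masked tokens get zero probability
    next_id = torch.multinomial(probs, num_samples=1)
    print(int(next_id))
```

In the patched `NextTokenChooser`, this kind of warper simply slots into the existing `LogitsProcessorList` chain alongside the top-k/top-p warpers, which is why the diff only needs the extra `typical_p` plumbing.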
gh_patches_debug_18896
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-3317
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker compose failing at pip install due to missing dependencies
## Description
<!-- A clear and concise description of what the bug is. -->
When running `docker compose -f docker-compose.yml -f docker-compose.dev.yml up dev-service
`
```
=> [dev-service internal] load .dockerignore 0.0s
=> => transferring context: 143B 0.0s
=> [dev-service internal] load build definition from Dockerfile 0.0s
=> => transferring dockerfile: 2.45kB 0.0s
=> [dev-service internal] load metadata for docker.io/library/python:3.9-buster 0.7s
=> [dev-service 1/13] FROM docker.io/library/python:3.9-buster@sha256:bae5b428ebf32d01a902718b0a58874cbf33d7a4b6a65b7cd7b21d48b0d2e2f1 0.0s
=> [dev-service internal] load build context 0.1s
=> => transferring context: 194.31kB 0.1s
=> CACHED [dev-service 2/13] RUN set -eux; 0.0s
=> CACHED [dev-service 3/13] RUN mkdir -p /etc/apt/keyrings; 0.0s
=> CACHED [dev-service 4/13] RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - ; echo "deb http://apt.postgresql. 0.0s
=> CACHED [dev-service 5/13] RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/node 0.0s
=> CACHED [dev-service 6/13] RUN apt-get update && apt-get install -y --no-install-recommends sudo ca-certificates 0.0s
=> CACHED [dev-service 7/13] RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 0.0s
=> CACHED [dev-service 8/13] RUN apt-get update && apt-get install -y --no-install-recommends postgresql-15 postgresql-client-15 po 0.0s
=> CACHED [dev-service 9/13] WORKDIR /code/ 0.0s
=> CACHED [dev-service 10/13] COPY requirements* ./ 0.0s
=> ERROR [dev-service 11/13] RUN pip install --no-cache-dir -r requirements-dev.txt --force-reinstall sqlalchemy-filters 1.6s
------
> [dev-service 11/13] RUN pip install --no-cache-dir -r requirements-dev.txt --force-reinstall sqlalchemy-filters:
0.826 Collecting sqlalchemy_filters
0.826 Cloning https://github.com/mathesar-foundation/sqlalchemy-filters (to revision models_to_tables) to /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5
0.828 Running command git clone --filter=blob:none --quiet https://github.com/mathesar-foundation/sqlalchemy-filters /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5
1.224 fatal: could not read Username for 'https://github.com': No such device or address
1.243 error: subprocess-exited-with-error
1.243
1.243 × git clone --filter=blob:none --quiet https://github.com/mathesar-foundation/sqlalchemy-filters /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5 did not run successfully.
1.243 │ exit code: 128
1.243 ╰─> See above for output.
1.243
1.243 note: This error originates from a subprocess, and is likely not a problem with pip.
1.246 error: subprocess-exited-with-error
1.246
1.246 × git clone --filter=blob:none --quiet https://github.com/mathesar-foundation/sqlalchemy-filters /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5 did not run successfully.
1.246 │ exit code: 128
1.246 ╰─> See above for output.
1.246
1.246 note: This error originates from a subprocess, and is likely not a problem with pip.
1.541
1.541 [notice] A new release of pip is available: 23.0.1 -> 23.3.1
1.541 [notice] To update, run: pip install --upgrade pip
------
failed to solve: process "/bin/sh -c pip install --no-cache-dir -r ${PYTHON_REQUIREMENTS} --force-reinstall sqlalchemy-filters" did not complete successfully: exit code: 1
```
It is failing because in the requirements .txt there is :
https://github.com/mathesar-foundation/mathesar/blob/08743e6aac39aa3497269e9bb6b4e9b0216f85b9/requirements.txt#L26
and there does not exist any https://github.com/mathesar-foundation/sqlalchemy-filters
(there is though https://github.com/centerofci-archive/sqlalchemy-filters)
This is probably because of recent changes to the org name.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
Dependencies should be resolved with no issue.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Clone a fresh repo or even try to rebuild the image by
`docker compose -f docker-compose.yml -f docker-compose.dev.yml up dev-service --force-recreate --build dev-service
`
## Environment
- OS: macOS 13.3.1;
- Browser: Chrome
- Other info:
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/records/operations/relevance.py`
Content:
```
1 from sqlalchemy import case, select
2 from sqlalchemy_filters import apply_sort
3 from db.types import categories
4 from db.types.operations.convert import get_db_type_enum_from_class
5
6 WEIGHT_4 = 4
7 WEIGHT_3 = 3
8 WEIGHT_2 = 2
9 WEIGHT_1 = 1
10 WEIGHT_0 = 0
11 SCORE_COL = '__mathesar_relevance_score'
12
13
14 def get_rank_and_filter_rows_query(relation, parameters_dict, limit=10):
15 """
16 Given a relation, we use a score-assignment algorithm to rank rows of
17 the relation by the strength of their match with the various
18 parameters given in parameters_dict.
19 """
20 rank_cte = _get_scored_selectable(relation, parameters_dict)
21 filtered_ordered_cte = apply_sort(
22 select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0),
23 {'field': SCORE_COL, 'direction': 'desc'}
24 ).cte()
25 return select(
26 *[filtered_ordered_cte.columns[c] for c in [col.name for col in relation.columns]]
27 ).limit(limit)
28
29
30 def _get_scored_selectable(relation, parameters_dict):
31 return select(
32 relation,
33 sum(
34 [
35 _get_col_score_expr(relation.columns[col_name], val)
36 for col_name, val in parameters_dict.items()
37 ]
38 ).label(SCORE_COL)
39 ).cte()
40
41
42 def _get_col_score_expr(col, param_val):
43 col_type = get_db_type_enum_from_class(col.type.__class__)
44
45 if col_type in categories.STRING_LIKE_TYPES:
46 score_expr = case(
47 (col.ilike(param_val), WEIGHT_4),
48 (col.ilike(param_val + '%'), WEIGHT_3),
49 (col.ilike('%' + param_val + '%'), WEIGHT_2),
50 else_=WEIGHT_0
51 )
52 else:
53 score_expr = case((col == param_val, WEIGHT_4), else_=WEIGHT_0)
54
55 return score_expr
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/records/operations/relevance.py b/db/records/operations/relevance.py
--- a/db/records/operations/relevance.py
+++ b/db/records/operations/relevance.py
@@ -1,5 +1,4 @@
-from sqlalchemy import case, select
-from sqlalchemy_filters import apply_sort
+from sqlalchemy import case, select, desc
from db.types import categories
from db.types.operations.convert import get_db_type_enum_from_class
@@ -18,10 +17,7 @@
parameters given in parameters_dict.
"""
rank_cte = _get_scored_selectable(relation, parameters_dict)
- filtered_ordered_cte = apply_sort(
- select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0),
- {'field': SCORE_COL, 'direction': 'desc'}
- ).cte()
+ filtered_ordered_cte = select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0).order_by(desc(SCORE_COL)).cte()
return select(
*[filtered_ordered_cte.columns[c] for c in [col.name for col in relation.columns]]
).limit(limit)
|
{"golden_diff": "diff --git a/db/records/operations/relevance.py b/db/records/operations/relevance.py\n--- a/db/records/operations/relevance.py\n+++ b/db/records/operations/relevance.py\n@@ -1,5 +1,4 @@\n-from sqlalchemy import case, select\n-from sqlalchemy_filters import apply_sort\n+from sqlalchemy import case, select, desc\n from db.types import categories\n from db.types.operations.convert import get_db_type_enum_from_class\n \n@@ -18,10 +17,7 @@\n parameters given in parameters_dict.\n \"\"\"\n rank_cte = _get_scored_selectable(relation, parameters_dict)\n- filtered_ordered_cte = apply_sort(\n- select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0),\n- {'field': SCORE_COL, 'direction': 'desc'}\n- ).cte()\n+ filtered_ordered_cte = select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0).order_by(desc(SCORE_COL)).cte()\n return select(\n *[filtered_ordered_cte.columns[c] for c in [col.name for col in relation.columns]]\n ).limit(limit)\n", "issue": "docker compose failing at pip install due to missing dependencies\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nWhen running `docker compose -f docker-compose.yml -f docker-compose.dev.yml up dev-service\r\n`\r\n```\r\n=> [dev-service internal] load .dockerignore 0.0s\r\n => => transferring context: 143B 0.0s\r\n => [dev-service internal] load build definition from Dockerfile 0.0s\r\n => => transferring dockerfile: 2.45kB 0.0s\r\n => [dev-service internal] load metadata for docker.io/library/python:3.9-buster 0.7s\r\n => [dev-service 1/13] FROM docker.io/library/python:3.9-buster@sha256:bae5b428ebf32d01a902718b0a58874cbf33d7a4b6a65b7cd7b21d48b0d2e2f1 0.0s\r\n => [dev-service internal] load build context 0.1s\r\n => => transferring context: 194.31kB 0.1s\r\n => CACHED [dev-service 2/13] RUN set -eux; 0.0s\r\n => CACHED [dev-service 3/13] RUN mkdir -p /etc/apt/keyrings; 0.0s\r\n => CACHED [dev-service 4/13] RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - ; echo \"deb http://apt.postgresql. 
0.0s\r\n => CACHED [dev-service 5/13] RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/node 0.0s\r\n => CACHED [dev-service 6/13] RUN apt-get update && apt-get install -y --no-install-recommends sudo ca-certificates 0.0s\r\n => CACHED [dev-service 7/13] RUN localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 0.0s\r\n => CACHED [dev-service 8/13] RUN apt-get update && apt-get install -y --no-install-recommends postgresql-15 postgresql-client-15 po 0.0s\r\n => CACHED [dev-service 9/13] WORKDIR /code/ 0.0s\r\n => CACHED [dev-service 10/13] COPY requirements* ./ 0.0s\r\n => ERROR [dev-service 11/13] RUN pip install --no-cache-dir -r requirements-dev.txt --force-reinstall sqlalchemy-filters 1.6s\r\n------\r\n > [dev-service 11/13] RUN pip install --no-cache-dir -r requirements-dev.txt --force-reinstall sqlalchemy-filters:\r\n0.826 Collecting sqlalchemy_filters\r\n0.826 Cloning https://github.com/mathesar-foundation/sqlalchemy-filters (to revision models_to_tables) to /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5\r\n0.828 Running command git clone --filter=blob:none --quiet https://github.com/mathesar-foundation/sqlalchemy-filters /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5\r\n1.224 fatal: could not read Username for 'https://github.com': No such device or address\r\n1.243 error: subprocess-exited-with-error\r\n1.243 \r\n1.243 \u00d7 git clone --filter=blob:none --quiet https://github.com/mathesar-foundation/sqlalchemy-filters /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5 did not run successfully.\r\n1.243 \u2502 exit code: 128\r\n1.243 \u2570\u2500> See above for output.\r\n1.243 \r\n1.243 note: This error originates from a subprocess, and is likely not a problem with pip.\r\n1.246 error: subprocess-exited-with-error\r\n1.246 \r\n1.246 \u00d7 git clone --filter=blob:none --quiet https://github.com/mathesar-foundation/sqlalchemy-filters /tmp/pip-install-461qq5s5/sqlalchemy-filters_d80dc47043234f738a4aa95eaa753ae5 did not run successfully.\r\n1.246 \u2502 exit code: 128\r\n1.246 \u2570\u2500> See above for output.\r\n1.246 \r\n1.246 note: This error originates from a subprocess, and is likely not a problem with pip.\r\n1.541 \r\n1.541 [notice] A new release of pip is available: 23.0.1 -> 23.3.1\r\n1.541 [notice] To update, run: pip install --upgrade pip\r\n------\r\nfailed to solve: process \"/bin/sh -c pip install --no-cache-dir -r ${PYTHON_REQUIREMENTS} --force-reinstall sqlalchemy-filters\" did not complete successfully: exit code: 1\r\n```\r\nIt is failing because in the requirements .txt there is :\r\nhttps://github.com/mathesar-foundation/mathesar/blob/08743e6aac39aa3497269e9bb6b4e9b0216f85b9/requirements.txt#L26\r\n\r\nand there does not exist any https://github.com/mathesar-foundation/sqlalchemy-filters\r\n(there is though https://github.com/centerofci-archive/sqlalchemy-filters)\r\n\r\nThis is probably because of recent changes to the org name.\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nDependencies should be resolved with no issue.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. 
-->\r\nClone a fresh repo or even try to rebuild the image by\r\n`docker compose -f docker-compose.yml -f docker-compose.dev.yml up dev-service --force-recreate --build dev-service\r\n`\r\n\r\n## Environment\r\n - OS: macOS 13.3.1; \r\n - Browser: Chrome\r\n - Other info:\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. -->\r\n\n", "before_files": [{"content": "from sqlalchemy import case, select\nfrom sqlalchemy_filters import apply_sort\nfrom db.types import categories\nfrom db.types.operations.convert import get_db_type_enum_from_class\n\nWEIGHT_4 = 4\nWEIGHT_3 = 3\nWEIGHT_2 = 2\nWEIGHT_1 = 1\nWEIGHT_0 = 0\nSCORE_COL = '__mathesar_relevance_score'\n\n\ndef get_rank_and_filter_rows_query(relation, parameters_dict, limit=10):\n \"\"\"\n Given a relation, we use a score-assignment algorithm to rank rows of\n the relation by the strength of their match with the various\n parameters given in parameters_dict.\n \"\"\"\n rank_cte = _get_scored_selectable(relation, parameters_dict)\n filtered_ordered_cte = apply_sort(\n select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0),\n {'field': SCORE_COL, 'direction': 'desc'}\n ).cte()\n return select(\n *[filtered_ordered_cte.columns[c] for c in [col.name for col in relation.columns]]\n ).limit(limit)\n\n\ndef _get_scored_selectable(relation, parameters_dict):\n return select(\n relation,\n sum(\n [\n _get_col_score_expr(relation.columns[col_name], val)\n for col_name, val in parameters_dict.items()\n ]\n ).label(SCORE_COL)\n ).cte()\n\n\ndef _get_col_score_expr(col, param_val):\n col_type = get_db_type_enum_from_class(col.type.__class__)\n\n if col_type in categories.STRING_LIKE_TYPES:\n score_expr = case(\n (col.ilike(param_val), WEIGHT_4),\n (col.ilike(param_val + '%'), WEIGHT_3),\n (col.ilike('%' + param_val + '%'), WEIGHT_2),\n else_=WEIGHT_0\n )\n else:\n score_expr = case((col == param_val, WEIGHT_4), else_=WEIGHT_0)\n\n return score_expr\n", "path": "db/records/operations/relevance.py"}], "after_files": [{"content": "from sqlalchemy import case, select, desc\nfrom db.types import categories\nfrom db.types.operations.convert import get_db_type_enum_from_class\n\nWEIGHT_4 = 4\nWEIGHT_3 = 3\nWEIGHT_2 = 2\nWEIGHT_1 = 1\nWEIGHT_0 = 0\nSCORE_COL = '__mathesar_relevance_score'\n\n\ndef get_rank_and_filter_rows_query(relation, parameters_dict, limit=10):\n \"\"\"\n Given a relation, we use a score-assignment algorithm to rank rows of\n the relation by the strength of their match with the various\n parameters given in parameters_dict.\n \"\"\"\n rank_cte = _get_scored_selectable(relation, parameters_dict)\n filtered_ordered_cte = select(rank_cte).where(rank_cte.columns[SCORE_COL] > 0).order_by(desc(SCORE_COL)).cte()\n return select(\n *[filtered_ordered_cte.columns[c] for c in [col.name for col in relation.columns]]\n ).limit(limit)\n\n\ndef _get_scored_selectable(relation, parameters_dict):\n return select(\n relation,\n sum(\n [\n _get_col_score_expr(relation.columns[col_name], val)\n for col_name, val in parameters_dict.items()\n ]\n ).label(SCORE_COL)\n ).cte()\n\n\ndef _get_col_score_expr(col, param_val):\n col_type = get_db_type_enum_from_class(col.type.__class__)\n\n if col_type in categories.STRING_LIKE_TYPES:\n score_expr = case(\n (col.ilike(param_val), WEIGHT_4),\n (col.ilike(param_val + '%'), WEIGHT_3),\n (col.ilike('%' + param_val + '%'), WEIGHT_2),\n else_=WEIGHT_0\n )\n else:\n score_expr = case((col == param_val, WEIGHT_4), else_=WEIGHT_0)\n\n return score_expr\n", "path": 
"db/records/operations/relevance.py"}]}
| 2,378 | 251 |
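
The fix in the record above drops `sqlalchemy_filters.apply_sort` in favour of plain SQLAlchemy ordering (`order_by(desc(SCORE_COL))`). A minimal sketch of that pattern on a stand-in relation, assuming SQLAlchemy 1.4+ style; the `books` table and its columns are hypothetical, not Mathesar's real schema:

```python
from sqlalchemy import column, desc, select, table

# Stand-in relation; table and column names here are illustrative only.
books = table("books", column("id"), column("title"), column("score"))

# Rank rows, drop non-matches, then order by the score column descending --
# plain SQLAlchemy instead of sqlalchemy_filters.apply_sort.
ranked = select(books).cte("ranked")
stmt = (
    select(ranked)
    .where(ranked.c.score > 0)
    .order_by(desc(ranked.c.score))
    .limit(10)
)

print(stmt)  # prints the generated SELECT ... ORDER BY score DESC LIMIT ... SQL
```

Because `order_by(desc(...))` is core SQLAlchemy, the relevance query no longer needs the unmaintained `sqlalchemy-filters` fork at all, which is what lets the requirement causing the Docker build failure be removed.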
gh_patches_debug_10961
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-5110
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Conan remote update command error in 1.15
With the Conan 1.15 release it is no longer possible to use the `conan remote update` command to ensure an existing remote is configured to be at the specified index. Reverting to Conan 1.14 sees the command work as expected.
conan remote update ci-conan https://my-artifactory-instance.com/artifactory/api/conan/ci-conan --insert=0
ERROR: Remote 'ci-conan' already exists with same URL
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/cache/remote_registry.py`
Content:
```
1 import json
2 import os
3 from collections import OrderedDict, namedtuple
4
5 from conans.errors import ConanException, NoRemoteAvailable
6 from conans.util.config_parser import get_bool_from_text_value
7 from conans.util.files import load, save
8 from conans.model.ref import PackageReference, ConanFileReference
9
10
11 Remote = namedtuple("Remote", "name url verify_ssl")
12
13
14 def load_registry_txt(contents):
15 """Remove in Conan 2.0"""
16 remotes = Remotes()
17 refs = {}
18 end_remotes = False
19 # Parse the file
20 for line in contents.splitlines():
21 line = line.strip()
22
23 if not line:
24 if end_remotes:
25 raise ConanException("Bad file format, blank line")
26 end_remotes = True
27 continue
28 chunks = line.split()
29 if not end_remotes:
30 if len(chunks) == 2: # Retro compatibility
31 remote_name, url = chunks
32 verify_ssl = "True"
33 elif len(chunks) == 3:
34 remote_name, url, verify_ssl = chunks
35 else:
36 raise ConanException("Bad file format, wrong item numbers in line '%s'" % line)
37
38 verify_ssl = get_bool_from_text_value(verify_ssl)
39 remotes.add(remote_name, url, verify_ssl)
40 else:
41 ref, remote_name = chunks
42 refs[ref] = remote_name
43
44 return remotes, refs
45
46
47 def load_old_registry_json(contents):
48 """From json"""
49 data = json.loads(contents)
50 remotes = Remotes()
51 refs = data.get("references", {})
52 prefs = data.get("package_references", {})
53 for r in data["remotes"]:
54 remotes.add(r["name"], r["url"], r["verify_ssl"])
55 return remotes, refs, prefs
56
57
58 def migrate_registry_file(cache, out):
59 folder = cache.conan_folder
60 reg_json_path = os.path.join(folder, "registry.json")
61 reg_txt_path = os.path.join(folder, "registry.txt")
62 remotes_path = cache.registry_path
63
64 def add_ref_remote(reference, remotes, remote_name):
65 ref = ConanFileReference.loads(reference, validate=True)
66 remote = remotes.get(remote_name)
67 if remote:
68 with cache.package_layout(ref).update_metadata() as metadata:
69 metadata.recipe.remote = remote.name
70
71 def add_pref_remote(pkg_ref, remotes, remote_name):
72 pref = PackageReference.loads(pkg_ref, validate=True)
73 remote = remotes.get(remote_name)
74 if remote:
75 with cache.package_layout(pref.ref).update_metadata() as metadata:
76 metadata.packages[pref.id].remote = remote.name
77
78 try:
79 if os.path.exists(reg_json_path):
80 out.warn("registry.json has been deprecated. Migrating to remotes.json")
81 remotes, refs, prefs = load_old_registry_json(load(reg_json_path))
82 remotes.save(remotes_path)
83 for ref, remote_name in refs.items():
84 add_ref_remote(ref, remotes, remote_name)
85 for pref, remote_name in prefs.items():
86 add_pref_remote(pref, remotes, remote_name)
87 os.remove(reg_json_path)
88 elif os.path.exists(reg_txt_path):
89 out.warn("registry.txt has been deprecated. Migrating to remotes.json")
90 remotes, refs = load_registry_txt(load(reg_txt_path))
91 remotes.save(remotes_path)
92 for ref, remote_name in refs.items():
93 add_ref_remote(ref, remotes, remote_name)
94 os.remove(reg_txt_path)
95
96 except Exception as e:
97 raise ConanException("Cannot migrate old registry: %s" % str(e))
98
99
100 class Remotes(object):
101 def __init__(self):
102 self._remotes = OrderedDict()
103 self.selected = None
104
105 @classmethod
106 def defaults(cls):
107 result = Remotes()
108 result._remotes["conan-center"] = Remote("conan-center", "https://conan.bintray.com", True)
109 return result
110
111 def select(self, remote_name):
112 self.selected = self[remote_name] if remote_name is not None else None
113
114 def __bool__(self):
115 return bool(self._remotes)
116
117 def __nonzero__(self):
118 return self.__bool__()
119
120 def clear(self):
121 self._remotes.clear()
122
123 def items(self):
124 return self._remotes.items()
125
126 def values(self):
127 return self._remotes.values()
128
129 @staticmethod
130 def loads(text):
131 result = Remotes()
132 data = json.loads(text)
133 for r in data.get("remotes", []):
134 result._remotes[r["name"]] = Remote(r["name"], r["url"], r["verify_ssl"])
135
136 return result
137
138 def dumps(self):
139 result = []
140 for remote in self._remotes.values():
141 result.append("%s: %s [Verify SSL: %s]" % (remote.name, remote.url, remote.verify_ssl))
142 return "\n".join(result)
143
144 def save(self, filename):
145 ret = {"remotes": [{"name": r, "url": u, "verify_ssl": v}
146 for r, (_, u, v) in self._remotes.items()]}
147 save(filename, json.dumps(ret, indent=True))
148
149 def _get_by_url(self, url):
150 for remote in self._remotes.values():
151 if remote.url == url:
152 return remote
153
154 def rename(self, remote_name, new_remote_name):
155 if new_remote_name in self._remotes:
156 raise ConanException("Remote '%s' already exists" % new_remote_name)
157
158 remote = self._remotes[remote_name]
159 new_remote = Remote(new_remote_name, remote.url, remote.verify_ssl)
160 self._remotes = OrderedDict([(new_remote_name, new_remote) if k == remote_name
161 else (k, v) for k, v in self._remotes.items()])
162
163 def get_remote(self, remote_name):
164 # Returns the remote defined by the name, or the default if is None
165 return self[remote_name] if remote_name is not None else self.default
166
167 @property
168 def default(self):
169 try:
170 # This is the python way to get the first element of an OrderedDict
171 return self._remotes[next(iter(self._remotes))]
172 except StopIteration:
173 raise NoRemoteAvailable("No default remote defined")
174
175 def __contains__(self, remote_name):
176 return remote_name in self._remotes
177
178 def get(self, remote_name):
179 return self._remotes.get(remote_name)
180
181 def __getitem__(self, remote_name):
182 try:
183 return self._remotes[remote_name]
184 except KeyError:
185 raise NoRemoteAvailable("No remote '%s' defined in remotes" % (remote_name))
186
187 def __delitem__(self, remote_name):
188 try:
189 del self._remotes[remote_name]
190 except KeyError:
191 raise NoRemoteAvailable("No remote '%s' defined in remotes" % (remote_name))
192
193 def _upsert(self, remote_name, url, verify_ssl, insert):
194 # Remove duplicates
195 updated_remote = Remote(remote_name, url, verify_ssl)
196 self._remotes.pop(remote_name, None)
197 remotes_list = []
198 renamed = None
199
200 for name, remote in self._remotes.items():
201 if remote.url != url:
202 remotes_list.append((name, remote))
203 else:
204 renamed = name
205
206 if insert is not None:
207 try:
208 insert_index = int(insert)
209 except ValueError:
210 raise ConanException("insert argument must be an integer")
211 remotes_list.insert(insert_index, (remote_name, updated_remote))
212 else:
213 remotes_list.append((remote_name, updated_remote))
214 self._remotes = OrderedDict(remotes_list)
215 return renamed
216
217 def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):
218 if force:
219 return self._upsert(remote_name, url, verify_ssl, insert)
220
221 if remote_name in self._remotes:
222 raise ConanException("Remote '%s' already exists in remotes (use update to modify)"
223 % remote_name)
224 self._add_update(remote_name, url, verify_ssl, insert)
225
226 def update(self, remote_name, url, verify_ssl=True, insert=None):
227 if remote_name not in self._remotes:
228 raise ConanException("Remote '%s' not found in remotes" % remote_name)
229 self._add_update(remote_name, url, verify_ssl, insert)
230
231 def _add_update(self, remote_name, url, verify_ssl, insert=None):
232 prev_remote = self._get_by_url(url)
233 if prev_remote and verify_ssl == prev_remote.verify_ssl:
234 raise ConanException("Remote '%s' already exists with same URL" % prev_remote.name)
235 updated_remote = Remote(remote_name, url, verify_ssl)
236 if insert is not None:
237 try:
238 insert_index = int(insert)
239 except ValueError:
240 raise ConanException("insert argument must be an integer")
241 self._remotes.pop(remote_name, None) # Remove if exists (update)
242 remotes_list = list(self._remotes.items())
243 remotes_list.insert(insert_index, (remote_name, updated_remote))
244 self._remotes = OrderedDict(remotes_list)
245 else:
246 self._remotes[remote_name] = updated_remote
247
248
249 class RemoteRegistry(object):
250
251 def __init__(self, cache):
252 self._cache = cache
253 self._filename = cache.registry_path
254
255 def load_remotes(self):
256 content = load(self._filename)
257 return Remotes.loads(content)
258
259 def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):
260 remotes = self.load_remotes()
261 renamed = remotes.add(remote_name, url, verify_ssl, insert, force)
262 remotes.save(self._filename)
263 if renamed:
264 for ref in self._cache.all_refs():
265 with self._cache.package_layout(ref).update_metadata() as metadata:
266 if metadata.recipe.remote == renamed:
267 metadata.recipe.remote = remote_name
268 for pkg_metadata in metadata.packages.values():
269 if pkg_metadata.remote == renamed:
270 pkg_metadata.remote = remote_name
271
272 def update(self, remote_name, url, verify_ssl=True, insert=None):
273 remotes = self.load_remotes()
274 remotes.update(remote_name, url, verify_ssl, insert)
275 remotes.save(self._filename)
276
277 def clear(self):
278 remotes = self.load_remotes()
279 remotes.clear()
280 for ref in self._cache.all_refs():
281 with self._cache.package_layout(ref).update_metadata() as metadata:
282 metadata.recipe.remote = None
283 for pkg_metadata in metadata.packages.values():
284 pkg_metadata.remote = None
285 remotes.save(self._filename)
286
287 def remove(self, remote_name):
288 remotes = self.load_remotes()
289 del remotes[remote_name]
290
291 for ref in self._cache.all_refs():
292 with self._cache.package_layout(ref).update_metadata() as metadata:
293 if metadata.recipe.remote == remote_name:
294 metadata.recipe.remote = None
295 for pkg_metadata in metadata.packages.values():
296 if pkg_metadata.remote == remote_name:
297 pkg_metadata.remote = None
298
299 remotes.save(self._filename)
300
301 def define(self, remotes):
302 # For definition from conan config install
303 for ref in self._cache.all_refs():
304 with self._cache.package_layout(ref).update_metadata() as metadata:
305 if metadata.recipe.remote not in remotes:
306 metadata.recipe.remote = None
307 for pkg_metadata in metadata.packages.values():
308 if pkg_metadata.remote not in remotes:
309 pkg_metadata.remote = None
310
311 remotes.save(self._filename)
312
313 def rename(self, remote_name, new_remote_name):
314 remotes = self.load_remotes()
315 remotes.rename(remote_name, new_remote_name)
316
317 for ref in self._cache.all_refs():
318 with self._cache.package_layout(ref).update_metadata() as metadata:
319 if metadata.recipe.remote == remote_name:
320 metadata.recipe.remote = new_remote_name
321 for pkg_metadata in metadata.packages.values():
322 if pkg_metadata.remote == remote_name:
323 pkg_metadata.remote = new_remote_name
324
325 remotes.save(self._filename)
326
327 @property
328 def refs_list(self):
329 result = {}
330 for ref in self._cache.all_refs():
331 metadata = self._cache.package_layout(ref).load_metadata()
332 if metadata.recipe.remote:
333 result[ref] = metadata.recipe.remote
334 return result
335
336 @property
337 def prefs_list(self):
338 result = {}
339 for ref in self._cache.all_refs():
340 metadata = self._cache.package_layout(ref).load_metadata()
341 for pid, pkg_metadata in metadata.packages.items():
342 pref = PackageReference(ref, pid)
343 result[pref] = pkg_metadata.remote
344 return result
345
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/cache/remote_registry.py b/conans/client/cache/remote_registry.py
--- a/conans/client/cache/remote_registry.py
+++ b/conans/client/cache/remote_registry.py
@@ -230,7 +230,7 @@
def _add_update(self, remote_name, url, verify_ssl, insert=None):
prev_remote = self._get_by_url(url)
- if prev_remote and verify_ssl == prev_remote.verify_ssl:
+ if prev_remote and verify_ssl == prev_remote.verify_ssl and insert is None:
raise ConanException("Remote '%s' already exists with same URL" % prev_remote.name)
updated_remote = Remote(remote_name, url, verify_ssl)
if insert is not None:
|
{"golden_diff": "diff --git a/conans/client/cache/remote_registry.py b/conans/client/cache/remote_registry.py\n--- a/conans/client/cache/remote_registry.py\n+++ b/conans/client/cache/remote_registry.py\n@@ -230,7 +230,7 @@\n \n def _add_update(self, remote_name, url, verify_ssl, insert=None):\n prev_remote = self._get_by_url(url)\n- if prev_remote and verify_ssl == prev_remote.verify_ssl:\n+ if prev_remote and verify_ssl == prev_remote.verify_ssl and insert is None:\n raise ConanException(\"Remote '%s' already exists with same URL\" % prev_remote.name)\n updated_remote = Remote(remote_name, url, verify_ssl)\n if insert is not None:\n", "issue": "Conan remote update command error in 1.15\nWith the Conan 1.15 release it is no longer possible to use the `conan remote update` command to ensure an existing remote is configured to be at the specified index. Reverting to Conan 1.14 sees the command work as expected.\r\n\r\n conan remote update ci-conan https://my-artifactory-instance.com/artifactory/api/conan/ci-conan --insert=0 \r\n ERROR: Remote 'ci-conan' already exists with same URL\r\n\r\n\n", "before_files": [{"content": "import json\nimport os\nfrom collections import OrderedDict, namedtuple\n\nfrom conans.errors import ConanException, NoRemoteAvailable\nfrom conans.util.config_parser import get_bool_from_text_value\nfrom conans.util.files import load, save\nfrom conans.model.ref import PackageReference, ConanFileReference\n\n\nRemote = namedtuple(\"Remote\", \"name url verify_ssl\")\n\n\ndef load_registry_txt(contents):\n \"\"\"Remove in Conan 2.0\"\"\"\n remotes = Remotes()\n refs = {}\n end_remotes = False\n # Parse the file\n for line in contents.splitlines():\n line = line.strip()\n\n if not line:\n if end_remotes:\n raise ConanException(\"Bad file format, blank line\")\n end_remotes = True\n continue\n chunks = line.split()\n if not end_remotes:\n if len(chunks) == 2: # Retro compatibility\n remote_name, url = chunks\n verify_ssl = \"True\"\n elif len(chunks) == 3:\n remote_name, url, verify_ssl = chunks\n else:\n raise ConanException(\"Bad file format, wrong item numbers in line '%s'\" % line)\n\n verify_ssl = get_bool_from_text_value(verify_ssl)\n remotes.add(remote_name, url, verify_ssl)\n else:\n ref, remote_name = chunks\n refs[ref] = remote_name\n\n return remotes, refs\n\n\ndef load_old_registry_json(contents):\n \"\"\"From json\"\"\"\n data = json.loads(contents)\n remotes = Remotes()\n refs = data.get(\"references\", {})\n prefs = data.get(\"package_references\", {})\n for r in data[\"remotes\"]:\n remotes.add(r[\"name\"], r[\"url\"], r[\"verify_ssl\"])\n return remotes, refs, prefs\n\n\ndef migrate_registry_file(cache, out):\n folder = cache.conan_folder\n reg_json_path = os.path.join(folder, \"registry.json\")\n reg_txt_path = os.path.join(folder, \"registry.txt\")\n remotes_path = cache.registry_path\n\n def add_ref_remote(reference, remotes, remote_name):\n ref = ConanFileReference.loads(reference, validate=True)\n remote = remotes.get(remote_name)\n if remote:\n with cache.package_layout(ref).update_metadata() as metadata:\n metadata.recipe.remote = remote.name\n\n def add_pref_remote(pkg_ref, remotes, remote_name):\n pref = PackageReference.loads(pkg_ref, validate=True)\n remote = remotes.get(remote_name)\n if remote:\n with cache.package_layout(pref.ref).update_metadata() as metadata:\n metadata.packages[pref.id].remote = remote.name\n\n try:\n if os.path.exists(reg_json_path):\n out.warn(\"registry.json has been deprecated. 
Migrating to remotes.json\")\n remotes, refs, prefs = load_old_registry_json(load(reg_json_path))\n remotes.save(remotes_path)\n for ref, remote_name in refs.items():\n add_ref_remote(ref, remotes, remote_name)\n for pref, remote_name in prefs.items():\n add_pref_remote(pref, remotes, remote_name)\n os.remove(reg_json_path)\n elif os.path.exists(reg_txt_path):\n out.warn(\"registry.txt has been deprecated. Migrating to remotes.json\")\n remotes, refs = load_registry_txt(load(reg_txt_path))\n remotes.save(remotes_path)\n for ref, remote_name in refs.items():\n add_ref_remote(ref, remotes, remote_name)\n os.remove(reg_txt_path)\n\n except Exception as e:\n raise ConanException(\"Cannot migrate old registry: %s\" % str(e))\n\n\nclass Remotes(object):\n def __init__(self):\n self._remotes = OrderedDict()\n self.selected = None\n\n @classmethod\n def defaults(cls):\n result = Remotes()\n result._remotes[\"conan-center\"] = Remote(\"conan-center\", \"https://conan.bintray.com\", True)\n return result\n\n def select(self, remote_name):\n self.selected = self[remote_name] if remote_name is not None else None\n\n def __bool__(self):\n return bool(self._remotes)\n\n def __nonzero__(self):\n return self.__bool__()\n\n def clear(self):\n self._remotes.clear()\n\n def items(self):\n return self._remotes.items()\n\n def values(self):\n return self._remotes.values()\n\n @staticmethod\n def loads(text):\n result = Remotes()\n data = json.loads(text)\n for r in data.get(\"remotes\", []):\n result._remotes[r[\"name\"]] = Remote(r[\"name\"], r[\"url\"], r[\"verify_ssl\"])\n\n return result\n\n def dumps(self):\n result = []\n for remote in self._remotes.values():\n result.append(\"%s: %s [Verify SSL: %s]\" % (remote.name, remote.url, remote.verify_ssl))\n return \"\\n\".join(result)\n\n def save(self, filename):\n ret = {\"remotes\": [{\"name\": r, \"url\": u, \"verify_ssl\": v}\n for r, (_, u, v) in self._remotes.items()]}\n save(filename, json.dumps(ret, indent=True))\n\n def _get_by_url(self, url):\n for remote in self._remotes.values():\n if remote.url == url:\n return remote\n\n def rename(self, remote_name, new_remote_name):\n if new_remote_name in self._remotes:\n raise ConanException(\"Remote '%s' already exists\" % new_remote_name)\n\n remote = self._remotes[remote_name]\n new_remote = Remote(new_remote_name, remote.url, remote.verify_ssl)\n self._remotes = OrderedDict([(new_remote_name, new_remote) if k == remote_name\n else (k, v) for k, v in self._remotes.items()])\n\n def get_remote(self, remote_name):\n # Returns the remote defined by the name, or the default if is None\n return self[remote_name] if remote_name is not None else self.default\n\n @property\n def default(self):\n try:\n # This is the python way to get the first element of an OrderedDict\n return self._remotes[next(iter(self._remotes))]\n except StopIteration:\n raise NoRemoteAvailable(\"No default remote defined\")\n\n def __contains__(self, remote_name):\n return remote_name in self._remotes\n\n def get(self, remote_name):\n return self._remotes.get(remote_name)\n\n def __getitem__(self, remote_name):\n try:\n return self._remotes[remote_name]\n except KeyError:\n raise NoRemoteAvailable(\"No remote '%s' defined in remotes\" % (remote_name))\n\n def __delitem__(self, remote_name):\n try:\n del self._remotes[remote_name]\n except KeyError:\n raise NoRemoteAvailable(\"No remote '%s' defined in remotes\" % (remote_name))\n\n def _upsert(self, remote_name, url, verify_ssl, insert):\n # Remove duplicates\n updated_remote = 
Remote(remote_name, url, verify_ssl)\n self._remotes.pop(remote_name, None)\n remotes_list = []\n renamed = None\n\n for name, remote in self._remotes.items():\n if remote.url != url:\n remotes_list.append((name, remote))\n else:\n renamed = name\n\n if insert is not None:\n try:\n insert_index = int(insert)\n except ValueError:\n raise ConanException(\"insert argument must be an integer\")\n remotes_list.insert(insert_index, (remote_name, updated_remote))\n else:\n remotes_list.append((remote_name, updated_remote))\n self._remotes = OrderedDict(remotes_list)\n return renamed\n\n def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):\n if force:\n return self._upsert(remote_name, url, verify_ssl, insert)\n\n if remote_name in self._remotes:\n raise ConanException(\"Remote '%s' already exists in remotes (use update to modify)\"\n % remote_name)\n self._add_update(remote_name, url, verify_ssl, insert)\n\n def update(self, remote_name, url, verify_ssl=True, insert=None):\n if remote_name not in self._remotes:\n raise ConanException(\"Remote '%s' not found in remotes\" % remote_name)\n self._add_update(remote_name, url, verify_ssl, insert)\n\n def _add_update(self, remote_name, url, verify_ssl, insert=None):\n prev_remote = self._get_by_url(url)\n if prev_remote and verify_ssl == prev_remote.verify_ssl:\n raise ConanException(\"Remote '%s' already exists with same URL\" % prev_remote.name)\n updated_remote = Remote(remote_name, url, verify_ssl)\n if insert is not None:\n try:\n insert_index = int(insert)\n except ValueError:\n raise ConanException(\"insert argument must be an integer\")\n self._remotes.pop(remote_name, None) # Remove if exists (update)\n remotes_list = list(self._remotes.items())\n remotes_list.insert(insert_index, (remote_name, updated_remote))\n self._remotes = OrderedDict(remotes_list)\n else:\n self._remotes[remote_name] = updated_remote\n\n\nclass RemoteRegistry(object):\n\n def __init__(self, cache):\n self._cache = cache\n self._filename = cache.registry_path\n\n def load_remotes(self):\n content = load(self._filename)\n return Remotes.loads(content)\n\n def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):\n remotes = self.load_remotes()\n renamed = remotes.add(remote_name, url, verify_ssl, insert, force)\n remotes.save(self._filename)\n if renamed:\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote == renamed:\n metadata.recipe.remote = remote_name\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote == renamed:\n pkg_metadata.remote = remote_name\n\n def update(self, remote_name, url, verify_ssl=True, insert=None):\n remotes = self.load_remotes()\n remotes.update(remote_name, url, verify_ssl, insert)\n remotes.save(self._filename)\n\n def clear(self):\n remotes = self.load_remotes()\n remotes.clear()\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n metadata.recipe.remote = None\n for pkg_metadata in metadata.packages.values():\n pkg_metadata.remote = None\n remotes.save(self._filename)\n\n def remove(self, remote_name):\n remotes = self.load_remotes()\n del remotes[remote_name]\n\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote == remote_name:\n metadata.recipe.remote = None\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote == remote_name:\n 
pkg_metadata.remote = None\n\n remotes.save(self._filename)\n\n def define(self, remotes):\n # For definition from conan config install\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote not in remotes:\n metadata.recipe.remote = None\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote not in remotes:\n pkg_metadata.remote = None\n\n remotes.save(self._filename)\n\n def rename(self, remote_name, new_remote_name):\n remotes = self.load_remotes()\n remotes.rename(remote_name, new_remote_name)\n\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote == remote_name:\n metadata.recipe.remote = new_remote_name\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote == remote_name:\n pkg_metadata.remote = new_remote_name\n\n remotes.save(self._filename)\n\n @property\n def refs_list(self):\n result = {}\n for ref in self._cache.all_refs():\n metadata = self._cache.package_layout(ref).load_metadata()\n if metadata.recipe.remote:\n result[ref] = metadata.recipe.remote\n return result\n\n @property\n def prefs_list(self):\n result = {}\n for ref in self._cache.all_refs():\n metadata = self._cache.package_layout(ref).load_metadata()\n for pid, pkg_metadata in metadata.packages.items():\n pref = PackageReference(ref, pid)\n result[pref] = pkg_metadata.remote\n return result\n", "path": "conans/client/cache/remote_registry.py"}], "after_files": [{"content": "import json\nimport os\nfrom collections import OrderedDict, namedtuple\n\nfrom conans.errors import ConanException, NoRemoteAvailable\nfrom conans.util.config_parser import get_bool_from_text_value\nfrom conans.util.files import load, save\nfrom conans.model.ref import PackageReference, ConanFileReference\n\n\nRemote = namedtuple(\"Remote\", \"name url verify_ssl\")\n\n\ndef load_registry_txt(contents):\n \"\"\"Remove in Conan 2.0\"\"\"\n remotes = Remotes()\n refs = {}\n end_remotes = False\n # Parse the file\n for line in contents.splitlines():\n line = line.strip()\n\n if not line:\n if end_remotes:\n raise ConanException(\"Bad file format, blank line\")\n end_remotes = True\n continue\n chunks = line.split()\n if not end_remotes:\n if len(chunks) == 2: # Retro compatibility\n remote_name, url = chunks\n verify_ssl = \"True\"\n elif len(chunks) == 3:\n remote_name, url, verify_ssl = chunks\n else:\n raise ConanException(\"Bad file format, wrong item numbers in line '%s'\" % line)\n\n verify_ssl = get_bool_from_text_value(verify_ssl)\n remotes.add(remote_name, url, verify_ssl)\n else:\n ref, remote_name = chunks\n refs[ref] = remote_name\n\n return remotes, refs\n\n\ndef load_old_registry_json(contents):\n \"\"\"From json\"\"\"\n data = json.loads(contents)\n remotes = Remotes()\n refs = data.get(\"references\", {})\n prefs = data.get(\"package_references\", {})\n for r in data[\"remotes\"]:\n remotes.add(r[\"name\"], r[\"url\"], r[\"verify_ssl\"])\n return remotes, refs, prefs\n\n\ndef migrate_registry_file(cache, out):\n folder = cache.conan_folder\n reg_json_path = os.path.join(folder, \"registry.json\")\n reg_txt_path = os.path.join(folder, \"registry.txt\")\n remotes_path = cache.registry_path\n\n def add_ref_remote(reference, remotes, remote_name):\n ref = ConanFileReference.loads(reference, validate=True)\n remote = remotes.get(remote_name)\n if remote:\n with cache.package_layout(ref).update_metadata() as metadata:\n 
metadata.recipe.remote = remote.name\n\n def add_pref_remote(pkg_ref, remotes, remote_name):\n pref = PackageReference.loads(pkg_ref, validate=True)\n remote = remotes.get(remote_name)\n if remote:\n with cache.package_layout(pref.ref).update_metadata() as metadata:\n metadata.packages[pref.id].remote = remote.name\n\n try:\n if os.path.exists(reg_json_path):\n out.warn(\"registry.json has been deprecated. Migrating to remotes.json\")\n remotes, refs, prefs = load_old_registry_json(load(reg_json_path))\n remotes.save(remotes_path)\n for ref, remote_name in refs.items():\n add_ref_remote(ref, remotes, remote_name)\n for pref, remote_name in prefs.items():\n add_pref_remote(pref, remotes, remote_name)\n os.remove(reg_json_path)\n elif os.path.exists(reg_txt_path):\n out.warn(\"registry.txt has been deprecated. Migrating to remotes.json\")\n remotes, refs = load_registry_txt(load(reg_txt_path))\n remotes.save(remotes_path)\n for ref, remote_name in refs.items():\n add_ref_remote(ref, remotes, remote_name)\n os.remove(reg_txt_path)\n\n except Exception as e:\n raise ConanException(\"Cannot migrate old registry: %s\" % str(e))\n\n\nclass Remotes(object):\n def __init__(self):\n self._remotes = OrderedDict()\n self.selected = None\n\n @classmethod\n def defaults(cls):\n result = Remotes()\n result._remotes[\"conan-center\"] = Remote(\"conan-center\", \"https://conan.bintray.com\", True)\n return result\n\n def select(self, remote_name):\n self.selected = self[remote_name] if remote_name is not None else None\n\n def __bool__(self):\n return bool(self._remotes)\n\n def __nonzero__(self):\n return self.__bool__()\n\n def clear(self):\n self._remotes.clear()\n\n def items(self):\n return self._remotes.items()\n\n def values(self):\n return self._remotes.values()\n\n @staticmethod\n def loads(text):\n result = Remotes()\n data = json.loads(text)\n for r in data.get(\"remotes\", []):\n result._remotes[r[\"name\"]] = Remote(r[\"name\"], r[\"url\"], r[\"verify_ssl\"])\n\n return result\n\n def dumps(self):\n result = []\n for remote in self._remotes.values():\n result.append(\"%s: %s [Verify SSL: %s]\" % (remote.name, remote.url, remote.verify_ssl))\n return \"\\n\".join(result)\n\n def save(self, filename):\n ret = {\"remotes\": [{\"name\": r, \"url\": u, \"verify_ssl\": v}\n for r, (_, u, v) in self._remotes.items()]}\n save(filename, json.dumps(ret, indent=True))\n\n def _get_by_url(self, url):\n for remote in self._remotes.values():\n if remote.url == url:\n return remote\n\n def rename(self, remote_name, new_remote_name):\n if new_remote_name in self._remotes:\n raise ConanException(\"Remote '%s' already exists\" % new_remote_name)\n\n remote = self._remotes[remote_name]\n new_remote = Remote(new_remote_name, remote.url, remote.verify_ssl)\n self._remotes = OrderedDict([(new_remote_name, new_remote) if k == remote_name\n else (k, v) for k, v in self._remotes.items()])\n\n def get_remote(self, remote_name):\n # Returns the remote defined by the name, or the default if is None\n return self[remote_name] if remote_name is not None else self.default\n\n @property\n def default(self):\n try:\n # This is the python way to get the first element of an OrderedDict\n return self._remotes[next(iter(self._remotes))]\n except StopIteration:\n raise NoRemoteAvailable(\"No default remote defined\")\n\n def __contains__(self, remote_name):\n return remote_name in self._remotes\n\n def get(self, remote_name):\n return self._remotes.get(remote_name)\n\n def __getitem__(self, remote_name):\n try:\n return 
self._remotes[remote_name]\n except KeyError:\n raise NoRemoteAvailable(\"No remote '%s' defined in remotes\" % (remote_name))\n\n def __delitem__(self, remote_name):\n try:\n del self._remotes[remote_name]\n except KeyError:\n raise NoRemoteAvailable(\"No remote '%s' defined in remotes\" % (remote_name))\n\n def _upsert(self, remote_name, url, verify_ssl, insert):\n # Remove duplicates\n updated_remote = Remote(remote_name, url, verify_ssl)\n self._remotes.pop(remote_name, None)\n remotes_list = []\n renamed = None\n\n for name, remote in self._remotes.items():\n if remote.url != url:\n remotes_list.append((name, remote))\n else:\n renamed = name\n\n if insert is not None:\n try:\n insert_index = int(insert)\n except ValueError:\n raise ConanException(\"insert argument must be an integer\")\n remotes_list.insert(insert_index, (remote_name, updated_remote))\n else:\n remotes_list.append((remote_name, updated_remote))\n self._remotes = OrderedDict(remotes_list)\n return renamed\n\n def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):\n if force:\n return self._upsert(remote_name, url, verify_ssl, insert)\n\n if remote_name in self._remotes:\n raise ConanException(\"Remote '%s' already exists in remotes (use update to modify)\"\n % remote_name)\n self._add_update(remote_name, url, verify_ssl, insert)\n\n def update(self, remote_name, url, verify_ssl=True, insert=None):\n if remote_name not in self._remotes:\n raise ConanException(\"Remote '%s' not found in remotes\" % remote_name)\n self._add_update(remote_name, url, verify_ssl, insert)\n\n def _add_update(self, remote_name, url, verify_ssl, insert=None):\n prev_remote = self._get_by_url(url)\n if prev_remote and verify_ssl == prev_remote.verify_ssl and insert is None:\n raise ConanException(\"Remote '%s' already exists with same URL\" % prev_remote.name)\n updated_remote = Remote(remote_name, url, verify_ssl)\n if insert is not None:\n try:\n insert_index = int(insert)\n except ValueError:\n raise ConanException(\"insert argument must be an integer\")\n self._remotes.pop(remote_name, None) # Remove if exists (update)\n remotes_list = list(self._remotes.items())\n remotes_list.insert(insert_index, (remote_name, updated_remote))\n self._remotes = OrderedDict(remotes_list)\n else:\n self._remotes[remote_name] = updated_remote\n\n\nclass RemoteRegistry(object):\n\n def __init__(self, cache):\n self._cache = cache\n self._filename = cache.registry_path\n\n def load_remotes(self):\n content = load(self._filename)\n return Remotes.loads(content)\n\n def add(self, remote_name, url, verify_ssl=True, insert=None, force=None):\n remotes = self.load_remotes()\n renamed = remotes.add(remote_name, url, verify_ssl, insert, force)\n remotes.save(self._filename)\n if renamed:\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote == renamed:\n metadata.recipe.remote = remote_name\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote == renamed:\n pkg_metadata.remote = remote_name\n\n def update(self, remote_name, url, verify_ssl=True, insert=None):\n remotes = self.load_remotes()\n remotes.update(remote_name, url, verify_ssl, insert)\n remotes.save(self._filename)\n\n def clear(self):\n remotes = self.load_remotes()\n remotes.clear()\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n metadata.recipe.remote = None\n for pkg_metadata in metadata.packages.values():\n 
pkg_metadata.remote = None\n remotes.save(self._filename)\n\n def remove(self, remote_name):\n remotes = self.load_remotes()\n del remotes[remote_name]\n\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote == remote_name:\n metadata.recipe.remote = None\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote == remote_name:\n pkg_metadata.remote = None\n\n remotes.save(self._filename)\n\n def define(self, remotes):\n # For definition from conan config install\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote not in remotes:\n metadata.recipe.remote = None\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote not in remotes:\n pkg_metadata.remote = None\n\n remotes.save(self._filename)\n\n def rename(self, remote_name, new_remote_name):\n remotes = self.load_remotes()\n remotes.rename(remote_name, new_remote_name)\n\n for ref in self._cache.all_refs():\n with self._cache.package_layout(ref).update_metadata() as metadata:\n if metadata.recipe.remote == remote_name:\n metadata.recipe.remote = new_remote_name\n for pkg_metadata in metadata.packages.values():\n if pkg_metadata.remote == remote_name:\n pkg_metadata.remote = new_remote_name\n\n remotes.save(self._filename)\n\n @property\n def refs_list(self):\n result = {}\n for ref in self._cache.all_refs():\n metadata = self._cache.package_layout(ref).load_metadata()\n if metadata.recipe.remote:\n result[ref] = metadata.recipe.remote\n return result\n\n @property\n def prefs_list(self):\n result = {}\n for ref in self._cache.all_refs():\n metadata = self._cache.package_layout(ref).load_metadata()\n for pid, pkg_metadata in metadata.packages.items():\n pref = PackageReference(ref, pid)\n result[pref] = pkg_metadata.remote\n return result\n", "path": "conans/client/cache/remote_registry.py"}]}
| 4,089 | 160 |
gh_patches_debug_4842
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-3972
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError thrown during groups lookup
The offending code is [here](https://github.com/plone/Products.CMFPlone/blob/308aa4d03ee6c0ce9d8119ce4c37955153f0bc6f/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py#L66). The traceback looks like this:
```
Traceback (innermost last):
Module ZPublisher.WSGIPublisher, line 176, in transaction_pubevents
Module ZPublisher.WSGIPublisher, line 385, in publish_module
Module ZPublisher.WSGIPublisher, line 280, in publish
Module ZPublisher.mapply, line 85, in mapply
Module ZPublisher.WSGIPublisher, line 63, in call_object
Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 57, in __call__
Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 54, in update
Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 63, in getGroups
TypeError: '<' not supported between instances of 'bool' and 'str'
```
The issue is that when there's a `None` value in the `groupResults` (which is anticipated in the sort code) the lambda returns `False` which fails to compare against the group title/name strings under Python 3. The list comprehension that defines `groupResults` should probably just filter out `None` values to avoid this issue. I'm not entirely sure what circumstances result in a `None` group value, but I am seeing it occur in a real world use case.
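As a minimal, standalone illustration of the failure (plain strings stand in for the group objects and `str.lower()` stands in for `normalizeString`; this is not Plone code):

```python
# Reproduces the reported TypeError on Python 3: the key returns False for
# None entries and a string for real entries, and bool cannot be ordered
# against str.
groups = ["beta", None, "Alpha"]

try:
    sorted(groups, key=lambda x: x is not None and x.lower())
except TypeError as exc:
    print(exc)  # '<' not supported between instances of 'bool' and 'str'

# Keeping the key a string in every branch avoids the mixed-type comparison:
print(sorted(groups, key=lambda x: x.lower() if x else ""))
```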
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py`
Content:
```
1 from plone.base import PloneMessageFactory as _
2 from Products.CMFCore.utils import getToolByName
3 from Products.CMFPlone.controlpanel.browser.usergroups import (
4 UsersGroupsControlPanelView,
5 )
6 from Products.CMFPlone.utils import normalizeString
7 from zExceptions import Forbidden
8
9
10 class UserMembershipControlPanel(UsersGroupsControlPanelView):
11 def update(self):
12 self.userid = getattr(self.request, "userid")
13 self.gtool = getToolByName(self, "portal_groups")
14 self.mtool = getToolByName(self, "portal_membership")
15 self.member = self.mtool.getMemberById(self.userid)
16
17 form = self.request.form
18
19 self.searchResults = []
20 self.searchString = ""
21 self.newSearch = False
22
23 if form.get("form.submitted", False):
24 delete = form.get("delete", [])
25 if delete:
26 for groupname in delete:
27 self.gtool.removePrincipalFromGroup(
28 self.userid, groupname, self.request
29 )
30 self.context.plone_utils.addPortalMessage(_("Changes made."))
31
32 add = form.get("add", [])
33 if add:
34 for groupname in add:
35 group = self.gtool.getGroupById(groupname)
36 if "Manager" in group.getRoles() and not self.is_zope_manager:
37 raise Forbidden
38
39 self.gtool.addPrincipalToGroup(self.userid, groupname, self.request)
40 self.context.plone_utils.addPortalMessage(_("Changes made."))
41
42 search = form.get("form.button.Search", None) is not None
43 findAll = (
44 form.get("form.button.FindAll", None) is not None and not self.many_groups
45 )
46 self.searchString = not findAll and form.get("searchstring", "") or ""
47
48 if findAll or not self.many_groups or self.searchString != "":
49 self.searchResults = self.getPotentialGroups(self.searchString)
50
51 if search or findAll:
52 self.newSearch = True
53
54 self.groups = self.getGroups()
55
56 def __call__(self):
57 self.update()
58 return self.index()
59
60 def getGroups(self):
61 groupResults = [
62 self.gtool.getGroupById(m)
63 for m in self.gtool.getGroupsForPrincipal(self.member)
64 ]
65 groupResults.sort(
66 key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
67 )
68 return [i for i in groupResults if i]
69
70 def getPotentialGroups(self, searchString):
71 ignoredGroups = [x.id for x in self.getGroups() if x is not None]
72 return self.membershipSearch(
73 searchString, searchUsers=False, ignore=ignoredGroups
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py
@@ -63,7 +63,7 @@
for m in self.gtool.getGroupsForPrincipal(self.member)
]
groupResults.sort(
- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
+ key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else ''
)
return [i for i in groupResults if i]
|
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py\n@@ -63,7 +63,7 @@\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n+ key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else ''\n )\n return [i for i in groupResults if i]\n", "issue": "TypeError thrown when during groups lookup\nThe offending code is [here](https://github.com/plone/Products.CMFPlone/blob/308aa4d03ee6c0ce9d8119ce4c37955153f0bc6f/Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py#L66). The traceback looks like this:\r\n```\r\nTraceback (innermost last):\r\n Module ZPublisher.WSGIPublisher, line 176, in transaction_pubevents\r\n Module ZPublisher.WSGIPublisher, line 385, in publish_module\r\n Module ZPublisher.WSGIPublisher, line 280, in publish\r\n Module ZPublisher.mapply, line 85, in mapply\r\n Module ZPublisher.WSGIPublisher, line 63, in call_object\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 57, in __call__\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 54, in update\r\n Module Products.CMFPlone.controlpanel.browser.usergroups_usermembership, line 63, in getGroups\r\nTypeError: '<' not supported between instances of 'bool' and 'str'\r\n```\r\n\r\nThe issue is that when there's a `None` value in the `groupResults` (which is anticipated in the sort code) the lambda returns `False` which fails to compare against the group title/name strings under Python 3. The list comprehension that defines `groupResults` should probably just filter out `None` values to avoid this issue. 
I'm not entirely sure what circumstances result in a `None` group value, but I am seeing it occur in a real world use case.\n", "before_files": [{"content": "from plone.base import PloneMessageFactory as _\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass UserMembershipControlPanel(UsersGroupsControlPanelView):\n def update(self):\n self.userid = getattr(self.request, \"userid\")\n self.gtool = getToolByName(self, \"portal_groups\")\n self.mtool = getToolByName(self, \"portal_membership\")\n self.member = self.mtool.getMemberById(self.userid)\n\n form = self.request.form\n\n self.searchResults = []\n self.searchString = \"\"\n self.newSearch = False\n\n if form.get(\"form.submitted\", False):\n delete = form.get(\"delete\", [])\n if delete:\n for groupname in delete:\n self.gtool.removePrincipalFromGroup(\n self.userid, groupname, self.request\n )\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n add = form.get(\"add\", [])\n if add:\n for groupname in add:\n group = self.gtool.getGroupById(groupname)\n if \"Manager\" in group.getRoles() and not self.is_zope_manager:\n raise Forbidden\n\n self.gtool.addPrincipalToGroup(self.userid, groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n search = form.get(\"form.button.Search\", None) is not None\n findAll = (\n form.get(\"form.button.FindAll\", None) is not None and not self.many_groups\n )\n self.searchString = not findAll and form.get(\"searchstring\", \"\") or \"\"\n\n if findAll or not self.many_groups or self.searchString != \"\":\n self.searchResults = self.getPotentialGroups(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groups = self.getGroups()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def getGroups(self):\n groupResults = [\n self.gtool.getGroupById(m)\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n )\n return [i for i in groupResults if i]\n\n def getPotentialGroups(self, searchString):\n ignoredGroups = [x.id for x in self.getGroups() if x is not None]\n return self.membershipSearch(\n searchString, searchUsers=False, ignore=ignoredGroups\n )\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py"}], "after_files": [{"content": "from plone.base import PloneMessageFactory as _\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.controlpanel.browser.usergroups import (\n UsersGroupsControlPanelView,\n)\nfrom Products.CMFPlone.utils import normalizeString\nfrom zExceptions import Forbidden\n\n\nclass UserMembershipControlPanel(UsersGroupsControlPanelView):\n def update(self):\n self.userid = getattr(self.request, \"userid\")\n self.gtool = getToolByName(self, \"portal_groups\")\n self.mtool = getToolByName(self, \"portal_membership\")\n self.member = self.mtool.getMemberById(self.userid)\n\n form = self.request.form\n\n self.searchResults = []\n self.searchString = \"\"\n self.newSearch = False\n\n if form.get(\"form.submitted\", False):\n delete = form.get(\"delete\", [])\n if delete:\n for groupname in delete:\n self.gtool.removePrincipalFromGroup(\n self.userid, groupname, self.request\n )\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n add = 
form.get(\"add\", [])\n if add:\n for groupname in add:\n group = self.gtool.getGroupById(groupname)\n if \"Manager\" in group.getRoles() and not self.is_zope_manager:\n raise Forbidden\n\n self.gtool.addPrincipalToGroup(self.userid, groupname, self.request)\n self.context.plone_utils.addPortalMessage(_(\"Changes made.\"))\n\n search = form.get(\"form.button.Search\", None) is not None\n findAll = (\n form.get(\"form.button.FindAll\", None) is not None and not self.many_groups\n )\n self.searchString = not findAll and form.get(\"searchstring\", \"\") or \"\"\n\n if findAll or not self.many_groups or self.searchString != \"\":\n self.searchResults = self.getPotentialGroups(self.searchString)\n\n if search or findAll:\n self.newSearch = True\n\n self.groups = self.getGroups()\n\n def __call__(self):\n self.update()\n return self.index()\n\n def getGroups(self):\n groupResults = [\n self.gtool.getGroupById(m)\n for m in self.gtool.getGroupsForPrincipal(self.member)\n ]\n groupResults.sort(\n key=lambda x: normalizeString(x.getGroupTitleOrName()) if x else ''\n )\n return [i for i in groupResults if i]\n\n def getPotentialGroups(self, searchString):\n ignoredGroups = [x.id for x in self.getGroups() if x is not None]\n return self.membershipSearch(\n searchString, searchUsers=False, ignore=ignoredGroups\n )\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups_usermembership.py"}]}
| 1,356 | 158 |
gh_patches_debug_19305
|
rasdani/github-patches
|
git_diff
|
fail2ban__fail2ban-940
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WARNING 'ignoreregex' not defined
Hello there
I'm seeing this error each time I restart fail2ban:
```
WARNING 'ignoreregex' not defined in 'Definition'. Using default one: ''
```
No idea which filter this is referring to. Any ideas?
Here is more information. Thanks!
```
$ fail2ban-client -d -v
INFO Using socket file /var/run/fail2ban/fail2ban.sock
WARNING 'ignoreregex' not defined in 'Definition'. Using default one: ''
['set', 'loglevel', 3]
['set', 'logtarget', '/var/log/fail2ban.log']
...
```
```
$ fail2ban-client -V
Fail2Ban v0.8.11
```
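The warning means that some filter's `[Definition]` section has no `ignoreregex =` line, so fail2ban falls back to the default empty value. As a rough diagnostic sketch (not part of fail2ban; it ignores `.local` overrides and `[INCLUDES]`, and assumes the standard `/etc/fail2ban/filter.d` location), the affected filter files can be listed like this:

```python
# List filter configs whose [Definition] section lacks an ignoreregex option.
import glob
from configparser import ConfigParser

for path in sorted(glob.glob("/etc/fail2ban/filter.d/*.conf")):
    cfg = ConfigParser(interpolation=None, strict=False)
    cfg.read(path)
    if cfg.has_section("Definition") and not cfg.has_option("Definition", "ignoreregex"):
        print(path)
```

Adding an (even empty) `ignoreregex =` line to those files silences the warning.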
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/python
2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
3 # vi: set ft=python sts=4 ts=4 sw=4 noet :
4
5 # This file is part of Fail2Ban.
6 #
7 # Fail2Ban is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # Fail2Ban is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Fail2Ban; if not, write to the Free Software
19 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20
21 __author__ = "Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko"
22 __copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors"
23 __license__ = "GPL"
24
25 try:
26 import setuptools
27 from setuptools import setup
28 except ImportError:
29 setuptools = None
30 from distutils.core import setup
31
32 try:
33 # python 3.x
34 from distutils.command.build_py import build_py_2to3 as build_py
35 from distutils.command.build_scripts \
36 import build_scripts_2to3 as build_scripts
37 except ImportError:
38 # python 2.x
39 from distutils.command.build_py import build_py
40 from distutils.command.build_scripts import build_scripts
41 import os
42 from os.path import isfile, join, isdir
43 import sys, warnings
44 from glob import glob
45
46 if setuptools and "test" in sys.argv:
47 import logging
48 logSys = logging.getLogger("fail2ban")
49 hdlr = logging.StreamHandler(sys.stdout)
50 fmt = logging.Formatter("%(asctime)-15s %(message)s")
51 hdlr.setFormatter(fmt)
52 logSys.addHandler(hdlr)
53 if set(["-q", "--quiet"]) & set(sys.argv):
54 logSys.setLevel(logging.CRITICAL)
55 warnings.simplefilter("ignore")
56 sys.warnoptions.append("ignore")
57 elif set(["-v", "--verbose"]) & set(sys.argv):
58 logSys.setLevel(logging.DEBUG)
59 else:
60 logSys.setLevel(logging.INFO)
61 elif "test" in sys.argv:
62 print("python distribute required to execute fail2ban tests")
63 print("")
64
65 longdesc = '''
66 Fail2Ban scans log files like /var/log/pwdfail or
67 /var/log/apache/error_log and bans IP that makes
68 too many password failures. It updates firewall rules
69 to reject the IP address or executes user defined
70 commands.'''
71
72 if setuptools:
73 setup_extra = {
74 'test_suite': "fail2ban.tests.utils.gatherTests",
75 'use_2to3': True,
76 }
77 else:
78 setup_extra = {}
79
80 # Get version number, avoiding importing fail2ban.
81 # This is due to tests not functioning for python3 as 2to3 takes place later
82 exec(open(join("fail2ban", "version.py")).read())
83
84 setup(
85 name = "fail2ban",
86 version = version,
87 description = "Ban IPs that make too many password failures",
88 long_description = longdesc,
89 author = "Cyril Jaquier & Fail2Ban Contributors",
90 author_email = "[email protected]",
91 url = "http://www.fail2ban.org",
92 license = "GPL",
93 platforms = "Posix",
94 cmdclass = {'build_py': build_py, 'build_scripts': build_scripts},
95 scripts = [
96 'bin/fail2ban-client',
97 'bin/fail2ban-server',
98 'bin/fail2ban-regex',
99 'bin/fail2ban-testcases',
100 ],
101 packages = [
102 'fail2ban',
103 'fail2ban.client',
104 'fail2ban.server',
105 'fail2ban.tests',
106 'fail2ban.tests.action_d',
107 ],
108 package_data = {
109 'fail2ban.tests':
110 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
111 for w in os.walk('fail2ban/tests/files')
112 for f in w[2]] +
113 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
114 for w in os.walk('fail2ban/tests/config')
115 for f in w[2]] +
116 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
117 for w in os.walk('fail2ban/tests/action_d')
118 for f in w[2]]
119 },
120 data_files = [
121 ('/etc/fail2ban',
122 glob("config/*.conf")
123 ),
124 ('/etc/fail2ban/filter.d',
125 glob("config/filter.d/*.conf")
126 ),
127 ('/etc/fail2ban/action.d',
128 glob("config/action.d/*.conf") +
129 glob("config/action.d/*.py")
130 ),
131 ('/etc/fail2ban/fail2ban.d',
132 ''
133 ),
134 ('/etc/fail2ban/jail.d',
135 ''
136 ),
137 ('/var/lib/fail2ban',
138 ''
139 ),
140 ('/usr/share/doc/fail2ban',
141 ['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',
142 'doc/run-rootless.txt']
143 )
144 ],
145 **setup_extra
146 )
147
148 # Do some checks after installation
149 # Search for obsolete files.
150 obsoleteFiles = []
151 elements = {
152 "/etc/":
153 [
154 "fail2ban.conf"
155 ],
156 "/usr/bin/":
157 [
158 "fail2ban.py"
159 ],
160 "/usr/lib/fail2ban/":
161 [
162 "version.py",
163 "protocol.py"
164 ]
165 }
166
167 for directory in elements:
168 for f in elements[directory]:
169 path = join(directory, f)
170 if isfile(path):
171 obsoleteFiles.append(path)
172
173 if obsoleteFiles:
174 print("")
175 print("Obsolete files from previous Fail2Ban versions were found on "
176 "your system.")
177 print("Please delete them:")
178 print("")
179 for f in obsoleteFiles:
180 print("\t" + f)
181 print("")
182
183 if isdir("/usr/lib/fail2ban"):
184 print("")
185 print("Fail2ban is not installed under /usr/lib anymore. The new "
186 "location is under /usr/share. Please remove the directory "
187 "/usr/lib/fail2ban and everything under this directory.")
188 print("")
189
190 # Update config file
191 if sys.argv[1] == "install":
192 print("")
193 print("Please do not forget to update your configuration files.")
194 print("They are in /etc/fail2ban/.")
195 print("")
196
```
Path: `fail2ban/client/filterreader.py`
Content:
```
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 noet :
3
4 # This file is part of Fail2Ban.
5 #
6 # Fail2Ban is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # Fail2Ban is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Fail2Ban; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19
20 # Author: Cyril Jaquier
21 #
22
23 __author__ = "Cyril Jaquier"
24 __copyright__ = "Copyright (c) 2004 Cyril Jaquier"
25 __license__ = "GPL"
26
27 import os, shlex
28
29 from .configreader import DefinitionInitConfigReader
30 from ..server.action import CommandAction
31 from ..helpers import getLogger
32
33 # Gets the instance of the logger.
34 logSys = getLogger(__name__)
35
36 class FilterReader(DefinitionInitConfigReader):
37
38 _configOpts = [
39 ["string", "ignoreregex", None],
40 ["string", "failregex", ""],
41 ]
42
43 def setFile(self, fileName):
44 self.__file = fileName
45 DefinitionInitConfigReader.setFile(self, os.path.join("filter.d", fileName))
46
47 def getFile(self):
48 return self.__file
49
50 def getCombined(self):
51 combinedopts = dict(list(self._opts.items()) + list(self._initOpts.items()))
52 if not len(combinedopts):
53 return {};
54 opts = CommandAction.substituteRecursiveTags(combinedopts)
55 if not opts:
56 raise ValueError('recursive tag definitions unable to be resolved')
57 return opts;
58
59 def convert(self):
60 stream = list()
61 opts = self.getCombined()
62 if not len(opts):
63 return stream;
64 for opt, value in opts.iteritems():
65 if opt == "failregex":
66 for regex in value.split('\n'):
67 # Do not send a command if the rule is empty.
68 if regex != '':
69 stream.append(["set", self._jailName, "addfailregex", regex])
70 elif opt == "ignoreregex":
71 for regex in value.split('\n'):
72 # Do not send a command if the rule is empty.
73 if regex != '':
74 stream.append(["set", self._jailName, "addignoreregex", regex])
75 if self._initOpts:
76 if 'maxlines' in self._initOpts:
77 # We warn when multiline regex is used without maxlines > 1
78 # therefore keep sure we set this option first.
79 stream.insert(0, ["set", self._jailName, "maxlines", self._initOpts["maxlines"]])
80 if 'datepattern' in self._initOpts:
81 stream.append(["set", self._jailName, "datepattern", self._initOpts["datepattern"]])
82 # Do not send a command if the match is empty.
83 if self._initOpts.get("journalmatch", '') != '':
84 for match in self._initOpts["journalmatch"].split("\n"):
85 stream.append(
86 ["set", self._jailName, "addjournalmatch"] +
87 shlex.split(match))
88 return stream
89
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/fail2ban/client/filterreader.py b/fail2ban/client/filterreader.py
--- a/fail2ban/client/filterreader.py
+++ b/fail2ban/client/filterreader.py
@@ -71,7 +71,7 @@
for regex in value.split('\n'):
# Do not send a command if the rule is empty.
if regex != '':
- stream.append(["set", self._jailName, "addignoreregex", regex])
+ stream.append(["set", self._jailName, "addignoreregex", regex])
if self._initOpts:
if 'maxlines' in self._initOpts:
# We warn when multiline regex is used without maxlines > 1
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -124,6 +124,9 @@
('/etc/fail2ban/filter.d',
glob("config/filter.d/*.conf")
),
+ ('/etc/fail2ban/filter.d/ignorecommands',
+ glob("config/filter.d/ignorecommands/*")
+ ),
('/etc/fail2ban/action.d',
glob("config/action.d/*.conf") +
glob("config/action.d/*.py")
|
{"golden_diff": "diff --git a/fail2ban/client/filterreader.py b/fail2ban/client/filterreader.py\n--- a/fail2ban/client/filterreader.py\n+++ b/fail2ban/client/filterreader.py\n@@ -71,7 +71,7 @@\n \t\t\t\tfor regex in value.split('\\n'):\n \t\t\t\t\t# Do not send a command if the rule is empty.\n \t\t\t\t\tif regex != '':\n-\t\t\t\t\t\tstream.append([\"set\", self._jailName, \"addignoreregex\", regex])\t\t\n+\t\t\t\t\t\tstream.append([\"set\", self._jailName, \"addignoreregex\", regex])\n \t\tif self._initOpts:\n \t\t\tif 'maxlines' in self._initOpts:\n \t\t\t\t# We warn when multiline regex is used without maxlines > 1\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -124,6 +124,9 @@\n \t\t('/etc/fail2ban/filter.d',\n \t\t\tglob(\"config/filter.d/*.conf\")\n \t\t),\n+\t\t('/etc/fail2ban/filter.d/ignorecommands',\n+\t\t\tglob(\"config/filter.d/ignorecommands/*\")\n+\t\t),\n \t\t('/etc/fail2ban/action.d',\n \t\t\tglob(\"config/action.d/*.conf\") +\n \t\t\tglob(\"config/action.d/*.py\")\n", "issue": "WARNING 'ignoreregex' not defined\nHello there\n\nI'm seeing this error each time I restart fail2ban:\n\n```\nWARNING 'ignoreregex' not defined in 'Definition'. Using default one: ''\n```\n\nNo idea to which one filter this is referring to. Any ideas?\n\nHere more information. Thanks!\n\n```\n$ fail2ban-client -d -v\nINFO Using socket file /var/run/fail2ban/fail2ban.sock\nWARNING 'ignoreregex' not defined in 'Definition'. Using default one: ''\n['set', 'loglevel', 3]\n['set', 'logtarget', '/var/log/fail2ban.log']\n...\n```\n\n```\n$ fail2ban-client -V\nFail2Ban v0.8.11\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n__author__ = \"Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors\"\n__license__ = \"GPL\"\n\ntry:\n\timport setuptools\n\tfrom setuptools import setup\nexcept ImportError:\n\tsetuptools = None\n\tfrom distutils.core import setup\n\ntry:\n\t# python 3.x\n\tfrom distutils.command.build_py import build_py_2to3 as build_py\n\tfrom distutils.command.build_scripts \\\n\t\timport build_scripts_2to3 as build_scripts\nexcept ImportError:\n\t# python 2.x\n\tfrom distutils.command.build_py import build_py\n\tfrom distutils.command.build_scripts import build_scripts\nimport os\nfrom os.path import isfile, join, isdir\nimport sys, warnings\nfrom glob import glob\n\nif setuptools and \"test\" in sys.argv:\n\timport logging\n\tlogSys = logging.getLogger(\"fail2ban\")\n\thdlr = logging.StreamHandler(sys.stdout)\n\tfmt = logging.Formatter(\"%(asctime)-15s %(message)s\")\n\thdlr.setFormatter(fmt)\n\tlogSys.addHandler(hdlr)\n\tif set([\"-q\", \"--quiet\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.CRITICAL)\n\t\twarnings.simplefilter(\"ignore\")\n\t\tsys.warnoptions.append(\"ignore\")\n\telif set([\"-v\", \"--verbose\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.DEBUG)\n\telse:\n\t\tlogSys.setLevel(logging.INFO)\nelif \"test\" in sys.argv:\n\tprint(\"python distribute required to execute fail2ban tests\")\n\tprint(\"\")\n\nlongdesc = '''\nFail2Ban scans log files like /var/log/pwdfail or\n/var/log/apache/error_log and bans IP that makes\ntoo many password failures. 
It updates firewall rules\nto reject the IP address or executes user defined\ncommands.'''\n\nif setuptools:\n\tsetup_extra = {\n\t\t'test_suite': \"fail2ban.tests.utils.gatherTests\",\n\t\t'use_2to3': True,\n\t}\nelse:\n\tsetup_extra = {}\n\n# Get version number, avoiding importing fail2ban.\n# This is due to tests not functioning for python3 as 2to3 takes place later\nexec(open(join(\"fail2ban\", \"version.py\")).read())\n\nsetup(\n\tname = \"fail2ban\",\n\tversion = version,\n\tdescription = \"Ban IPs that make too many password failures\",\n\tlong_description = longdesc,\n\tauthor = \"Cyril Jaquier & Fail2Ban Contributors\",\n\tauthor_email = \"[email protected]\",\n\turl = \"http://www.fail2ban.org\",\n\tlicense = \"GPL\",\n\tplatforms = \"Posix\",\n\tcmdclass = {'build_py': build_py, 'build_scripts': build_scripts},\n\tscripts = [\n\t\t'bin/fail2ban-client',\n\t\t'bin/fail2ban-server',\n\t\t'bin/fail2ban-regex',\n\t\t'bin/fail2ban-testcases',\n\t],\n\tpackages = [\n\t\t'fail2ban',\n\t\t'fail2ban.client',\n\t\t'fail2ban.server',\n\t\t'fail2ban.tests',\n\t\t'fail2ban.tests.action_d',\n\t],\n\tpackage_data = {\n\t\t'fail2ban.tests':\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/files')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/config')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/action_d')\n\t\t\t\tfor f in w[2]]\n\t},\n\tdata_files = [\n\t\t('/etc/fail2ban',\n\t\t\tglob(\"config/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d',\n\t\t\tglob(\"config/filter.d/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/action.d',\n\t\t\tglob(\"config/action.d/*.conf\") +\n\t\t\tglob(\"config/action.d/*.py\")\n\t\t),\n\t\t('/etc/fail2ban/fail2ban.d',\n\t\t\t''\n\t\t),\n\t\t('/etc/fail2ban/jail.d',\n\t\t\t''\n\t\t),\n\t\t('/var/lib/fail2ban',\n\t\t\t''\n\t\t),\n\t\t('/usr/share/doc/fail2ban',\n\t\t\t['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',\n\t\t\t 'doc/run-rootless.txt']\n\t\t)\n\t],\n\t**setup_extra\n)\n\n# Do some checks after installation\n# Search for obsolete files.\nobsoleteFiles = []\nelements = {\n\t\"/etc/\":\n\t\t[\n\t\t\t\"fail2ban.conf\"\n\t\t],\n\t\"/usr/bin/\":\n\t\t[\n\t\t\t\"fail2ban.py\"\n\t\t],\n\t\"/usr/lib/fail2ban/\":\n\t\t[\n\t\t\t\"version.py\",\n\t\t\t\"protocol.py\"\n\t\t]\n}\n\nfor directory in elements:\n\tfor f in elements[directory]:\n\t\tpath = join(directory, f)\n\t\tif isfile(path):\n\t\t\tobsoleteFiles.append(path)\n\nif obsoleteFiles:\n\tprint(\"\")\n\tprint(\"Obsolete files from previous Fail2Ban versions were found on \"\n\t\t \"your system.\")\n\tprint(\"Please delete them:\")\n\tprint(\"\")\n\tfor f in obsoleteFiles:\n\t\tprint(\"\\t\" + f)\n\tprint(\"\")\n\nif isdir(\"/usr/lib/fail2ban\"):\n\tprint(\"\")\n\tprint(\"Fail2ban is not installed under /usr/lib anymore. The new \"\n\t\t \"location is under /usr/share. 
Please remove the directory \"\n\t\t \"/usr/lib/fail2ban and everything under this directory.\")\n\tprint(\"\")\n\n# Update config file\nif sys.argv[1] == \"install\":\n\tprint(\"\")\n\tprint(\"Please do not forget to update your configuration files.\")\n\tprint(\"They are in /etc/fail2ban/.\")\n\tprint(\"\")\n", "path": "setup.py"}, {"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n# Author: Cyril Jaquier\n# \n\n__author__ = \"Cyril Jaquier\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier\"\n__license__ = \"GPL\"\n\nimport os, shlex\n\nfrom .configreader import DefinitionInitConfigReader\nfrom ..server.action import CommandAction\nfrom ..helpers import getLogger\n\n# Gets the instance of the logger.\nlogSys = getLogger(__name__)\n\nclass FilterReader(DefinitionInitConfigReader):\n\n\t_configOpts = [\n\t\t[\"string\", \"ignoreregex\", None],\n\t\t[\"string\", \"failregex\", \"\"],\n\t]\n\n\tdef setFile(self, fileName):\n\t\tself.__file = fileName\n\t\tDefinitionInitConfigReader.setFile(self, os.path.join(\"filter.d\", fileName))\n\t\n\tdef getFile(self):\n\t\treturn self.__file\n\n\tdef getCombined(self):\n\t\tcombinedopts = dict(list(self._opts.items()) + list(self._initOpts.items()))\n\t\tif not len(combinedopts):\n\t\t\treturn {};\n\t\topts = CommandAction.substituteRecursiveTags(combinedopts)\n\t\tif not opts:\n\t\t\traise ValueError('recursive tag definitions unable to be resolved')\n\t\treturn opts;\n\t\n\tdef convert(self):\n\t\tstream = list()\n\t\topts = self.getCombined()\n\t\tif not len(opts):\n\t\t\treturn stream;\n\t\tfor opt, value in opts.iteritems():\n\t\t\tif opt == \"failregex\":\n\t\t\t\tfor regex in value.split('\\n'):\n\t\t\t\t\t# Do not send a command if the rule is empty.\n\t\t\t\t\tif regex != '':\n\t\t\t\t\t\tstream.append([\"set\", self._jailName, \"addfailregex\", regex])\n\t\t\telif opt == \"ignoreregex\":\n\t\t\t\tfor regex in value.split('\\n'):\n\t\t\t\t\t# Do not send a command if the rule is empty.\n\t\t\t\t\tif regex != '':\n\t\t\t\t\t\tstream.append([\"set\", self._jailName, \"addignoreregex\", regex])\t\t\n\t\tif self._initOpts:\n\t\t\tif 'maxlines' in self._initOpts:\n\t\t\t\t# We warn when multiline regex is used without maxlines > 1\n\t\t\t\t# therefore keep sure we set this option first.\n\t\t\t\tstream.insert(0, [\"set\", self._jailName, \"maxlines\", self._initOpts[\"maxlines\"]])\n\t\t\tif 'datepattern' in self._initOpts:\n\t\t\t\tstream.append([\"set\", self._jailName, \"datepattern\", self._initOpts[\"datepattern\"]])\n\t\t\t# Do not send a command if the match is empty.\n\t\t\tif self._initOpts.get(\"journalmatch\", '') != '':\n\t\t\t\tfor match in 
self._initOpts[\"journalmatch\"].split(\"\\n\"):\n\t\t\t\t\tstream.append(\n\t\t\t\t\t\t[\"set\", self._jailName, \"addjournalmatch\"] +\n shlex.split(match))\n\t\treturn stream\n\t\t\n", "path": "fail2ban/client/filterreader.py"}], "after_files": [{"content": "#!/usr/bin/python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n__author__ = \"Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors\"\n__license__ = \"GPL\"\n\ntry:\n\timport setuptools\n\tfrom setuptools import setup\nexcept ImportError:\n\tsetuptools = None\n\tfrom distutils.core import setup\n\ntry:\n\t# python 3.x\n\tfrom distutils.command.build_py import build_py_2to3 as build_py\n\tfrom distutils.command.build_scripts \\\n\t\timport build_scripts_2to3 as build_scripts\nexcept ImportError:\n\t# python 2.x\n\tfrom distutils.command.build_py import build_py\n\tfrom distutils.command.build_scripts import build_scripts\nimport os\nfrom os.path import isfile, join, isdir\nimport sys, warnings\nfrom glob import glob\n\nif setuptools and \"test\" in sys.argv:\n\timport logging\n\tlogSys = logging.getLogger(\"fail2ban\")\n\thdlr = logging.StreamHandler(sys.stdout)\n\tfmt = logging.Formatter(\"%(asctime)-15s %(message)s\")\n\thdlr.setFormatter(fmt)\n\tlogSys.addHandler(hdlr)\n\tif set([\"-q\", \"--quiet\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.CRITICAL)\n\t\twarnings.simplefilter(\"ignore\")\n\t\tsys.warnoptions.append(\"ignore\")\n\telif set([\"-v\", \"--verbose\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.DEBUG)\n\telse:\n\t\tlogSys.setLevel(logging.INFO)\nelif \"test\" in sys.argv:\n\tprint(\"python distribute required to execute fail2ban tests\")\n\tprint(\"\")\n\nlongdesc = '''\nFail2Ban scans log files like /var/log/pwdfail or\n/var/log/apache/error_log and bans IP that makes\ntoo many password failures. 
It updates firewall rules\nto reject the IP address or executes user defined\ncommands.'''\n\nif setuptools:\n\tsetup_extra = {\n\t\t'test_suite': \"fail2ban.tests.utils.gatherTests\",\n\t\t'use_2to3': True,\n\t}\nelse:\n\tsetup_extra = {}\n\n# Get version number, avoiding importing fail2ban.\n# This is due to tests not functioning for python3 as 2to3 takes place later\nexec(open(join(\"fail2ban\", \"version.py\")).read())\n\nsetup(\n\tname = \"fail2ban\",\n\tversion = version,\n\tdescription = \"Ban IPs that make too many password failures\",\n\tlong_description = longdesc,\n\tauthor = \"Cyril Jaquier & Fail2Ban Contributors\",\n\tauthor_email = \"[email protected]\",\n\turl = \"http://www.fail2ban.org\",\n\tlicense = \"GPL\",\n\tplatforms = \"Posix\",\n\tcmdclass = {'build_py': build_py, 'build_scripts': build_scripts},\n\tscripts = [\n\t\t'bin/fail2ban-client',\n\t\t'bin/fail2ban-server',\n\t\t'bin/fail2ban-regex',\n\t\t'bin/fail2ban-testcases',\n\t],\n\tpackages = [\n\t\t'fail2ban',\n\t\t'fail2ban.client',\n\t\t'fail2ban.server',\n\t\t'fail2ban.tests',\n\t\t'fail2ban.tests.action_d',\n\t],\n\tpackage_data = {\n\t\t'fail2ban.tests':\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/files')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/config')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/action_d')\n\t\t\t\tfor f in w[2]]\n\t},\n\tdata_files = [\n\t\t('/etc/fail2ban',\n\t\t\tglob(\"config/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d',\n\t\t\tglob(\"config/filter.d/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d/ignorecommands',\n\t\t\tglob(\"config/filter.d/ignorecommands/*\")\n\t\t),\n\t\t('/etc/fail2ban/action.d',\n\t\t\tglob(\"config/action.d/*.conf\") +\n\t\t\tglob(\"config/action.d/*.py\")\n\t\t),\n\t\t('/etc/fail2ban/fail2ban.d',\n\t\t\t''\n\t\t),\n\t\t('/etc/fail2ban/jail.d',\n\t\t\t''\n\t\t),\n\t\t('/var/lib/fail2ban',\n\t\t\t''\n\t\t),\n\t\t('/usr/share/doc/fail2ban',\n\t\t\t['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',\n\t\t\t 'doc/run-rootless.txt']\n\t\t)\n\t],\n\t**setup_extra\n)\n\n# Do some checks after installation\n# Search for obsolete files.\nobsoleteFiles = []\nelements = {\n\t\"/etc/\":\n\t\t[\n\t\t\t\"fail2ban.conf\"\n\t\t],\n\t\"/usr/bin/\":\n\t\t[\n\t\t\t\"fail2ban.py\"\n\t\t],\n\t\"/usr/lib/fail2ban/\":\n\t\t[\n\t\t\t\"version.py\",\n\t\t\t\"protocol.py\"\n\t\t]\n}\n\nfor directory in elements:\n\tfor f in elements[directory]:\n\t\tpath = join(directory, f)\n\t\tif isfile(path):\n\t\t\tobsoleteFiles.append(path)\n\nif obsoleteFiles:\n\tprint(\"\")\n\tprint(\"Obsolete files from previous Fail2Ban versions were found on \"\n\t\t \"your system.\")\n\tprint(\"Please delete them:\")\n\tprint(\"\")\n\tfor f in obsoleteFiles:\n\t\tprint(\"\\t\" + f)\n\tprint(\"\")\n\nif isdir(\"/usr/lib/fail2ban\"):\n\tprint(\"\")\n\tprint(\"Fail2ban is not installed under /usr/lib anymore. The new \"\n\t\t \"location is under /usr/share. 
Please remove the directory \"\n\t\t \"/usr/lib/fail2ban and everything under this directory.\")\n\tprint(\"\")\n\n# Update config file\nif sys.argv[1] == \"install\":\n\tprint(\"\")\n\tprint(\"Please do not forget to update your configuration files.\")\n\tprint(\"They are in /etc/fail2ban/.\")\n\tprint(\"\")\n", "path": "setup.py"}, {"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n# Author: Cyril Jaquier\n# \n\n__author__ = \"Cyril Jaquier\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier\"\n__license__ = \"GPL\"\n\nimport os, shlex\n\nfrom .configreader import DefinitionInitConfigReader\nfrom ..server.action import CommandAction\nfrom ..helpers import getLogger\n\n# Gets the instance of the logger.\nlogSys = getLogger(__name__)\n\nclass FilterReader(DefinitionInitConfigReader):\n\n\t_configOpts = [\n\t\t[\"string\", \"ignoreregex\", None],\n\t\t[\"string\", \"failregex\", \"\"],\n\t]\n\n\tdef setFile(self, fileName):\n\t\tself.__file = fileName\n\t\tDefinitionInitConfigReader.setFile(self, os.path.join(\"filter.d\", fileName))\n\t\n\tdef getFile(self):\n\t\treturn self.__file\n\n\tdef getCombined(self):\n\t\tcombinedopts = dict(list(self._opts.items()) + list(self._initOpts.items()))\n\t\tif not len(combinedopts):\n\t\t\treturn {};\n\t\topts = CommandAction.substituteRecursiveTags(combinedopts)\n\t\tif not opts:\n\t\t\traise ValueError('recursive tag definitions unable to be resolved')\n\t\treturn opts;\n\t\n\tdef convert(self):\n\t\tstream = list()\n\t\topts = self.getCombined()\n\t\tif not len(opts):\n\t\t\treturn stream;\n\t\tfor opt, value in opts.iteritems():\n\t\t\tif opt == \"failregex\":\n\t\t\t\tfor regex in value.split('\\n'):\n\t\t\t\t\t# Do not send a command if the rule is empty.\n\t\t\t\t\tif regex != '':\n\t\t\t\t\t\tstream.append([\"set\", self._jailName, \"addfailregex\", regex])\n\t\t\telif opt == \"ignoreregex\":\n\t\t\t\tfor regex in value.split('\\n'):\n\t\t\t\t\t# Do not send a command if the rule is empty.\n\t\t\t\t\tif regex != '':\n\t\t\t\t\t\tstream.append([\"set\", self._jailName, \"addignoreregex\", regex])\n\t\tif self._initOpts:\n\t\t\tif 'maxlines' in self._initOpts:\n\t\t\t\t# We warn when multiline regex is used without maxlines > 1\n\t\t\t\t# therefore keep sure we set this option first.\n\t\t\t\tstream.insert(0, [\"set\", self._jailName, \"maxlines\", self._initOpts[\"maxlines\"]])\n\t\t\tif 'datepattern' in self._initOpts:\n\t\t\t\tstream.append([\"set\", self._jailName, \"datepattern\", self._initOpts[\"datepattern\"]])\n\t\t\t# Do not send a command if the match is empty.\n\t\t\tif self._initOpts.get(\"journalmatch\", '') != '':\n\t\t\t\tfor match in 
self._initOpts[\"journalmatch\"].split(\"\\n\"):\n\t\t\t\t\tstream.append(\n\t\t\t\t\t\t[\"set\", self._jailName, \"addjournalmatch\"] +\n shlex.split(match))\n\t\treturn stream\n\t\t\n", "path": "fail2ban/client/filterreader.py"}]}
| 3,506 | 287 |
gh_patches_debug_29390
|
rasdani/github-patches
|
git_diff
|
paperless-ngx__paperless-ngx-6302
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] At new splitting: "'QuerySet' object has no attribute 'extend'" - Workflow adding a custom field when adding a new document leads to the error message
### Description
When trying out the new splitting functionality I get the error message:
`'QuerySet' object has no attribute 'extend'`
A workflow that adds a custom field when a new document is added leads to the error message.
Anybody with the same issue?
### Steps to reproduce
1. Go to Documents
2. Edit Document
3. Split
4. Split Pages 1 and 2
5. Error message
### Webserver logs
```bash
[2024-04-06 11:18:26,873] [DEBUG] [paperless.tasks] Executing plugin WorkflowTriggerPlugin
[2024-04-06 11:18:26,905] [INFO] [paperless.matching] Document matched WorkflowTrigger 3 from Workflow: Benutzerdefinierte Felder hinzufügen
[2024-04-06 11:18:26,910] [ERROR] [paperless.tasks] WorkflowTriggerPlugin failed: 'QuerySet' object has no attribute 'extend'
Traceback (most recent call last):
File "/usr/src/paperless/src/documents/tasks.py", line 144, in consume_file
msg = plugin.run()
^^^^^^^^^^^^
File "/usr/src/paperless/src/documents/consumer.py", line 223, in run
self.metadata.update(overrides)
File "/usr/src/paperless/src/documents/data_models.py", line 64, in update
self.view_users.extend(other.view_users)
^^^^^^^^^^^^^^^^^^^^^^
AttributeError: 'QuerySet' object has no attribute 'extend'
```
### Browser logs
_No response_
### Paperless-ngx version
2.7.0
### Host OS
Docker on Synology NAS - DSM 7.2
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
_No response_
### Other
_No response_
### Please confirm the following
- [X] I believe this issue is a bug that affects all users of Paperless-ngx, not something specific to my installation.
- [X] I have already searched for relevant existing issues and discussions before opening this report.
- [X] I have updated the title field above with a concise description.
--- END ISSUE ---
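To make the failure mode above concrete before looking at the repository files, here is a minimal, self-contained sketch; the `FakeQuerySet` class is a hypothetical stand-in for Django's lazy `QuerySet` (the kind of object `values_list(..., flat=True)` returns) and is not part of the project code:
```python
# Sketch of the crash: update() calls self.view_users.extend(...), which assumes
# view_users is a plain list, but a lazy QuerySet-like object has no list methods.

class FakeQuerySet:
    """Hypothetical stand-in for a lazy Django QuerySet: iterable, but not a list."""

    def __init__(self, ids):
        self._ids = ids

    def __iter__(self):
        return iter(self._ids)


view_users = FakeQuerySet([1, 2])    # what the overrides object ends up storing
try:
    view_users.extend([3])           # what the merge logic later attempts
except AttributeError as exc:
    print(exc)                       # ... object has no attribute 'extend'

# Materialising the ids eagerly, as is already done for tag_ids, avoids the crash:
view_users = list(FakeQuerySet([1, 2]))
view_users.extend([3])
print(view_users)                    # [1, 2, 3]
```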
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/data_models.py`
Content:
```
1 import dataclasses
2 import datetime
3 from enum import IntEnum
4 from pathlib import Path
5 from typing import Optional
6
7 import magic
8 from guardian.shortcuts import get_groups_with_perms
9 from guardian.shortcuts import get_users_with_perms
10
11
12 @dataclasses.dataclass
13 class DocumentMetadataOverrides:
14 """
15 Manages overrides for document fields which normally would
16 be set from content or matching. All fields default to None,
17 meaning no override is happening
18 """
19
20 filename: Optional[str] = None
21 title: Optional[str] = None
22 correspondent_id: Optional[int] = None
23 document_type_id: Optional[int] = None
24 tag_ids: Optional[list[int]] = None
25 storage_path_id: Optional[int] = None
26 created: Optional[datetime.datetime] = None
27 asn: Optional[int] = None
28 owner_id: Optional[int] = None
29 view_users: Optional[list[int]] = None
30 view_groups: Optional[list[int]] = None
31 change_users: Optional[list[int]] = None
32 change_groups: Optional[list[int]] = None
33 custom_field_ids: Optional[list[int]] = None
34
35 def update(self, other: "DocumentMetadataOverrides") -> "DocumentMetadataOverrides":
36 """
37 Merges two DocumentMetadataOverrides objects such that object B's overrides
38 are applied to object A or merged if multiple are accepted.
39
40 The update is an in-place modification of self
41 """
42 # only if empty
43 if other.title is not None:
44 self.title = other.title
45 if other.correspondent_id is not None:
46 self.correspondent_id = other.correspondent_id
47 if other.document_type_id is not None:
48 self.document_type_id = other.document_type_id
49 if other.storage_path_id is not None:
50 self.storage_path_id = other.storage_path_id
51 if other.owner_id is not None:
52 self.owner_id = other.owner_id
53
54 # merge
55 if self.tag_ids is None:
56 self.tag_ids = other.tag_ids
57 elif other.tag_ids is not None:
58 self.tag_ids.extend(other.tag_ids)
59 self.tag_ids = list(set(self.tag_ids))
60
61 if self.view_users is None:
62 self.view_users = other.view_users
63 elif other.view_users is not None:
64 self.view_users.extend(other.view_users)
65 self.view_users = list(set(self.view_users))
66
67 if self.view_groups is None:
68 self.view_groups = other.view_groups
69 elif other.view_groups is not None:
70 self.view_groups.extend(other.view_groups)
71 self.view_groups = list(set(self.view_groups))
72
73 if self.change_users is None:
74 self.change_users = other.change_users
75 elif other.change_users is not None:
76 self.change_users.extend(other.change_users)
77 self.change_users = list(set(self.change_users))
78
79 if self.change_groups is None:
80 self.change_groups = other.change_groups
81 elif other.change_groups is not None:
82 self.change_groups.extend(other.change_groups)
83 self.change_groups = list(set(self.change_groups))
84
85 if self.custom_field_ids is None:
86 self.custom_field_ids = other.custom_field_ids
87 elif other.custom_field_ids is not None:
88 self.custom_field_ids.extend(other.custom_field_ids)
89 self.custom_field_ids = list(set(self.custom_field_ids))
90
91 return self
92
93 @staticmethod
94 def from_document(doc) -> "DocumentMetadataOverrides":
95 """
96 Fills in the overrides from a document object
97 """
98 overrides = DocumentMetadataOverrides()
99 overrides.title = doc.title
100 overrides.correspondent_id = doc.correspondent.id if doc.correspondent else None
101 overrides.document_type_id = doc.document_type.id if doc.document_type else None
102 overrides.storage_path_id = doc.storage_path.id if doc.storage_path else None
103 overrides.owner_id = doc.owner.id if doc.owner else None
104 overrides.tag_ids = list(doc.tags.values_list("id", flat=True))
105
106 overrides.view_users = get_users_with_perms(
107 doc,
108 only_with_perms_in=["view_document"],
109 ).values_list("id", flat=True)
110 overrides.change_users = get_users_with_perms(
111 doc,
112 only_with_perms_in=["change_document"],
113 ).values_list("id", flat=True)
114 overrides.custom_field_ids = list(
115 doc.custom_fields.values_list("id", flat=True),
116 )
117
118 groups_with_perms = get_groups_with_perms(
119 doc,
120 attach_perms=True,
121 )
122 overrides.view_groups = [
123 group.id for group, perms in groups_with_perms if "view_document" in perms
124 ]
125 overrides.change_groups = [
126 group.id for group, perms in groups_with_perms if "change_document" in perms
127 ]
128
129 return overrides
130
131
132 class DocumentSource(IntEnum):
133 """
134 The source of an incoming document. May have other uses in the future
135 """
136
137 ConsumeFolder = 1
138 ApiUpload = 2
139 MailFetch = 3
140
141
142 @dataclasses.dataclass
143 class ConsumableDocument:
144 """
145 Encapsulates an incoming document, either from consume folder, API upload
146 or mail fetching and certain useful operations on it.
147 """
148
149 source: DocumentSource
150 original_file: Path
151 mailrule_id: Optional[int] = None
152 mime_type: str = dataclasses.field(init=False, default=None)
153
154 def __post_init__(self):
155 """
156 After a dataclass is initialized, this is called to finalize some data
157 1. Make sure the original path is an absolute, fully qualified path
158 2. Get the mime type of the file
159 """
160 # Always fully qualify the path first thing
161 # Just in case, convert to a path if it's a str
162 self.original_file = Path(self.original_file).resolve()
163
164 # Get the file type once at init
165 # Note this function isn't called when the object is unpickled
166 self.mime_type = magic.from_file(self.original_file, mime=True)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/documents/data_models.py b/src/documents/data_models.py
--- a/src/documents/data_models.py
+++ b/src/documents/data_models.py
@@ -103,14 +103,18 @@
overrides.owner_id = doc.owner.id if doc.owner else None
overrides.tag_ids = list(doc.tags.values_list("id", flat=True))
- overrides.view_users = get_users_with_perms(
- doc,
- only_with_perms_in=["view_document"],
- ).values_list("id", flat=True)
- overrides.change_users = get_users_with_perms(
- doc,
- only_with_perms_in=["change_document"],
- ).values_list("id", flat=True)
+ overrides.view_users = list(
+ get_users_with_perms(
+ doc,
+ only_with_perms_in=["view_document"],
+ ).values_list("id", flat=True),
+ )
+ overrides.change_users = list(
+ get_users_with_perms(
+ doc,
+ only_with_perms_in=["change_document"],
+ ).values_list("id", flat=True),
+ )
overrides.custom_field_ids = list(
doc.custom_fields.values_list("id", flat=True),
)
@@ -120,10 +124,14 @@
attach_perms=True,
)
overrides.view_groups = [
- group.id for group, perms in groups_with_perms if "view_document" in perms
+ group.id
+ for group in groups_with_perms
+ if "view_document" in groups_with_perms[group]
]
overrides.change_groups = [
- group.id for group, perms in groups_with_perms if "change_document" in perms
+ group.id
+ for group in groups_with_perms
+ if "change_document" in groups_with_perms[group]
]
return overrides
|
{"golden_diff": "diff --git a/src/documents/data_models.py b/src/documents/data_models.py\n--- a/src/documents/data_models.py\n+++ b/src/documents/data_models.py\n@@ -103,14 +103,18 @@\n overrides.owner_id = doc.owner.id if doc.owner else None\n overrides.tag_ids = list(doc.tags.values_list(\"id\", flat=True))\n \n- overrides.view_users = get_users_with_perms(\n- doc,\n- only_with_perms_in=[\"view_document\"],\n- ).values_list(\"id\", flat=True)\n- overrides.change_users = get_users_with_perms(\n- doc,\n- only_with_perms_in=[\"change_document\"],\n- ).values_list(\"id\", flat=True)\n+ overrides.view_users = list(\n+ get_users_with_perms(\n+ doc,\n+ only_with_perms_in=[\"view_document\"],\n+ ).values_list(\"id\", flat=True),\n+ )\n+ overrides.change_users = list(\n+ get_users_with_perms(\n+ doc,\n+ only_with_perms_in=[\"change_document\"],\n+ ).values_list(\"id\", flat=True),\n+ )\n overrides.custom_field_ids = list(\n doc.custom_fields.values_list(\"id\", flat=True),\n )\n@@ -120,10 +124,14 @@\n attach_perms=True,\n )\n overrides.view_groups = [\n- group.id for group, perms in groups_with_perms if \"view_document\" in perms\n+ group.id\n+ for group in groups_with_perms\n+ if \"view_document\" in groups_with_perms[group]\n ]\n overrides.change_groups = [\n- group.id for group, perms in groups_with_perms if \"change_document\" in perms\n+ group.id\n+ for group in groups_with_perms\n+ if \"change_document\" in groups_with_perms[group]\n ]\n \n return overrides\n", "issue": "[BUG ]At new splitting: \"QuerySet' object has no attribute 'extend'\" - Workflow adding a custom field on adding new document leads to the error message\n### Description\n\nWhen trying out the new splitting functionality I get the error message: \r\n\r\n 'QuerySet' object has no attribute 'extend'\r\n\r\nA Workflow for adding a custom field on adding new document leads to the error message.\r\n\r\nAnybody with the same issue?\r\n\n\n### Steps to reproduce\n\n1. Go to Documents\r\n2. Edit Document\r\n3. Split\r\n4. Split Pages 1 and 2\r\n5. 
Error message\n\n### Webserver logs\n\n```bash\n[2024-04-06 11:18:26,873] [DEBUG] [paperless.tasks] Executing plugin WorkflowTriggerPlugin\r\n\r\n[2024-04-06 11:18:26,905] [INFO] [paperless.matching] Document matched WorkflowTrigger 3 from Workflow: Benutzerdefinierte Felder hinzuf\u00fcgen\r\n\r\n[2024-04-06 11:18:26,910] [ERROR] [paperless.tasks] WorkflowTriggerPlugin failed: 'QuerySet' object has no attribute 'extend'\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/src/paperless/src/documents/tasks.py\", line 144, in consume_file\r\n\r\n msg = plugin.run()\r\n\r\n ^^^^^^^^^^^^\r\n\r\n File \"/usr/src/paperless/src/documents/consumer.py\", line 223, in run\r\n\r\n self.metadata.update(overrides)\r\n\r\n File \"/usr/src/paperless/src/documents/data_models.py\", line 64, in update\r\n\r\n self.view_users.extend(other.view_users)\r\n\r\n ^^^^^^^^^^^^^^^^^^^^^^\r\n\r\nAttributeError: 'QuerySet' object has no attribute 'extend'\n```\n\n\n### Browser logs\n\n_No response_\n\n### Paperless-ngx version\n\n2.7.0\n\n### Host OS\n\nDocker on Synology NAS - DSM 7.2\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nFirefox\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n\n### Please confirm the following\n\n- [X] I believe this issue is a bug that affects all users of Paperless-ngx, not something specific to my installation.\n- [X] I have already searched for relevant existing issues and discussions before opening this report.\n- [X] I have updated the title field above with a concise description.\n", "before_files": [{"content": "import dataclasses\nimport datetime\nfrom enum import IntEnum\nfrom pathlib import Path\nfrom typing import Optional\n\nimport magic\nfrom guardian.shortcuts import get_groups_with_perms\nfrom guardian.shortcuts import get_users_with_perms\n\n\[email protected]\nclass DocumentMetadataOverrides:\n \"\"\"\n Manages overrides for document fields which normally would\n be set from content or matching. 
All fields default to None,\n meaning no override is happening\n \"\"\"\n\n filename: Optional[str] = None\n title: Optional[str] = None\n correspondent_id: Optional[int] = None\n document_type_id: Optional[int] = None\n tag_ids: Optional[list[int]] = None\n storage_path_id: Optional[int] = None\n created: Optional[datetime.datetime] = None\n asn: Optional[int] = None\n owner_id: Optional[int] = None\n view_users: Optional[list[int]] = None\n view_groups: Optional[list[int]] = None\n change_users: Optional[list[int]] = None\n change_groups: Optional[list[int]] = None\n custom_field_ids: Optional[list[int]] = None\n\n def update(self, other: \"DocumentMetadataOverrides\") -> \"DocumentMetadataOverrides\":\n \"\"\"\n Merges two DocumentMetadataOverrides objects such that object B's overrides\n are applied to object A or merged if multiple are accepted.\n\n The update is an in-place modification of self\n \"\"\"\n # only if empty\n if other.title is not None:\n self.title = other.title\n if other.correspondent_id is not None:\n self.correspondent_id = other.correspondent_id\n if other.document_type_id is not None:\n self.document_type_id = other.document_type_id\n if other.storage_path_id is not None:\n self.storage_path_id = other.storage_path_id\n if other.owner_id is not None:\n self.owner_id = other.owner_id\n\n # merge\n if self.tag_ids is None:\n self.tag_ids = other.tag_ids\n elif other.tag_ids is not None:\n self.tag_ids.extend(other.tag_ids)\n self.tag_ids = list(set(self.tag_ids))\n\n if self.view_users is None:\n self.view_users = other.view_users\n elif other.view_users is not None:\n self.view_users.extend(other.view_users)\n self.view_users = list(set(self.view_users))\n\n if self.view_groups is None:\n self.view_groups = other.view_groups\n elif other.view_groups is not None:\n self.view_groups.extend(other.view_groups)\n self.view_groups = list(set(self.view_groups))\n\n if self.change_users is None:\n self.change_users = other.change_users\n elif other.change_users is not None:\n self.change_users.extend(other.change_users)\n self.change_users = list(set(self.change_users))\n\n if self.change_groups is None:\n self.change_groups = other.change_groups\n elif other.change_groups is not None:\n self.change_groups.extend(other.change_groups)\n self.change_groups = list(set(self.change_groups))\n\n if self.custom_field_ids is None:\n self.custom_field_ids = other.custom_field_ids\n elif other.custom_field_ids is not None:\n self.custom_field_ids.extend(other.custom_field_ids)\n self.custom_field_ids = list(set(self.custom_field_ids))\n\n return self\n\n @staticmethod\n def from_document(doc) -> \"DocumentMetadataOverrides\":\n \"\"\"\n Fills in the overrides from a document object\n \"\"\"\n overrides = DocumentMetadataOverrides()\n overrides.title = doc.title\n overrides.correspondent_id = doc.correspondent.id if doc.correspondent else None\n overrides.document_type_id = doc.document_type.id if doc.document_type else None\n overrides.storage_path_id = doc.storage_path.id if doc.storage_path else None\n overrides.owner_id = doc.owner.id if doc.owner else None\n overrides.tag_ids = list(doc.tags.values_list(\"id\", flat=True))\n\n overrides.view_users = get_users_with_perms(\n doc,\n only_with_perms_in=[\"view_document\"],\n ).values_list(\"id\", flat=True)\n overrides.change_users = get_users_with_perms(\n doc,\n only_with_perms_in=[\"change_document\"],\n ).values_list(\"id\", flat=True)\n overrides.custom_field_ids = list(\n doc.custom_fields.values_list(\"id\", flat=True),\n 
)\n\n groups_with_perms = get_groups_with_perms(\n doc,\n attach_perms=True,\n )\n overrides.view_groups = [\n group.id for group, perms in groups_with_perms if \"view_document\" in perms\n ]\n overrides.change_groups = [\n group.id for group, perms in groups_with_perms if \"change_document\" in perms\n ]\n\n return overrides\n\n\nclass DocumentSource(IntEnum):\n \"\"\"\n The source of an incoming document. May have other uses in the future\n \"\"\"\n\n ConsumeFolder = 1\n ApiUpload = 2\n MailFetch = 3\n\n\[email protected]\nclass ConsumableDocument:\n \"\"\"\n Encapsulates an incoming document, either from consume folder, API upload\n or mail fetching and certain useful operations on it.\n \"\"\"\n\n source: DocumentSource\n original_file: Path\n mailrule_id: Optional[int] = None\n mime_type: str = dataclasses.field(init=False, default=None)\n\n def __post_init__(self):\n \"\"\"\n After a dataclass is initialized, this is called to finalize some data\n 1. Make sure the original path is an absolute, fully qualified path\n 2. Get the mime type of the file\n \"\"\"\n # Always fully qualify the path first thing\n # Just in case, convert to a path if it's a str\n self.original_file = Path(self.original_file).resolve()\n\n # Get the file type once at init\n # Note this function isn't called when the object is unpickled\n self.mime_type = magic.from_file(self.original_file, mime=True)\n", "path": "src/documents/data_models.py"}], "after_files": [{"content": "import dataclasses\nimport datetime\nfrom enum import IntEnum\nfrom pathlib import Path\nfrom typing import Optional\n\nimport magic\nfrom guardian.shortcuts import get_groups_with_perms\nfrom guardian.shortcuts import get_users_with_perms\n\n\[email protected]\nclass DocumentMetadataOverrides:\n \"\"\"\n Manages overrides for document fields which normally would\n be set from content or matching. 
All fields default to None,\n meaning no override is happening\n \"\"\"\n\n filename: Optional[str] = None\n title: Optional[str] = None\n correspondent_id: Optional[int] = None\n document_type_id: Optional[int] = None\n tag_ids: Optional[list[int]] = None\n storage_path_id: Optional[int] = None\n created: Optional[datetime.datetime] = None\n asn: Optional[int] = None\n owner_id: Optional[int] = None\n view_users: Optional[list[int]] = None\n view_groups: Optional[list[int]] = None\n change_users: Optional[list[int]] = None\n change_groups: Optional[list[int]] = None\n custom_field_ids: Optional[list[int]] = None\n\n def update(self, other: \"DocumentMetadataOverrides\") -> \"DocumentMetadataOverrides\":\n \"\"\"\n Merges two DocumentMetadataOverrides objects such that object B's overrides\n are applied to object A or merged if multiple are accepted.\n\n The update is an in-place modification of self\n \"\"\"\n # only if empty\n if other.title is not None:\n self.title = other.title\n if other.correspondent_id is not None:\n self.correspondent_id = other.correspondent_id\n if other.document_type_id is not None:\n self.document_type_id = other.document_type_id\n if other.storage_path_id is not None:\n self.storage_path_id = other.storage_path_id\n if other.owner_id is not None:\n self.owner_id = other.owner_id\n\n # merge\n if self.tag_ids is None:\n self.tag_ids = other.tag_ids\n elif other.tag_ids is not None:\n self.tag_ids.extend(other.tag_ids)\n self.tag_ids = list(set(self.tag_ids))\n\n if self.view_users is None:\n self.view_users = other.view_users\n elif other.view_users is not None:\n self.view_users.extend(other.view_users)\n self.view_users = list(set(self.view_users))\n\n if self.view_groups is None:\n self.view_groups = other.view_groups\n elif other.view_groups is not None:\n self.view_groups.extend(other.view_groups)\n self.view_groups = list(set(self.view_groups))\n\n if self.change_users is None:\n self.change_users = other.change_users\n elif other.change_users is not None:\n self.change_users.extend(other.change_users)\n self.change_users = list(set(self.change_users))\n\n if self.change_groups is None:\n self.change_groups = other.change_groups\n elif other.change_groups is not None:\n self.change_groups.extend(other.change_groups)\n self.change_groups = list(set(self.change_groups))\n\n if self.custom_field_ids is None:\n self.custom_field_ids = other.custom_field_ids\n elif other.custom_field_ids is not None:\n self.custom_field_ids.extend(other.custom_field_ids)\n self.custom_field_ids = list(set(self.custom_field_ids))\n\n return self\n\n @staticmethod\n def from_document(doc) -> \"DocumentMetadataOverrides\":\n \"\"\"\n Fills in the overrides from a document object\n \"\"\"\n overrides = DocumentMetadataOverrides()\n overrides.title = doc.title\n overrides.correspondent_id = doc.correspondent.id if doc.correspondent else None\n overrides.document_type_id = doc.document_type.id if doc.document_type else None\n overrides.storage_path_id = doc.storage_path.id if doc.storage_path else None\n overrides.owner_id = doc.owner.id if doc.owner else None\n overrides.tag_ids = list(doc.tags.values_list(\"id\", flat=True))\n\n overrides.view_users = list(\n get_users_with_perms(\n doc,\n only_with_perms_in=[\"view_document\"],\n ).values_list(\"id\", flat=True),\n )\n overrides.change_users = list(\n get_users_with_perms(\n doc,\n only_with_perms_in=[\"change_document\"],\n ).values_list(\"id\", flat=True),\n )\n overrides.custom_field_ids = list(\n 
doc.custom_fields.values_list(\"id\", flat=True),\n )\n\n groups_with_perms = get_groups_with_perms(\n doc,\n attach_perms=True,\n )\n overrides.view_groups = [\n group.id\n for group in groups_with_perms\n if \"view_document\" in groups_with_perms[group]\n ]\n overrides.change_groups = [\n group.id\n for group in groups_with_perms\n if \"change_document\" in groups_with_perms[group]\n ]\n\n return overrides\n\n\nclass DocumentSource(IntEnum):\n \"\"\"\n The source of an incoming document. May have other uses in the future\n \"\"\"\n\n ConsumeFolder = 1\n ApiUpload = 2\n MailFetch = 3\n\n\[email protected]\nclass ConsumableDocument:\n \"\"\"\n Encapsulates an incoming document, either from consume folder, API upload\n or mail fetching and certain useful operations on it.\n \"\"\"\n\n source: DocumentSource\n original_file: Path\n mailrule_id: Optional[int] = None\n mime_type: str = dataclasses.field(init=False, default=None)\n\n def __post_init__(self):\n \"\"\"\n After a dataclass is initialized, this is called to finalize some data\n 1. Make sure the original path is an absolute, fully qualified path\n 2. Get the mime type of the file\n \"\"\"\n # Always fully qualify the path first thing\n # Just in case, convert to a path if it's a str\n self.original_file = Path(self.original_file).resolve()\n\n # Get the file type once at init\n # Note this function isn't called when the object is unpickled\n self.mime_type = magic.from_file(self.original_file, mime=True)\n", "path": "src/documents/data_models.py"}]}
| 2,473 | 397 |
gh_patches_debug_649
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1997
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.116
On the docket:
+ [x] The --resolve-local-platforms option does not work with --complete-platforms #1899
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.115"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.115"
+__version__ = "2.1.116"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.115\"\n+__version__ = \"2.1.116\"\n", "issue": "Release 2.1.116\nOn the docket:\r\n+ [x] The --resolve-local-platforms option does not work with --complete-platforms #1899\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.115\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.116\"\n", "path": "pex/version.py"}]}
| 348 | 98 |
gh_patches_debug_14330
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-5825
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version Scheme Change
We've talked on and off for quite a few years about our versioning scheme for `cryptography`, but #5771 made it very clear that despite our [versioning documentation](https://cryptography.io/en/latest/api-stability.html#versioning) some users still assume it's [SemVer](https://semver.org) and are caught off guard by changes. I propose that we **switch to a [CalVer](https://calver.org) scheme** for the thirty-fifth feature release (which we currently would call 3.5). This has the advantage of being unambiguously not semantic versioning while still encoding some vaguely useful information (the year of release) in it.
### Alternate Choices
**Stay the course**
We've successfully used our versioning scheme for a bit over 7 years now and the probability of another monumental shift like this is low. There is, however, a constant (but low) background radiation of people who are tripped up by making assumptions about our versioning so I would argue against this choice.
**SemVer**
Switch to using an actual semantic versioning scheme. Without re-litigating years of conversations, I don't believe either @alex or myself are interested in this option. (See https://github.com/pyca/cryptography/issues/5801#issuecomment-776067787 for a bit of elaboration on our position)
**Firefox/Chrome Versioning**
(From @alex)
"Its merits are that it's technically semver compatible, it's fairly predictable, and it communicates at least a little info on the relative age of the project.
Its demerits are that it probably won't solve the real problem here, which was that folks were surprised a major change happened at all."
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/__about__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5
6 __all__ = [
7 "__title__",
8 "__summary__",
9 "__uri__",
10 "__version__",
11 "__author__",
12 "__email__",
13 "__license__",
14 "__copyright__",
15 ]
16
17 __title__ = "cryptography"
18 __summary__ = (
19 "cryptography is a package which provides cryptographic recipes"
20 " and primitives to Python developers."
21 )
22 __uri__ = "https://github.com/pyca/cryptography"
23
24 __version__ = "3.5.dev1"
25
26 __author__ = "The Python Cryptographic Authority and individual contributors"
27 __email__ = "[email protected]"
28
29 __license__ = "BSD or Apache License, Version 2.0"
30 __copyright__ = "Copyright 2013-2021 {}".format(__author__)
31
```
Path: `vectors/cryptography_vectors/__about__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 __all__ = [
6 "__title__",
7 "__summary__",
8 "__uri__",
9 "__version__",
10 "__author__",
11 "__email__",
12 "__license__",
13 "__copyright__",
14 ]
15
16 __title__ = "cryptography_vectors"
17 __summary__ = "Test vectors for the cryptography package."
18
19 __uri__ = "https://github.com/pyca/cryptography"
20
21 __version__ = "3.5.dev1"
22
23 __author__ = "The Python Cryptographic Authority and individual contributors"
24 __email__ = "[email protected]"
25
26 __license__ = "BSD or Apache License, Version 2.0"
27 __copyright__ = "Copyright 2013-2021 %s" % __author__
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py
--- a/src/cryptography/__about__.py
+++ b/src/cryptography/__about__.py
@@ -21,7 +21,7 @@
)
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "3.5.dev1"
+__version__ = "35.0.0.dev1"
__author__ = "The Python Cryptographic Authority and individual contributors"
__email__ = "[email protected]"
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py
--- a/vectors/cryptography_vectors/__about__.py
+++ b/vectors/cryptography_vectors/__about__.py
@@ -18,7 +18,7 @@
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "3.5.dev1"
+__version__ = "35.0.0.dev1"
__author__ = "The Python Cryptographic Authority and individual contributors"
__email__ = "[email protected]"
|
{"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -21,7 +21,7 @@\n )\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.5.dev1\"\n+__version__ = \"35.0.0.dev1\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -18,7 +18,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"3.5.dev1\"\n+__version__ = \"35.0.0.dev1\"\n \n __author__ = \"The Python Cryptographic Authority and individual contributors\"\n __email__ = \"[email protected]\"\n", "issue": "Version Scheme Change\nWe've talked on and off for quite a few years about our versioning scheme for `cryptography`, but #5771 made it very clear that despite our [versioning documentation](https://cryptography.io/en/latest/api-stability.html#versioning) some users still assume it's [SemVer](https://semver.org) and are caught off guard by changes. I propose that we **switch to a [CalVer](https://calver.org) scheme** for the thirty fifth feature release (which we currently would call 3.5). This has the advantage of being unambiguously not semantic versioning but encoding some vaguely useful information (the year of release) in it.\r\n\r\n### Alternate Choices\r\n**Stay the course**\r\nWe've successfully used our versioning scheme for a bit over 7 years now and the probability of another monumental shift like this is low. There is, however, a constant (but low) background radiation of people who are tripped up by making assumptions about our versioning so I would argue against this choice.\r\n\r\n**SemVer**\r\nSwitch to using an actual semantic versioning scheme. Without re-litigating years of conversations, I don't believe either @alex or myself are interested in this option. (See https://github.com/pyca/cryptography/issues/5801#issuecomment-776067787 for a bit of elaboration on our position)\r\n\r\n**Firefox/Chrome Versioning**\r\n(From @alex)\r\n\"Its merits are that it's technically semver compatible, it's fairly predictable, and it communicates at least a little info on the relative age of the project.\r\n\r\nIts demerits are that it probably won't solve the real problem here, which was that folks were surprised a major change happened at all.\"\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.5.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"3.5.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\n \"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\"\n)\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"35.0.0.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 {}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n__all__ = [\n \"__title__\",\n \"__summary__\",\n \"__uri__\",\n \"__version__\",\n \"__author__\",\n \"__email__\",\n \"__license__\",\n \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"35.0.0.dev1\"\n\n__author__ = \"The Python Cryptographic Authority and individual contributors\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2021 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]}
| 1,176 | 252 |
gh_patches_debug_4829
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-2178
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reset in locales menu causes crash
Using reset in the _Locales_ menu will cause a crash.
```
Traceback (most recent call last):
File "/home/scripttest/archinstall/.venv/bin/archinstall", line 8, in <module>
sys.exit(run_as_a_module())
^^^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/__init__.py", line 291, in run_as_a_module
importlib.import_module(mod_name)
File "/usr/lib/python3.11/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 940, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/home/scripttest/archinstall/archinstall/scripts/guided.py", line 234, in <module>
ask_user_questions()
File "/home/scripttest/archinstall/archinstall/scripts/guided.py", line 99, in ask_user_questions
global_menu.run()
File "/home/scripttest/archinstall/archinstall/lib/menu/abstract_menu.py", line 348, in run
if not self._process_selection(value):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/lib/menu/abstract_menu.py", line 365, in _process_selection
return self.exec_option(config_name, selector)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/lib/menu/abstract_menu.py", line 386, in exec_option
result = selector.func(presel_val)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/lib/global_menu.py", line 53, in <lambda>
lambda preset: self._locale_selection(preset),
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/lib/global_menu.py", line 246, in _locale_selection
locale_config = LocaleMenu(data_store, preset).run()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/scripttest/archinstall/archinstall/lib/locale/locale_menu.py", line 84, in run
self._data_store['keyboard-layout'],
~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^
KeyError: 'keyboard-layout'
```
--- END ISSUE ---
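To see the failure mode in isolation before looking at the repository files, here is a minimal sketch; the plain dictionary and the `DEFAULTS` mapping are hypothetical stand-ins for the menu's data store, not archinstall's own objects:
```python
# After the user picks Reset, the sub-menu's data store comes back empty,
# but the configuration is still built by indexing it unconditionally.
data_store = {}

try:
    data_store['keyboard-layout']
except KeyError as exc:
    print('unconditional lookup raises KeyError:', exc)

# A defensive variant: fall back to a default configuration when the store is empty.
DEFAULTS = {'keyboard-layout': 'us', 'sys-language': 'en_US', 'sys-encoding': 'UTF-8'}

def build_config(store):
    if not store:                  # reset left nothing behind
        return dict(DEFAULTS)
    return {key: store[key] for key in DEFAULTS}

print(build_config(data_store))    # falls back to the defaults instead of crashing
```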
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/locale/locale_menu.py`
Content:
```
1 from dataclasses import dataclass
2 from typing import Dict, Any, TYPE_CHECKING, Optional
3
4 from .utils import list_keyboard_languages, list_locales, set_kb_layout
5 from ..menu import Selector, AbstractSubMenu, MenuSelectionType, Menu
6
7 if TYPE_CHECKING:
8 _: Any
9
10
11 @dataclass
12 class LocaleConfiguration:
13 kb_layout: str
14 sys_lang: str
15 sys_enc: str
16
17 @staticmethod
18 def default() -> 'LocaleConfiguration':
19 return LocaleConfiguration('us', 'en_US', 'UTF-8')
20
21 def json(self) -> Dict[str, str]:
22 return {
23 'kb_layout': self.kb_layout,
24 'sys_lang': self.sys_lang,
25 'sys_enc': self.sys_enc
26 }
27
28 @classmethod
29 def _load_config(cls, config: 'LocaleConfiguration', args: Dict[str, Any]) -> 'LocaleConfiguration':
30 if 'sys_lang' in args:
31 config.sys_lang = args['sys_lang']
32 if 'sys_enc' in args:
33 config.sys_enc = args['sys_enc']
34 if 'kb_layout' in args:
35 config.kb_layout = args['kb_layout']
36
37 return config
38
39 @classmethod
40 def parse_arg(cls, args: Dict[str, Any]) -> 'LocaleConfiguration':
41 default = cls.default()
42
43 if 'locale_config' in args:
44 default = cls._load_config(default, args['locale_config'])
45 else:
46 default = cls._load_config(default, args)
47
48 return default
49
50
51 class LocaleMenu(AbstractSubMenu):
52 def __init__(
53 self,
54 data_store: Dict[str, Any],
55 locale_conf: LocaleConfiguration
56 ):
57 self._preset = locale_conf
58 super().__init__(data_store=data_store)
59
60 def setup_selection_menu_options(self):
61 self._menu_options['keyboard-layout'] = \
62 Selector(
63 _('Keyboard layout'),
64 lambda preset: self._select_kb_layout(preset),
65 default=self._preset.kb_layout,
66 enabled=True)
67 self._menu_options['sys-language'] = \
68 Selector(
69 _('Locale language'),
70 lambda preset: select_locale_lang(preset),
71 default=self._preset.sys_lang,
72 enabled=True)
73 self._menu_options['sys-encoding'] = \
74 Selector(
75 _('Locale encoding'),
76 lambda preset: select_locale_enc(preset),
77 default=self._preset.sys_enc,
78 enabled=True)
79
80 def run(self, allow_reset: bool = True) -> LocaleConfiguration:
81 super().run(allow_reset=allow_reset)
82
83 return LocaleConfiguration(
84 self._data_store['keyboard-layout'],
85 self._data_store['sys-language'],
86 self._data_store['sys-encoding']
87 )
88
89 def _select_kb_layout(self, preset: Optional[str]) -> Optional[str]:
90 kb_lang = select_kb_layout(preset)
91 if kb_lang:
92 set_kb_layout(kb_lang)
93 return kb_lang
94
95
96 def select_locale_lang(preset: Optional[str] = None) -> Optional[str]:
97 locales = list_locales()
98 locale_lang = set([locale.split()[0] for locale in locales])
99
100 choice = Menu(
101 _('Choose which locale language to use'),
102 list(locale_lang),
103 sort=True,
104 preset_values=preset
105 ).run()
106
107 match choice.type_:
108 case MenuSelectionType.Selection: return choice.single_value
109 case MenuSelectionType.Skip: return preset
110
111 return None
112
113
114 def select_locale_enc(preset: Optional[str] = None) -> Optional[str]:
115 locales = list_locales()
116 locale_enc = set([locale.split()[1] for locale in locales])
117
118 choice = Menu(
119 _('Choose which locale encoding to use'),
120 list(locale_enc),
121 sort=True,
122 preset_values=preset
123 ).run()
124
125 match choice.type_:
126 case MenuSelectionType.Selection: return choice.single_value
127 case MenuSelectionType.Skip: return preset
128
129 return None
130
131
132 def select_kb_layout(preset: Optional[str] = None) -> Optional[str]:
133 """
134 Asks the user to select a language
135 Usually this is combined with :ref:`archinstall.list_keyboard_languages`.
136
137 :return: The language/dictionary key of the selected language
138 :rtype: str
139 """
140 kb_lang = list_keyboard_languages()
141 # sort alphabetically and then by length
142 sorted_kb_lang = sorted(kb_lang, key=lambda x: (len(x), x))
143
144 choice = Menu(
145 _('Select keyboard layout'),
146 sorted_kb_lang,
147 preset_values=preset,
148 sort=False
149 ).run()
150
151 match choice.type_:
152 case MenuSelectionType.Skip: return preset
153 case MenuSelectionType.Selection: return choice.single_value
154
155 return None
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/archinstall/lib/locale/locale_menu.py b/archinstall/lib/locale/locale_menu.py
--- a/archinstall/lib/locale/locale_menu.py
+++ b/archinstall/lib/locale/locale_menu.py
@@ -80,6 +80,9 @@
def run(self, allow_reset: bool = True) -> LocaleConfiguration:
super().run(allow_reset=allow_reset)
+ if not self._data_store:
+ return LocaleConfiguration.default()
+
return LocaleConfiguration(
self._data_store['keyboard-layout'],
self._data_store['sys-language'],
|
{"golden_diff": "diff --git a/archinstall/lib/locale/locale_menu.py b/archinstall/lib/locale/locale_menu.py\n--- a/archinstall/lib/locale/locale_menu.py\n+++ b/archinstall/lib/locale/locale_menu.py\n@@ -80,6 +80,9 @@\n \tdef run(self, allow_reset: bool = True) -> LocaleConfiguration:\n \t\tsuper().run(allow_reset=allow_reset)\n \n+\t\tif not self._data_store:\n+\t\t\treturn LocaleConfiguration.default()\n+\n \t\treturn LocaleConfiguration(\n \t\t\tself._data_store['keyboard-layout'],\n \t\t\tself._data_store['sys-language'],\n", "issue": "Reset in locales menu causes crash\nUsing reset in the _Locales_ menu will cause a crash.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/scripttest/archinstall/.venv/bin/archinstall\", line 8, in <module>\r\n sys.exit(run_as_a_module())\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/scripttest/archinstall/archinstall/__init__.py\", line 291, in run_as_a_module\r\n importlib.import_module(mod_name)\r\n File \"/usr/lib/python3.11/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"<frozen importlib._bootstrap>\", line 1204, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 1176, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 1147, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 690, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 940, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 241, in _call_with_frames_removed\r\n File \"/home/scripttest/archinstall/archinstall/scripts/guided.py\", line 234, in <module>\r\n ask_user_questions()\r\n File \"/home/scripttest/archinstall/archinstall/scripts/guided.py\", line 99, in ask_user_questions\r\n global_menu.run()\r\n File \"/home/scripttest/archinstall/archinstall/lib/menu/abstract_menu.py\", line 348, in run\r\n if not self._process_selection(value):\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/scripttest/archinstall/archinstall/lib/menu/abstract_menu.py\", line 365, in _process_selection\r\n return self.exec_option(config_name, selector)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/scripttest/archinstall/archinstall/lib/menu/abstract_menu.py\", line 386, in exec_option\r\n result = selector.func(presel_val)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/scripttest/archinstall/archinstall/lib/global_menu.py\", line 53, in <lambda>\r\n lambda preset: self._locale_selection(preset),\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/scripttest/archinstall/archinstall/lib/global_menu.py\", line 246, in _locale_selection\r\n locale_config = LocaleMenu(data_store, preset).run()\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/scripttest/archinstall/archinstall/lib/locale/locale_menu.py\", line 84, in run\r\n self._data_store['keyboard-layout'],\r\n ~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^\r\nKeyError: 'keyboard-layout'\r\n```\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import Dict, Any, TYPE_CHECKING, Optional\n\nfrom .utils import list_keyboard_languages, list_locales, set_kb_layout\nfrom ..menu import Selector, AbstractSubMenu, MenuSelectionType, Menu\n\nif TYPE_CHECKING:\n\t_: Any\n\n\n@dataclass\nclass LocaleConfiguration:\n\tkb_layout: str\n\tsys_lang: str\n\tsys_enc: str\n\n\t@staticmethod\n\tdef default() -> 'LocaleConfiguration':\n\t\treturn LocaleConfiguration('us', 'en_US', 'UTF-8')\n\n\tdef 
json(self) -> Dict[str, str]:\n\t\treturn {\n\t\t\t'kb_layout': self.kb_layout,\n\t\t\t'sys_lang': self.sys_lang,\n\t\t\t'sys_enc': self.sys_enc\n\t\t}\n\n\t@classmethod\n\tdef _load_config(cls, config: 'LocaleConfiguration', args: Dict[str, Any]) -> 'LocaleConfiguration':\n\t\tif 'sys_lang' in args:\n\t\t\tconfig.sys_lang = args['sys_lang']\n\t\tif 'sys_enc' in args:\n\t\t\tconfig.sys_enc = args['sys_enc']\n\t\tif 'kb_layout' in args:\n\t\t\tconfig.kb_layout = args['kb_layout']\n\n\t\treturn config\n\n\t@classmethod\n\tdef parse_arg(cls, args: Dict[str, Any]) -> 'LocaleConfiguration':\n\t\tdefault = cls.default()\n\n\t\tif 'locale_config' in args:\n\t\t\tdefault = cls._load_config(default, args['locale_config'])\n\t\telse:\n\t\t\tdefault = cls._load_config(default, args)\n\n\t\treturn default\n\n\nclass LocaleMenu(AbstractSubMenu):\n\tdef __init__(\n\t\tself,\n\t\tdata_store: Dict[str, Any],\n\t\tlocale_conf: LocaleConfiguration\n\t):\n\t\tself._preset = locale_conf\n\t\tsuper().__init__(data_store=data_store)\n\n\tdef setup_selection_menu_options(self):\n\t\tself._menu_options['keyboard-layout'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Keyboard layout'),\n\t\t\t\tlambda preset: self._select_kb_layout(preset),\n\t\t\t\tdefault=self._preset.kb_layout,\n\t\t\t\tenabled=True)\n\t\tself._menu_options['sys-language'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Locale language'),\n\t\t\t\tlambda preset: select_locale_lang(preset),\n\t\t\t\tdefault=self._preset.sys_lang,\n\t\t\t\tenabled=True)\n\t\tself._menu_options['sys-encoding'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Locale encoding'),\n\t\t\t\tlambda preset: select_locale_enc(preset),\n\t\t\t\tdefault=self._preset.sys_enc,\n\t\t\t\tenabled=True)\n\n\tdef run(self, allow_reset: bool = True) -> LocaleConfiguration:\n\t\tsuper().run(allow_reset=allow_reset)\n\n\t\treturn LocaleConfiguration(\n\t\t\tself._data_store['keyboard-layout'],\n\t\t\tself._data_store['sys-language'],\n\t\t\tself._data_store['sys-encoding']\n\t\t)\n\n\tdef _select_kb_layout(self, preset: Optional[str]) -> Optional[str]:\n\t\tkb_lang = select_kb_layout(preset)\n\t\tif kb_lang:\n\t\t\tset_kb_layout(kb_lang)\n\t\treturn kb_lang\n\n\ndef select_locale_lang(preset: Optional[str] = None) -> Optional[str]:\n\tlocales = list_locales()\n\tlocale_lang = set([locale.split()[0] for locale in locales])\n\n\tchoice = Menu(\n\t\t_('Choose which locale language to use'),\n\t\tlist(locale_lang),\n\t\tsort=True,\n\t\tpreset_values=preset\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\t\tcase MenuSelectionType.Skip: return preset\n\n\treturn None\n\n\ndef select_locale_enc(preset: Optional[str] = None) -> Optional[str]:\n\tlocales = list_locales()\n\tlocale_enc = set([locale.split()[1] for locale in locales])\n\n\tchoice = Menu(\n\t\t_('Choose which locale encoding to use'),\n\t\tlist(locale_enc),\n\t\tsort=True,\n\t\tpreset_values=preset\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\t\tcase MenuSelectionType.Skip: return preset\n\n\treturn None\n\n\ndef select_kb_layout(preset: Optional[str] = None) -> Optional[str]:\n\t\"\"\"\n\tAsks the user to select a language\n\tUsually this is combined with :ref:`archinstall.list_keyboard_languages`.\n\n\t:return: The language/dictionary key of the selected language\n\t:rtype: str\n\t\"\"\"\n\tkb_lang = list_keyboard_languages()\n\t# sort alphabetically and then by length\n\tsorted_kb_lang = sorted(kb_lang, key=lambda x: (len(x), x))\n\n\tchoice = 
Menu(\n\t\t_('Select keyboard layout'),\n\t\tsorted_kb_lang,\n\t\tpreset_values=preset,\n\t\tsort=False\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n", "path": "archinstall/lib/locale/locale_menu.py"}], "after_files": [{"content": "from dataclasses import dataclass\nfrom typing import Dict, Any, TYPE_CHECKING, Optional\n\nfrom .utils import list_keyboard_languages, list_locales, set_kb_layout\nfrom ..menu import Selector, AbstractSubMenu, MenuSelectionType, Menu\n\nif TYPE_CHECKING:\n\t_: Any\n\n\n@dataclass\nclass LocaleConfiguration:\n\tkb_layout: str\n\tsys_lang: str\n\tsys_enc: str\n\n\t@staticmethod\n\tdef default() -> 'LocaleConfiguration':\n\t\treturn LocaleConfiguration('us', 'en_US', 'UTF-8')\n\n\tdef json(self) -> Dict[str, str]:\n\t\treturn {\n\t\t\t'kb_layout': self.kb_layout,\n\t\t\t'sys_lang': self.sys_lang,\n\t\t\t'sys_enc': self.sys_enc\n\t\t}\n\n\t@classmethod\n\tdef _load_config(cls, config: 'LocaleConfiguration', args: Dict[str, Any]) -> 'LocaleConfiguration':\n\t\tif 'sys_lang' in args:\n\t\t\tconfig.sys_lang = args['sys_lang']\n\t\tif 'sys_enc' in args:\n\t\t\tconfig.sys_enc = args['sys_enc']\n\t\tif 'kb_layout' in args:\n\t\t\tconfig.kb_layout = args['kb_layout']\n\n\t\treturn config\n\n\t@classmethod\n\tdef parse_arg(cls, args: Dict[str, Any]) -> 'LocaleConfiguration':\n\t\tdefault = cls.default()\n\n\t\tif 'locale_config' in args:\n\t\t\tdefault = cls._load_config(default, args['locale_config'])\n\t\telse:\n\t\t\tdefault = cls._load_config(default, args)\n\n\t\treturn default\n\n\nclass LocaleMenu(AbstractSubMenu):\n\tdef __init__(\n\t\tself,\n\t\tdata_store: Dict[str, Any],\n\t\tlocale_conf: LocaleConfiguration\n\t):\n\t\tself._preset = locale_conf\n\t\tsuper().__init__(data_store=data_store)\n\n\tdef setup_selection_menu_options(self):\n\t\tself._menu_options['keyboard-layout'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Keyboard layout'),\n\t\t\t\tlambda preset: self._select_kb_layout(preset),\n\t\t\t\tdefault=self._preset.kb_layout,\n\t\t\t\tenabled=True)\n\t\tself._menu_options['sys-language'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Locale language'),\n\t\t\t\tlambda preset: select_locale_lang(preset),\n\t\t\t\tdefault=self._preset.sys_lang,\n\t\t\t\tenabled=True)\n\t\tself._menu_options['sys-encoding'] = \\\n\t\t\tSelector(\n\t\t\t\t_('Locale encoding'),\n\t\t\t\tlambda preset: select_locale_enc(preset),\n\t\t\t\tdefault=self._preset.sys_enc,\n\t\t\t\tenabled=True)\n\n\tdef run(self, allow_reset: bool = True) -> LocaleConfiguration:\n\t\tsuper().run(allow_reset=allow_reset)\n\n\t\tif not self._data_store:\n\t\t\treturn LocaleConfiguration.default()\n\n\t\treturn LocaleConfiguration(\n\t\t\tself._data_store['keyboard-layout'],\n\t\t\tself._data_store['sys-language'],\n\t\t\tself._data_store['sys-encoding']\n\t\t)\n\n\tdef _select_kb_layout(self, preset: Optional[str]) -> Optional[str]:\n\t\tkb_lang = select_kb_layout(preset)\n\t\tif kb_lang:\n\t\t\tset_kb_layout(kb_lang)\n\t\treturn kb_lang\n\n\ndef select_locale_lang(preset: Optional[str] = None) -> Optional[str]:\n\tlocales = list_locales()\n\tlocale_lang = set([locale.split()[0] for locale in locales])\n\n\tchoice = Menu(\n\t\t_('Choose which locale language to use'),\n\t\tlist(locale_lang),\n\t\tsort=True,\n\t\tpreset_values=preset\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\t\tcase MenuSelectionType.Skip: return preset\n\n\treturn 
None\n\n\ndef select_locale_enc(preset: Optional[str] = None) -> Optional[str]:\n\tlocales = list_locales()\n\tlocale_enc = set([locale.split()[1] for locale in locales])\n\n\tchoice = Menu(\n\t\t_('Choose which locale encoding to use'),\n\t\tlist(locale_enc),\n\t\tsort=True,\n\t\tpreset_values=preset\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\t\tcase MenuSelectionType.Skip: return preset\n\n\treturn None\n\n\ndef select_kb_layout(preset: Optional[str] = None) -> Optional[str]:\n\t\"\"\"\n\tAsks the user to select a language\n\tUsually this is combined with :ref:`archinstall.list_keyboard_languages`.\n\n\t:return: The language/dictionary key of the selected language\n\t:rtype: str\n\t\"\"\"\n\tkb_lang = list_keyboard_languages()\n\t# sort alphabetically and then by length\n\tsorted_kb_lang = sorted(kb_lang, key=lambda x: (len(x), x))\n\n\tchoice = Menu(\n\t\t_('Select keyboard layout'),\n\t\tsorted_kb_lang,\n\t\tpreset_values=preset,\n\t\tsort=False\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Skip: return preset\n\t\tcase MenuSelectionType.Selection: return choice.single_value\n\n\treturn None\n", "path": "archinstall/lib/locale/locale_menu.py"}]}
| 2,347 | 120 |
gh_patches_debug_34172
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-5943
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parsing GBM files for GBMSummaryTimeSeries takes too long
### Describe the performance issue
When reading in the Fermi GBM CSPEC and CTIME FITS files to create a timeseries, the parsing takes a very long time.
This can be up to 3 minutes for the CTIME files 😱, which have a very high cadence.
This is due to the nested loop here:
https://github.com/sunpy/sunpy/blob/main/sunpy/timeseries/sources/fermi_gbm.py#L215
It loops over the time index which is really inefficient.
### To Reproduce
```python
>>> from sunpy import timeseries as ts
>>> import time
>>> t1 = time.time()
>>> gbm_cspec = ts.TimeSeries("glg_cspec_n0_220120_v00.pha")
>>> print("Time taken to read file", time.time() - t1)
Time taken to read file 11.445274829864502
>>> t1 = time.time()
>>> gbm_ctime = ts.TimeSeries("glg_ctime_n0_220120_v00.pha")
>>> print("Time taken to read file", time.time() - t1)
Time taken to read file 188.96947813034058 😱
```
### Proposed fix
Use array divisions rather than looping over the time index; I'll make a PR now.
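
A minimal sketch of the proposed vectorised rebinning (the array shapes, band edges and the omitted per-keV normalisation below are illustrative placeholders, not the exact GBM values): sum the counts over the channel axis once per summary band instead of looping over every time step.

```python
import numpy as np

# Illustrative shapes: counts is (n_times, n_channels), exposure is (n_times,).
counts = np.random.poisson(5.0, size=(1000, 128)).astype(float)
exposure = np.full(1000, 4.096)
band_edges = [0, 16, 32, 64, 96, 112, 120, 128]  # placeholder channel boundaries

summary = []
for start, end in zip(band_edges[:-1], band_edges[1:]):
    band_counts = counts[:, start:end].sum(axis=1)  # one vectorised sum per band
    summary.append(band_counts / exposure)          # counts/s; the per-keV factor is omitted here
summary = np.array(summary).T                       # shape (n_times, n_bands)
print(summary.shape)
```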
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/timeseries/sources/fermi_gbm.py`
Content:
```
1 """
2 This module FERMI GBM `~sunpy.timeseries.TimeSeries` source.
3 """
4 from collections import OrderedDict
5
6 import matplotlib.pyplot as plt
7 import numpy as np
8 import pandas as pd
9
10 import astropy.units as u
11 from astropy.time import TimeDelta
12
13 import sunpy.io
14 from sunpy.time import parse_time
15 from sunpy.timeseries.timeseriesbase import GenericTimeSeries
16 from sunpy.util.metadata import MetaDict
17 from sunpy.visualization import peek_show
18
19 __all__ = ['GBMSummaryTimeSeries']
20
21
22 class GBMSummaryTimeSeries(GenericTimeSeries):
23 """
24 Fermi/GBM Summary lightcurve TimeSeries.
25
26 The Gamma-ray Burst Monitor (GBM) is an instrument on board Fermi.
27 It is meant to detect gamma-ray bursts but also detects solar flares.
28 It consists of 12 Sodium Iodide (NaI) scintillation detectors and 2 Bismuth Germanate (BGO) scintillation detectors.
29 The NaI detectors cover from a few keV to about 1 MeV and provide burst triggers and locations.
30 The BGO detectors cover the energy range from about 150 keV to about 30 MeV.
31
32 This summary lightcurve makes use of the CSPEC (daily version) data set which consists of the counts
33 accumulated every 4.096 seconds in 128 energy channels for each of the 14 detectors.
34 Note that the data is re-binned from the original 128 into the following 8 pre-determined energy channels.
35
36 * 4-15 keV
37 * 15-25 keV
38 * 25-50 keV
39 * 50-100 keV
40 * 100-300 keV
41 * 300-800 keV
42 * 800-2000 keV
43
44 Examples
45 --------
46 >>> import sunpy.timeseries
47 >>> import sunpy.data.sample # doctest: +REMOTE_DATA
48 >>> gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES, source='GBMSummary') # doctest: +REMOTE_DATA
49 >>> gbm.peek() # doctest: +SKIP
50
51 References
52 ----------
53 * `Fermi Mission Homepage <https://fermi.gsfc.nasa.gov>`_
54 * `Fermi GBM Homepage <https://fermi.gsfc.nasa.gov/science/instruments/gbm.html>`_
55 * `Fermi Science Support Center <https://fermi.gsfc.nasa.gov/ssc/>`_
56 * `Fermi Data Product <https://fermi.gsfc.nasa.gov/ssc/data/access/>`_
57 * `GBM Instrument Papers <https://gammaray.nsstc.nasa.gov/gbm/publications/instrument_journal_gbm.html>`_
58 """
59 # Class attribute used to specify the source class of the TimeSeries.
60 _source = 'gbmsummary'
61
62 def plot(self, axes=None, **kwargs):
63 """
64 Plots the GBM timeseries.
65
66 Parameters
67 ----------
68 axes : `matplotlib.axes.Axes`, optional
69 The axes on which to plot the TimeSeries. Defaults to current axes.
70 **kwargs : `dict`
71 Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`
72 functions.
73
74 Returns
75 -------
76 `~matplotlib.axes.Axes`
77 The plot axes.
78 """
79 self._validate_data_for_plotting()
80 if axes is None:
81 axes = plt.gca()
82 data_lab = self.to_dataframe().columns.values
83 for d in data_lab:
84 axes.plot(self.to_dataframe().index, self.to_dataframe()[d], label=d, **kwargs)
85 axes.set_yscale("log")
86 axes.set_xlabel('Start time: ' + self.to_dataframe().index[0].strftime('%Y-%m-%d %H:%M:%S UT'))
87 axes.set_ylabel('Counts/s/keV')
88 axes.legend()
89 return axes
90
91 @peek_show
92 def peek(self, title=None, **kwargs):
93 """
94 Displays the GBM timeseries by calling
95 `~sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries.plot`.
96
97 .. plot::
98
99 import sunpy.timeseries
100 import sunpy.data.sample
101 gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES, source='GBMSummary')
102 gbm.peek()
103
104 Parameters
105 ----------
106 title : `str`, optional
107 The title of the plot.
108 **kwargs : `dict`
109 Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`
110 functions.
111 """
112 if title is None:
113 title = 'Fermi GBM Summary data ' + str(self.meta.get('DETNAM').values())
114 fig, ax = plt.subplots()
115 axes = self.plot(axes=ax, **kwargs)
116 axes.set_title(title)
117 fig.autofmt_xdate()
118 return fig
119
120 @classmethod
121 def _parse_file(cls, filepath):
122 """
123 Parses a GBM CSPEC FITS file.
124
125 Parameters
126 ----------
127 filepath : `str`
128 The path to the file you want to parse.
129 """
130 hdus = sunpy.io.read_file(filepath)
131 return cls._parse_hdus(hdus)
132
133 @classmethod
134 def _parse_hdus(cls, hdulist):
135 """
136 Parses a GBM CSPEC `astropy.io.fits.HDUList`.
137
138 Parameters
139 ----------
140 hdulist : `str`
141 The path to the file you want to parse.
142 """
143 header = MetaDict(OrderedDict(hdulist[0].header))
144 # these GBM files have three FITS extensions.
145 # extn1 - this gives the energy range for each of the 128 energy bins
146 # extn2 - this contains the data, e.g. counts, exposure time, time of observation
147 # extn3 - eclipse times?
148 energy_bins = hdulist[1].data
149 count_data = hdulist[2].data
150
151 # rebin the 128 energy channels into some summary ranges
152 # 4-15 keV, 15 - 25 keV, 25-50 keV, 50-100 keV, 100-300 keV, 300-800 keV, 800 - 2000 keV
153 # put the data in the units of counts/s/keV
154 summary_counts = _bin_data_for_summary(energy_bins, count_data)
155
156 # get the time information in datetime format with the correct MET adjustment
157 met_ref_time = parse_time('2001-01-01 00:00') # Mission elapsed time
158 gbm_times = met_ref_time + TimeDelta(count_data['time'], format='sec')
159 gbm_times.precision = 9
160 gbm_times = gbm_times.isot.astype('datetime64')
161
162 column_labels = ['4-15 keV', '15-25 keV', '25-50 keV', '50-100 keV',
163 '100-300 keV', '300-800 keV', '800-2000 keV']
164
165 # Add the units data
166 units = OrderedDict([('4-15 keV', u.ct / u.s / u.keV), ('15-25 keV', u.ct / u.s / u.keV),
167 ('25-50 keV', u.ct / u.s / u.keV), ('50-100 keV', u.ct / u.s / u.keV),
168 ('100-300 keV', u.ct / u.s / u.keV), ('300-800 keV', u.ct / u.s / u.keV),
169 ('800-2000 keV', u.ct / u.s / u.keV)])
170 return pd.DataFrame(summary_counts, columns=column_labels, index=gbm_times), header, units
171
172 @classmethod
173 def is_datasource_for(cls, **kwargs):
174 """
175 Determines if the file corresponds to a GBM summary lightcurve
176 `~sunpy.timeseries.TimeSeries`.
177 """
178 # Check if source is explicitly assigned
179 if 'source' in kwargs.keys():
180 if kwargs.get('source', ''):
181 return kwargs.get('source', '').lower().startswith(cls._source)
182 # Check if HDU defines the source instrument
183 if 'meta' in kwargs.keys():
184 return kwargs['meta'].get('INSTRUME', '').startswith('GBM')
185
186
187 def _bin_data_for_summary(energy_bins, count_data):
188 """
189 Rebin the 128 energy channels into some summary ranges and put the data in
190 the units of counts/s/keV.
191
192 Bin ranges used:
193 * 4-15 keV
194 * 15-25 keV
195 * 25-50 keV
196 * 50-100 keV
197 * 100-300 keV
198 * 300-800 keV
199 * 800-2000 keV
200
201 Parameters
202 ----------
203 energy_bins : `numpy.ndarray`
204 The array of energy bins to rebin.
205 count_data : `numpy.ndarray`
206 The array of count data to rebin.
207 """
208 # find the indices corresponding to some standard summary energy bins
209 ebands = [4, 15, 25, 50, 100, 300, 800, 2000]
210 indices = []
211 for e in ebands:
212 indices.append(np.searchsorted(energy_bins['e_max'], e))
213
214 summary_counts = []
215 for i in range(0, len(count_data['counts'])):
216 counts_in_bands = []
217 for j in range(1, len(ebands)):
218 counts_in_bands.append(
219 np.sum(count_data['counts'][i][indices[j - 1]:indices[j]]) /
220 (count_data['exposure'][i] *
221 (energy_bins['e_max'][indices[j]] -
222 energy_bins['e_min'][indices[j - 1]])))
223
224 summary_counts.append(counts_in_bands)
225
226 return summary_counts
227
228
229 def _parse_detector(detector):
230 """
231 Check and fix detector name strings.
232
233 Parameters
234 ----------
235 detector : `str`
236 The detector name to check.
237 """
238 oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',
239 'n10', 'n11']
240 altlist = [str(i) for i in range(12)]
241 if detector in oklist:
242 return detector
243 elif detector in altlist:
244 return 'n' + detector
245 else:
246 raise ValueError('Detector string could not be interpreted')
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/timeseries/sources/fermi_gbm.py b/sunpy/timeseries/sources/fermi_gbm.py
--- a/sunpy/timeseries/sources/fermi_gbm.py
+++ b/sunpy/timeseries/sources/fermi_gbm.py
@@ -31,7 +31,12 @@
This summary lightcurve makes use of the CSPEC (daily version) data set which consists of the counts
accumulated every 4.096 seconds in 128 energy channels for each of the 14 detectors.
+
Note that the data is re-binned from the original 128 into the following 8 pre-determined energy channels.
+ The rebinning method treats the counts in each of the original 128 channels as
+ all having the energy of the average energy of that channel. For example, the
+ counts in an 14.5--15.6 keV original channel would all be accumulated into the
+ 15--25 keV rebinned channel.
* 4-15 keV
* 15-25 keV
@@ -205,25 +210,20 @@
count_data : `numpy.ndarray`
The array of count data to rebin.
"""
- # find the indices corresponding to some standard summary energy bins
+
+ # list of energy bands to sum between
ebands = [4, 15, 25, 50, 100, 300, 800, 2000]
- indices = []
- for e in ebands:
- indices.append(np.searchsorted(energy_bins['e_max'], e))
+ e_center = (energy_bins['e_min'] + energy_bins['e_max']) / 2
+ indices = [np.searchsorted(e_center, e) for e in ebands]
summary_counts = []
- for i in range(0, len(count_data['counts'])):
- counts_in_bands = []
- for j in range(1, len(ebands)):
- counts_in_bands.append(
- np.sum(count_data['counts'][i][indices[j - 1]:indices[j]]) /
- (count_data['exposure'][i] *
- (energy_bins['e_max'][indices[j]] -
- energy_bins['e_min'][indices[j - 1]])))
-
- summary_counts.append(counts_in_bands)
-
- return summary_counts
+ for ind_start, ind_end in zip(indices[:-1], indices[1:]):
+ # sum the counts in the energy bands, and find counts/s/keV
+ summed_counts = np.sum(count_data["counts"][:, ind_start:ind_end], axis=1)
+ energy_width = (energy_bins["e_max"][ind_end - 1] - energy_bins["e_min"][ind_start])
+ summary_counts.append(summed_counts/energy_width/count_data["exposure"])
+
+ return np.array(summary_counts).T
def _parse_detector(detector):
|
{"golden_diff": "diff --git a/sunpy/timeseries/sources/fermi_gbm.py b/sunpy/timeseries/sources/fermi_gbm.py\n--- a/sunpy/timeseries/sources/fermi_gbm.py\n+++ b/sunpy/timeseries/sources/fermi_gbm.py\n@@ -31,7 +31,12 @@\n \n This summary lightcurve makes use of the CSPEC (daily version) data set which consists of the counts\n accumulated every 4.096 seconds in 128 energy channels for each of the 14 detectors.\n+\n Note that the data is re-binned from the original 128 into the following 8 pre-determined energy channels.\n+ The rebinning method treats the counts in each of the original 128 channels as\n+ all having the energy of the average energy of that channel. For example, the\n+ counts in an 14.5--15.6 keV original channel would all be accumulated into the\n+ 15--25 keV rebinned channel.\n \n * 4-15 keV\n * 15-25 keV\n@@ -205,25 +210,20 @@\n count_data : `numpy.ndarray`\n The array of count data to rebin.\n \"\"\"\n- # find the indices corresponding to some standard summary energy bins\n+\n+ # list of energy bands to sum between\n ebands = [4, 15, 25, 50, 100, 300, 800, 2000]\n- indices = []\n- for e in ebands:\n- indices.append(np.searchsorted(energy_bins['e_max'], e))\n+ e_center = (energy_bins['e_min'] + energy_bins['e_max']) / 2\n+ indices = [np.searchsorted(e_center, e) for e in ebands]\n \n summary_counts = []\n- for i in range(0, len(count_data['counts'])):\n- counts_in_bands = []\n- for j in range(1, len(ebands)):\n- counts_in_bands.append(\n- np.sum(count_data['counts'][i][indices[j - 1]:indices[j]]) /\n- (count_data['exposure'][i] *\n- (energy_bins['e_max'][indices[j]] -\n- energy_bins['e_min'][indices[j - 1]])))\n-\n- summary_counts.append(counts_in_bands)\n-\n- return summary_counts\n+ for ind_start, ind_end in zip(indices[:-1], indices[1:]):\n+ # sum the counts in the energy bands, and find counts/s/keV\n+ summed_counts = np.sum(count_data[\"counts\"][:, ind_start:ind_end], axis=1)\n+ energy_width = (energy_bins[\"e_max\"][ind_end - 1] - energy_bins[\"e_min\"][ind_start])\n+ summary_counts.append(summed_counts/energy_width/count_data[\"exposure\"])\n+\n+ return np.array(summary_counts).T\n \n \n def _parse_detector(detector):\n", "issue": "Parsing GBM files for GBMSummaryTimeSeries takes too long\n### Describe the performance issue\n\nWhen reading in the Fermi GBM CSPEC and CTIME fits files to create a timeseries, the parsing takes a very long time. 
\r\nThis can be up to 3 minutes for the CTIME files \ud83d\ude31 which have a very high cadence.\r\n\r\nThis is due to t he nested loop here:\r\n\r\nhttps://github.com/sunpy/sunpy/blob/main/sunpy/timeseries/sources/fermi_gbm.py#L215\r\n\r\n\r\nIt loops over the time index which is really inefficient.\n\n### To Reproduce\n\n```python\r\n>>> from sunpy import timeseries as ts\r\n>>> import time\r\n\r\n>>> t1 = time.time()\r\n>>> gbm_cspec = ts.TimeSeries(\"glg_cspec_n0_220120_v00.pha\")\r\n>>> print(\"Time taken to read file\", time.time() - t1)\r\nTime taken to read file 11.445274829864502\r\n\r\n>>> t1 = time.time()\r\n>>> gbm_ctime = ts.TimeSeries(\"glg_ctime_n0_220120_v00.pha\")\r\n>>> print(\"Time taken to read file\", time.time() - t1)\r\nTime taken to read file 188.96947813034058 \ud83d\ude31 \r\n```\r\n\r\n\n\n### Proposed fix\n\narray divisions rather than looping, I'll make a PR now\n", "before_files": [{"content": "\"\"\"\nThis module FERMI GBM `~sunpy.timeseries.TimeSeries` source.\n\"\"\"\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport astropy.units as u\nfrom astropy.time import TimeDelta\n\nimport sunpy.io\nfrom sunpy.time import parse_time\nfrom sunpy.timeseries.timeseriesbase import GenericTimeSeries\nfrom sunpy.util.metadata import MetaDict\nfrom sunpy.visualization import peek_show\n\n__all__ = ['GBMSummaryTimeSeries']\n\n\nclass GBMSummaryTimeSeries(GenericTimeSeries):\n \"\"\"\n Fermi/GBM Summary lightcurve TimeSeries.\n\n The Gamma-ray Burst Monitor (GBM) is an instrument on board Fermi.\n It is meant to detect gamma-ray bursts but also detects solar flares.\n It consists of 12 Sodium Iodide (NaI) scintillation detectors and 2 Bismuth Germanate (BGO) scintillation detectors.\n The NaI detectors cover from a few keV to about 1 MeV and provide burst triggers and locations.\n The BGO detectors cover the energy range from about 150 keV to about 30 MeV.\n\n This summary lightcurve makes use of the CSPEC (daily version) data set which consists of the counts\n accumulated every 4.096 seconds in 128 energy channels for each of the 14 detectors.\n Note that the data is re-binned from the original 128 into the following 8 pre-determined energy channels.\n\n * 4-15 keV\n * 15-25 keV\n * 25-50 keV\n * 50-100 keV\n * 100-300 keV\n * 300-800 keV\n * 800-2000 keV\n\n Examples\n --------\n >>> import sunpy.timeseries\n >>> import sunpy.data.sample # doctest: +REMOTE_DATA\n >>> gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES, source='GBMSummary') # doctest: +REMOTE_DATA\n >>> gbm.peek() # doctest: +SKIP\n\n References\n ----------\n * `Fermi Mission Homepage <https://fermi.gsfc.nasa.gov>`_\n * `Fermi GBM Homepage <https://fermi.gsfc.nasa.gov/science/instruments/gbm.html>`_\n * `Fermi Science Support Center <https://fermi.gsfc.nasa.gov/ssc/>`_\n * `Fermi Data Product <https://fermi.gsfc.nasa.gov/ssc/data/access/>`_\n * `GBM Instrument Papers <https://gammaray.nsstc.nasa.gov/gbm/publications/instrument_journal_gbm.html>`_\n \"\"\"\n # Class attribute used to specify the source class of the TimeSeries.\n _source = 'gbmsummary'\n\n def plot(self, axes=None, **kwargs):\n \"\"\"\n Plots the GBM timeseries.\n\n Parameters\n ----------\n axes : `matplotlib.axes.Axes`, optional\n The axes on which to plot the TimeSeries. 
Defaults to current axes.\n **kwargs : `dict`\n Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`\n functions.\n\n Returns\n -------\n `~matplotlib.axes.Axes`\n The plot axes.\n \"\"\"\n self._validate_data_for_plotting()\n if axes is None:\n axes = plt.gca()\n data_lab = self.to_dataframe().columns.values\n for d in data_lab:\n axes.plot(self.to_dataframe().index, self.to_dataframe()[d], label=d, **kwargs)\n axes.set_yscale(\"log\")\n axes.set_xlabel('Start time: ' + self.to_dataframe().index[0].strftime('%Y-%m-%d %H:%M:%S UT'))\n axes.set_ylabel('Counts/s/keV')\n axes.legend()\n return axes\n\n @peek_show\n def peek(self, title=None, **kwargs):\n \"\"\"\n Displays the GBM timeseries by calling\n `~sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries.plot`.\n\n .. plot::\n\n import sunpy.timeseries\n import sunpy.data.sample\n gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES, source='GBMSummary')\n gbm.peek()\n\n Parameters\n ----------\n title : `str`, optional\n The title of the plot.\n **kwargs : `dict`\n Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`\n functions.\n \"\"\"\n if title is None:\n title = 'Fermi GBM Summary data ' + str(self.meta.get('DETNAM').values())\n fig, ax = plt.subplots()\n axes = self.plot(axes=ax, **kwargs)\n axes.set_title(title)\n fig.autofmt_xdate()\n return fig\n\n @classmethod\n def _parse_file(cls, filepath):\n \"\"\"\n Parses a GBM CSPEC FITS file.\n\n Parameters\n ----------\n filepath : `str`\n The path to the file you want to parse.\n \"\"\"\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)\n\n @classmethod\n def _parse_hdus(cls, hdulist):\n \"\"\"\n Parses a GBM CSPEC `astropy.io.fits.HDUList`.\n\n Parameters\n ----------\n hdulist : `str`\n The path to the file you want to parse.\n \"\"\"\n header = MetaDict(OrderedDict(hdulist[0].header))\n # these GBM files have three FITS extensions.\n # extn1 - this gives the energy range for each of the 128 energy bins\n # extn2 - this contains the data, e.g. 
counts, exposure time, time of observation\n # extn3 - eclipse times?\n energy_bins = hdulist[1].data\n count_data = hdulist[2].data\n\n # rebin the 128 energy channels into some summary ranges\n # 4-15 keV, 15 - 25 keV, 25-50 keV, 50-100 keV, 100-300 keV, 300-800 keV, 800 - 2000 keV\n # put the data in the units of counts/s/keV\n summary_counts = _bin_data_for_summary(energy_bins, count_data)\n\n # get the time information in datetime format with the correct MET adjustment\n met_ref_time = parse_time('2001-01-01 00:00') # Mission elapsed time\n gbm_times = met_ref_time + TimeDelta(count_data['time'], format='sec')\n gbm_times.precision = 9\n gbm_times = gbm_times.isot.astype('datetime64')\n\n column_labels = ['4-15 keV', '15-25 keV', '25-50 keV', '50-100 keV',\n '100-300 keV', '300-800 keV', '800-2000 keV']\n\n # Add the units data\n units = OrderedDict([('4-15 keV', u.ct / u.s / u.keV), ('15-25 keV', u.ct / u.s / u.keV),\n ('25-50 keV', u.ct / u.s / u.keV), ('50-100 keV', u.ct / u.s / u.keV),\n ('100-300 keV', u.ct / u.s / u.keV), ('300-800 keV', u.ct / u.s / u.keV),\n ('800-2000 keV', u.ct / u.s / u.keV)])\n return pd.DataFrame(summary_counts, columns=column_labels, index=gbm_times), header, units\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n \"\"\"\n Determines if the file corresponds to a GBM summary lightcurve\n `~sunpy.timeseries.TimeSeries`.\n \"\"\"\n # Check if source is explicitly assigned\n if 'source' in kwargs.keys():\n if kwargs.get('source', ''):\n return kwargs.get('source', '').lower().startswith(cls._source)\n # Check if HDU defines the source instrument\n if 'meta' in kwargs.keys():\n return kwargs['meta'].get('INSTRUME', '').startswith('GBM')\n\n\ndef _bin_data_for_summary(energy_bins, count_data):\n \"\"\"\n Rebin the 128 energy channels into some summary ranges and put the data in\n the units of counts/s/keV.\n\n Bin ranges used:\n * 4-15 keV\n * 15-25 keV\n * 25-50 keV\n * 50-100 keV\n * 100-300 keV\n * 300-800 keV\n * 800-2000 keV\n\n Parameters\n ----------\n energy_bins : `numpy.ndarray`\n The array of energy bins to rebin.\n count_data : `numpy.ndarray`\n The array of count data to rebin.\n \"\"\"\n # find the indices corresponding to some standard summary energy bins\n ebands = [4, 15, 25, 50, 100, 300, 800, 2000]\n indices = []\n for e in ebands:\n indices.append(np.searchsorted(energy_bins['e_max'], e))\n\n summary_counts = []\n for i in range(0, len(count_data['counts'])):\n counts_in_bands = []\n for j in range(1, len(ebands)):\n counts_in_bands.append(\n np.sum(count_data['counts'][i][indices[j - 1]:indices[j]]) /\n (count_data['exposure'][i] *\n (energy_bins['e_max'][indices[j]] -\n energy_bins['e_min'][indices[j - 1]])))\n\n summary_counts.append(counts_in_bands)\n\n return summary_counts\n\n\ndef _parse_detector(detector):\n \"\"\"\n Check and fix detector name strings.\n\n Parameters\n ----------\n detector : `str`\n The detector name to check.\n \"\"\"\n oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',\n 'n10', 'n11']\n altlist = [str(i) for i in range(12)]\n if detector in oklist:\n return detector\n elif detector in altlist:\n return 'n' + detector\n else:\n raise ValueError('Detector string could not be interpreted')\n", "path": "sunpy/timeseries/sources/fermi_gbm.py"}], "after_files": [{"content": "\"\"\"\nThis module FERMI GBM `~sunpy.timeseries.TimeSeries` source.\n\"\"\"\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport astropy.units as 
u\nfrom astropy.time import TimeDelta\n\nimport sunpy.io\nfrom sunpy.time import parse_time\nfrom sunpy.timeseries.timeseriesbase import GenericTimeSeries\nfrom sunpy.util.metadata import MetaDict\nfrom sunpy.visualization import peek_show\n\n__all__ = ['GBMSummaryTimeSeries']\n\n\nclass GBMSummaryTimeSeries(GenericTimeSeries):\n \"\"\"\n Fermi/GBM Summary lightcurve TimeSeries.\n\n The Gamma-ray Burst Monitor (GBM) is an instrument on board Fermi.\n It is meant to detect gamma-ray bursts but also detects solar flares.\n It consists of 12 Sodium Iodide (NaI) scintillation detectors and 2 Bismuth Germanate (BGO) scintillation detectors.\n The NaI detectors cover from a few keV to about 1 MeV and provide burst triggers and locations.\n The BGO detectors cover the energy range from about 150 keV to about 30 MeV.\n\n This summary lightcurve makes use of the CSPEC (daily version) data set which consists of the counts\n accumulated every 4.096 seconds in 128 energy channels for each of the 14 detectors.\n\n Note that the data is re-binned from the original 128 into the following 8 pre-determined energy channels.\n The rebinning method treats the counts in each of the original 128 channels as\n all having the energy of the average energy of that channel. For example, the\n counts in an 14.5--15.6 keV original channel would all be accumulated into the\n 15--25 keV rebinned channel.\n\n * 4-15 keV\n * 15-25 keV\n * 25-50 keV\n * 50-100 keV\n * 100-300 keV\n * 300-800 keV\n * 800-2000 keV\n\n Examples\n --------\n >>> import sunpy.timeseries\n >>> import sunpy.data.sample # doctest: +REMOTE_DATA\n >>> gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES, source='GBMSummary') # doctest: +REMOTE_DATA\n >>> gbm.peek() # doctest: +SKIP\n\n References\n ----------\n * `Fermi Mission Homepage <https://fermi.gsfc.nasa.gov>`_\n * `Fermi GBM Homepage <https://fermi.gsfc.nasa.gov/science/instruments/gbm.html>`_\n * `Fermi Science Support Center <https://fermi.gsfc.nasa.gov/ssc/>`_\n * `Fermi Data Product <https://fermi.gsfc.nasa.gov/ssc/data/access/>`_\n * `GBM Instrument Papers <https://gammaray.nsstc.nasa.gov/gbm/publications/instrument_journal_gbm.html>`_\n \"\"\"\n # Class attribute used to specify the source class of the TimeSeries.\n _source = 'gbmsummary'\n\n def plot(self, axes=None, **kwargs):\n \"\"\"\n Plots the GBM timeseries.\n\n Parameters\n ----------\n axes : `matplotlib.axes.Axes`, optional\n The axes on which to plot the TimeSeries. Defaults to current axes.\n **kwargs : `dict`\n Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`\n functions.\n\n Returns\n -------\n `~matplotlib.axes.Axes`\n The plot axes.\n \"\"\"\n self._validate_data_for_plotting()\n if axes is None:\n axes = plt.gca()\n data_lab = self.to_dataframe().columns.values\n for d in data_lab:\n axes.plot(self.to_dataframe().index, self.to_dataframe()[d], label=d, **kwargs)\n axes.set_yscale(\"log\")\n axes.set_xlabel('Start time: ' + self.to_dataframe().index[0].strftime('%Y-%m-%d %H:%M:%S UT'))\n axes.set_ylabel('Counts/s/keV')\n axes.legend()\n return axes\n\n @peek_show\n def peek(self, title=None, **kwargs):\n \"\"\"\n Displays the GBM timeseries by calling\n `~sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries.plot`.\n\n .. 
plot::\n\n import sunpy.timeseries\n import sunpy.data.sample\n gbm = sunpy.timeseries.TimeSeries(sunpy.data.sample.GBM_TIMESERIES, source='GBMSummary')\n gbm.peek()\n\n Parameters\n ----------\n title : `str`, optional\n The title of the plot.\n **kwargs : `dict`\n Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`\n functions.\n \"\"\"\n if title is None:\n title = 'Fermi GBM Summary data ' + str(self.meta.get('DETNAM').values())\n fig, ax = plt.subplots()\n axes = self.plot(axes=ax, **kwargs)\n axes.set_title(title)\n fig.autofmt_xdate()\n return fig\n\n @classmethod\n def _parse_file(cls, filepath):\n \"\"\"\n Parses a GBM CSPEC FITS file.\n\n Parameters\n ----------\n filepath : `str`\n The path to the file you want to parse.\n \"\"\"\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)\n\n @classmethod\n def _parse_hdus(cls, hdulist):\n \"\"\"\n Parses a GBM CSPEC `astropy.io.fits.HDUList`.\n\n Parameters\n ----------\n hdulist : `str`\n The path to the file you want to parse.\n \"\"\"\n header = MetaDict(OrderedDict(hdulist[0].header))\n # these GBM files have three FITS extensions.\n # extn1 - this gives the energy range for each of the 128 energy bins\n # extn2 - this contains the data, e.g. counts, exposure time, time of observation\n # extn3 - eclipse times?\n energy_bins = hdulist[1].data\n count_data = hdulist[2].data\n\n # rebin the 128 energy channels into some summary ranges\n # 4-15 keV, 15 - 25 keV, 25-50 keV, 50-100 keV, 100-300 keV, 300-800 keV, 800 - 2000 keV\n # put the data in the units of counts/s/keV\n summary_counts = _bin_data_for_summary(energy_bins, count_data)\n\n # get the time information in datetime format with the correct MET adjustment\n met_ref_time = parse_time('2001-01-01 00:00') # Mission elapsed time\n gbm_times = met_ref_time + TimeDelta(count_data['time'], format='sec')\n gbm_times.precision = 9\n gbm_times = gbm_times.isot.astype('datetime64')\n\n column_labels = ['4-15 keV', '15-25 keV', '25-50 keV', '50-100 keV',\n '100-300 keV', '300-800 keV', '800-2000 keV']\n\n # Add the units data\n units = OrderedDict([('4-15 keV', u.ct / u.s / u.keV), ('15-25 keV', u.ct / u.s / u.keV),\n ('25-50 keV', u.ct / u.s / u.keV), ('50-100 keV', u.ct / u.s / u.keV),\n ('100-300 keV', u.ct / u.s / u.keV), ('300-800 keV', u.ct / u.s / u.keV),\n ('800-2000 keV', u.ct / u.s / u.keV)])\n return pd.DataFrame(summary_counts, columns=column_labels, index=gbm_times), header, units\n\n @classmethod\n def is_datasource_for(cls, **kwargs):\n \"\"\"\n Determines if the file corresponds to a GBM summary lightcurve\n `~sunpy.timeseries.TimeSeries`.\n \"\"\"\n # Check if source is explicitly assigned\n if 'source' in kwargs.keys():\n if kwargs.get('source', ''):\n return kwargs.get('source', '').lower().startswith(cls._source)\n # Check if HDU defines the source instrument\n if 'meta' in kwargs.keys():\n return kwargs['meta'].get('INSTRUME', '').startswith('GBM')\n\n\ndef _bin_data_for_summary(energy_bins, count_data):\n \"\"\"\n Rebin the 128 energy channels into some summary ranges and put the data in\n the units of counts/s/keV.\n\n Bin ranges used:\n * 4-15 keV\n * 15-25 keV\n * 25-50 keV\n * 50-100 keV\n * 100-300 keV\n * 300-800 keV\n * 800-2000 keV\n\n Parameters\n ----------\n energy_bins : `numpy.ndarray`\n The array of energy bins to rebin.\n count_data : `numpy.ndarray`\n The array of count data to rebin.\n \"\"\"\n\n # list of energy bands to sum between\n ebands = [4, 15, 25, 50, 100, 300, 800, 2000]\n e_center = 
(energy_bins['e_min'] + energy_bins['e_max']) / 2\n indices = [np.searchsorted(e_center, e) for e in ebands]\n\n summary_counts = []\n for ind_start, ind_end in zip(indices[:-1], indices[1:]):\n # sum the counts in the energy bands, and find counts/s/keV\n summed_counts = np.sum(count_data[\"counts\"][:, ind_start:ind_end], axis=1)\n energy_width = (energy_bins[\"e_max\"][ind_end - 1] - energy_bins[\"e_min\"][ind_start])\n summary_counts.append(summed_counts/energy_width/count_data[\"exposure\"])\n\n return np.array(summary_counts).T\n\n\ndef _parse_detector(detector):\n \"\"\"\n Check and fix detector name strings.\n\n Parameters\n ----------\n detector : `str`\n The detector name to check.\n \"\"\"\n oklist = ['n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',\n 'n10', 'n11']\n altlist = [str(i) for i in range(12)]\n if detector in oklist:\n return detector\n elif detector in altlist:\n return 'n' + detector\n else:\n raise ValueError('Detector string could not be interpreted')\n", "path": "sunpy/timeseries/sources/fermi_gbm.py"}]}
| 3,694 | 688 |
gh_patches_debug_25043
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-3541
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] TapNet test failures and long test times
TapNet is currently failing the macOS 3.7 test (`Install and test / test-unix (3.7, macOS-11) (pull_request)`) in PR #3523. The PR makes no changes relevant to TapNet or DL estimators, and the test times are extremely high.
https://github.com/sktime/sktime/actions/runs/3174214839/jobs/5170841022

--- END ISSUE ---
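
A likely driver of the long runtimes is that `TapNetClassifier`'s training default of `n_epochs=2000` is used as-is when the estimator is fitted during testing; the patch below addresses this by adding a `get_test_params` override with much smaller settings. A minimal sketch of constructing such a test-sized instance, reusing the same values as that override:

```python
from sktime.classification.deep_learning.tapnet import TapNetClassifier

# Test-sized configuration; the values mirror the get_test_params override in the patch.
clf = TapNetClassifier(
    n_epochs=50,
    batch_size=32,
    filter_sizes=(128, 128, 64),
    dilation=2,
    layers=(200, 100),
)
```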
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/classification/deep_learning/tapnet.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Time Convolutional Neural Network (CNN) for classification."""
3
4 __author__ = [
5 "Jack Russon",
6 ]
7 __all__ = [
8 "TapNetClassifier",
9 ]
10
11 from sklearn.utils import check_random_state
12
13 from sktime.classification.deep_learning.base import BaseDeepClassifier
14 from sktime.networks.tapnet import TapNetNetwork
15 from sktime.utils.validation._dependencies import _check_dl_dependencies
16
17 _check_dl_dependencies(severity="warning")
18
19
20 class TapNetClassifier(BaseDeepClassifier):
21 """Implementation of TapNetClassifier, as described in [1].
22
23 Parameters
24 ----------
25 filter_sizes : array of int, default = (256, 256, 128)
26 sets the kernel size argument for each convolutional block.
27 Controls number of convolutional filters
28 and number of neurons in attention dense layers.
29 kernel_sizes : array of int, default = (8, 5, 3)
30 controls the size of the convolutional kernels
31 layers : array of int, default = (500, 300)
32 size of dense layers
33 reduction : int, default = 16
34 divides the number of dense neurons in the first layer of the attention block.
35 n_epochs : int, default = 2000
36 number of epochs to train the model
37 batch_size : int, default = 16
38 number of samples per update
39 dropout : float, default = 0.5
40 dropout rate, in the range [0, 1)
41 dilation : int, default = 1
42 dilation value
43 activation : str, default = "sigmoid"
44 activation function for the last output layer
45 loss : str, default = "binary_crossentropy"
46 loss function for the classifier
47 optimizer : str or None, default = "Adam(lr=0.01)"
48 gradient updating function for the classifer
49 use_bias : bool, default = True
50 whether to use bias in the output dense layer
51 use_rp : bool, default = True
52 whether to use random projections
53 use_att : bool, default = True
54 whether to use self attention
55 use_lstm : bool, default = True
56 whether to use an LSTM layer
57 use_cnn : bool, default = True
58 whether to use a CNN layer
59 verbose : bool, default = False
60 whether to output extra information
61 random_state : int or None, default = None
62 seed for random
63
64 Attributes
65 ----------
66 n_classes : int
67 number of classes extracted from the data
68
69 References
70 ----------
71 .. [1] Zhang et al. Tapnet: Multivariate time series classification with
72 attentional prototypical network,
73 Proceedings of the AAAI Conference on Artificial Intelligence
74 34(4), 6845-6852, 2020
75
76 Examples
77 --------
78 >>> from sktime.classification.deep_learning.tapnet import TapNetClassifier
79 >>> from sktime.datasets import load_unit_test
80 >>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
81 >>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
82 >>> tapnet = TapNetClassifier() # doctest: +SKIP
83 >>> tapnet.fit(X_train, y_train) # doctest: +SKIP
84 TapNetClassifier(...)
85 """
86
87 _tags = {"python_dependencies": "tensorflow"}
88
89 def __init__(
90 self,
91 n_epochs=2000,
92 batch_size=16,
93 dropout=0.5,
94 filter_sizes=(256, 256, 128),
95 kernel_size=(8, 5, 3),
96 dilation=1,
97 layers=(500, 300),
98 use_rp=True,
99 rp_params=(-1, 3),
100 activation="sigmoid",
101 use_bias=True,
102 use_att=True,
103 use_lstm=True,
104 use_cnn=True,
105 random_state=None,
106 padding="same",
107 loss="binary_crossentropy",
108 optimizer=None,
109 metrics=None,
110 callbacks=None,
111 verbose=False,
112 ):
113 _check_dl_dependencies(severity="error")
114 super(TapNetClassifier, self).__init__()
115
116 self.batch_size = batch_size
117 self.random_state = random_state
118 self.kernel_size = kernel_size
119 self.layers = layers
120 self.rp_params = rp_params
121 self.filter_sizes = filter_sizes
122 self.activation = activation
123 self.use_att = use_att
124 self.use_bias = use_bias
125
126 self.dilation = dilation
127 self.padding = padding
128 self.n_epochs = n_epochs
129 self.loss = loss
130 self.optimizer = optimizer
131 self.metrics = metrics
132 self.callbacks = callbacks
133 self.verbose = verbose
134
135 self._is_fitted = False
136
137 self.dropout = dropout
138 self.use_lstm = use_lstm
139 self.use_cnn = use_cnn
140
141 # parameters for random projection
142 self.use_rp = use_rp
143 self.rp_params = rp_params
144
145 self._network = TapNetNetwork()
146
147 def build_model(self, input_shape, n_classes, **kwargs):
148 """Construct a complied, un-trained, keras model that is ready for training.
149
150 In sktime, time series are stored in numpy arrays of shape (d,m), where d
151 is the number of dimensions, m is the series length. Keras/tensorflow assume
152 data is in shape (m,d). This method also assumes (m,d). Transpose should
153 happen in fit.
154
155 Parameters
156 ----------
157 input_shape : tuple
158 The shape of the data fed into the input layer, should be (m, d)
159 n_classes : int
160 The number of classes, which becomes the size of the output layer
161
162 Returns
163 -------
164 output: a compiled Keras model
165 """
166 import tensorflow as tf
167 from tensorflow import keras
168
169 tf.random.set_seed(self.random_state)
170
171 if self.metrics is None:
172 metrics = ["accuracy"]
173 else:
174 metrics = self.metrics
175
176 input_layer, output_layer = self._network.build_network(input_shape, **kwargs)
177
178 output_layer = keras.layers.Dense(
179 units=n_classes, activation=self.activation, use_bias=self.use_bias
180 )(output_layer)
181
182 self.optimizer_ = (
183 keras.optimizers.Adam(learning_rate=0.01)
184 if self.optimizer is None
185 else self.optimizer
186 )
187
188 model = keras.models.Model(inputs=input_layer, outputs=output_layer)
189 model.compile(
190 loss=self.loss,
191 optimizer=self.optimizer_,
192 metrics=metrics,
193 )
194
195 return model
196
197 def _fit(self, X, y):
198 """Fit the classifier on the training set (X, y).
199
200 Parameters
201 ----------
202 X : np.ndarray of shape = (n_instances(n), n_dimensions(d), series_length(m))
203 Input training samples
204 y : np.ndarray of shape n
205 Input training class labels
206
207 Returns
208 -------
209 self: object
210 """
211 if self.callbacks is None:
212 self._callbacks = []
213
214 y_onehot = self.convert_y_to_keras(y)
215 # Transpose to conform to expectation format by keras
216 X = X.transpose(0, 2, 1)
217
218 check_random_state(self.random_state)
219 self.input_shape = X.shape[1:]
220 self.model_ = self.build_model(self.input_shape, self.n_classes_)
221 if self.verbose:
222 self.model_.summary()
223 self.history = self.model_.fit(
224 X,
225 y_onehot,
226 batch_size=self.batch_size,
227 epochs=self.n_epochs,
228 verbose=self.verbose,
229 callbacks=self._callbacks,
230 )
231
232 return self
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sktime/classification/deep_learning/tapnet.py b/sktime/classification/deep_learning/tapnet.py
--- a/sktime/classification/deep_learning/tapnet.py
+++ b/sktime/classification/deep_learning/tapnet.py
@@ -3,6 +3,8 @@
__author__ = [
"Jack Russon",
+ "TonyBagnall",
+ "achieveordie",
]
__all__ = [
"TapNetClassifier",
@@ -230,3 +232,33 @@
)
return self
+
+ @classmethod
+ def get_test_params(cls, parameter_set="default"):
+ """Return testing parameter settings for the estimator.
+
+ Parameters
+ ----------
+ parameter_set : str, default="default"
+ Name of the set of test parameters to return, for use in tests. If no
+ special parameters are defined for a value, will return `"default"` set.
+ For classifiers, a "default" set of parameters should be provided for
+ general testing, and a "results_comparison" set for comparing against
+ previously recorded results if the general set does not produce suitable
+ probabilities to compare against.
+
+ Returns
+ -------
+ params : dict or list of dict, default={}
+ Parameters to create testing instances of the class.
+ Each dict are parameters to construct an "interesting" test instance, i.e.,
+ `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
+ `create_test_instance` uses the first (or only) dictionary in `params`.
+ """
+ return {
+ "n_epochs": 50,
+ "batch_size": 32,
+ "filter_sizes": (128, 128, 64),
+ "dilation": 2,
+ "layers": (200, 100),
+ }
|
{"golden_diff": "diff --git a/sktime/classification/deep_learning/tapnet.py b/sktime/classification/deep_learning/tapnet.py\n--- a/sktime/classification/deep_learning/tapnet.py\n+++ b/sktime/classification/deep_learning/tapnet.py\n@@ -3,6 +3,8 @@\n \n __author__ = [\n \"Jack Russon\",\n+ \"TonyBagnall\",\n+ \"achieveordie\",\n ]\n __all__ = [\n \"TapNetClassifier\",\n@@ -230,3 +232,33 @@\n )\n \n return self\n+\n+ @classmethod\n+ def get_test_params(cls, parameter_set=\"default\"):\n+ \"\"\"Return testing parameter settings for the estimator.\n+\n+ Parameters\n+ ----------\n+ parameter_set : str, default=\"default\"\n+ Name of the set of test parameters to return, for use in tests. If no\n+ special parameters are defined for a value, will return `\"default\"` set.\n+ For classifiers, a \"default\" set of parameters should be provided for\n+ general testing, and a \"results_comparison\" set for comparing against\n+ previously recorded results if the general set does not produce suitable\n+ probabilities to compare against.\n+\n+ Returns\n+ -------\n+ params : dict or list of dict, default={}\n+ Parameters to create testing instances of the class.\n+ Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n+ `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n+ `create_test_instance` uses the first (or only) dictionary in `params`.\n+ \"\"\"\n+ return {\n+ \"n_epochs\": 50,\n+ \"batch_size\": 32,\n+ \"filter_sizes\": (128, 128, 64),\n+ \"dilation\": 2,\n+ \"layers\": (200, 100),\n+ }\n", "issue": "[BUG] TapNet test failures and long test times\nTapNet is currently failing the macOS 3.7 test (`Install and test / test-unix (3.7, macOS-11) (pull_request)`) in PR #3523. The PR makes no changes relevant to TapNet or DL estimators, and the test times are extremely high.\r\n\r\nhttps://github.com/sktime/sktime/actions/runs/3174214839/jobs/5170841022\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Time Convolutional Neural Network (CNN) for classification.\"\"\"\n\n__author__ = [\n \"Jack Russon\",\n]\n__all__ = [\n \"TapNetClassifier\",\n]\n\nfrom sklearn.utils import check_random_state\n\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\nfrom sktime.networks.tapnet import TapNetNetwork\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass TapNetClassifier(BaseDeepClassifier):\n \"\"\"Implementation of TapNetClassifier, as described in [1].\n\n Parameters\n ----------\n filter_sizes : array of int, default = (256, 256, 128)\n sets the kernel size argument for each convolutional block.\n Controls number of convolutional filters\n and number of neurons in attention dense layers.\n kernel_sizes : array of int, default = (8, 5, 3)\n controls the size of the convolutional kernels\n layers : array of int, default = (500, 300)\n size of dense layers\n reduction : int, default = 16\n divides the number of dense neurons in the first layer of the attention block.\n n_epochs : int, default = 2000\n number of epochs to train the model\n batch_size : int, default = 16\n number of samples per update\n dropout : float, default = 0.5\n dropout rate, in the range [0, 1)\n dilation : int, default = 1\n dilation value\n activation : str, default = \"sigmoid\"\n activation function for the last output layer\n loss : str, default = \"binary_crossentropy\"\n loss function for the classifier\n optimizer : str or None, default = 
\"Adam(lr=0.01)\"\n gradient updating function for the classifer\n use_bias : bool, default = True\n whether to use bias in the output dense layer\n use_rp : bool, default = True\n whether to use random projections\n use_att : bool, default = True\n whether to use self attention\n use_lstm : bool, default = True\n whether to use an LSTM layer\n use_cnn : bool, default = True\n whether to use a CNN layer\n verbose : bool, default = False\n whether to output extra information\n random_state : int or None, default = None\n seed for random\n\n Attributes\n ----------\n n_classes : int\n number of classes extracted from the data\n\n References\n ----------\n .. [1] Zhang et al. Tapnet: Multivariate time series classification with\n attentional prototypical network,\n Proceedings of the AAAI Conference on Artificial Intelligence\n 34(4), 6845-6852, 2020\n\n Examples\n --------\n >>> from sktime.classification.deep_learning.tapnet import TapNetClassifier\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(split=\"train\", return_X_y=True)\n >>> X_test, y_test = load_unit_test(split=\"test\", return_X_y=True)\n >>> tapnet = TapNetClassifier() # doctest: +SKIP\n >>> tapnet.fit(X_train, y_train) # doctest: +SKIP\n TapNetClassifier(...)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n n_epochs=2000,\n batch_size=16,\n dropout=0.5,\n filter_sizes=(256, 256, 128),\n kernel_size=(8, 5, 3),\n dilation=1,\n layers=(500, 300),\n use_rp=True,\n rp_params=(-1, 3),\n activation=\"sigmoid\",\n use_bias=True,\n use_att=True,\n use_lstm=True,\n use_cnn=True,\n random_state=None,\n padding=\"same\",\n loss=\"binary_crossentropy\",\n optimizer=None,\n metrics=None,\n callbacks=None,\n verbose=False,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(TapNetClassifier, self).__init__()\n\n self.batch_size = batch_size\n self.random_state = random_state\n self.kernel_size = kernel_size\n self.layers = layers\n self.rp_params = rp_params\n self.filter_sizes = filter_sizes\n self.activation = activation\n self.use_att = use_att\n self.use_bias = use_bias\n\n self.dilation = dilation\n self.padding = padding\n self.n_epochs = n_epochs\n self.loss = loss\n self.optimizer = optimizer\n self.metrics = metrics\n self.callbacks = callbacks\n self.verbose = verbose\n\n self._is_fitted = False\n\n self.dropout = dropout\n self.use_lstm = use_lstm\n self.use_cnn = use_cnn\n\n # parameters for random projection\n self.use_rp = use_rp\n self.rp_params = rp_params\n\n self._network = TapNetNetwork()\n\n def build_model(self, input_shape, n_classes, **kwargs):\n \"\"\"Construct a complied, un-trained, keras model that is ready for training.\n\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\n is the number of dimensions, m is the series length. Keras/tensorflow assume\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\n happen in fit.\n\n Parameters\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer, should be (m, d)\n n_classes : int\n The number of classes, which becomes the size of the output layer\n\n Returns\n -------\n output: a compiled Keras model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_state)\n\n if self.metrics is None:\n metrics = [\"accuracy\"]\n else:\n metrics = self.metrics\n\n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(\n units=n_classes, activation=self.activation, use_bias=self.use_bias\n )(output_layer)\n\n self.optimizer_ = (\n keras.optimizers.Adam(learning_rate=0.01)\n if self.optimizer is None\n else self.optimizer\n )\n\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n model.compile(\n loss=self.loss,\n optimizer=self.optimizer_,\n metrics=metrics,\n )\n\n return model\n\n def _fit(self, X, y):\n \"\"\"Fit the classifier on the training set (X, y).\n\n Parameters\n ----------\n X : np.ndarray of shape = (n_instances(n), n_dimensions(d), series_length(m))\n Input training samples\n y : np.ndarray of shape n\n Input training class labels\n\n Returns\n -------\n self: object\n \"\"\"\n if self.callbacks is None:\n self._callbacks = []\n\n y_onehot = self.convert_y_to_keras(y)\n # Transpose to conform to expectation format by keras\n X = X.transpose(0, 2, 1)\n\n check_random_state(self.random_state)\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\n if self.verbose:\n self.model_.summary()\n self.history = self.model_.fit(\n X,\n y_onehot,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n\n return self\n", "path": "sktime/classification/deep_learning/tapnet.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Time Convolutional Neural Network (CNN) for classification.\"\"\"\n\n__author__ = [\n \"Jack Russon\",\n \"TonyBagnall\",\n \"achieveordie\",\n]\n__all__ = [\n \"TapNetClassifier\",\n]\n\nfrom sklearn.utils import check_random_state\n\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\nfrom sktime.networks.tapnet import TapNetNetwork\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass TapNetClassifier(BaseDeepClassifier):\n \"\"\"Implementation of TapNetClassifier, as described in [1].\n\n Parameters\n ----------\n filter_sizes : array of int, default = (256, 256, 128)\n sets the kernel size argument for each convolutional block.\n Controls number of convolutional filters\n and number of neurons in attention dense layers.\n kernel_sizes : array of int, default = (8, 5, 3)\n controls the size of the convolutional kernels\n layers : array of int, default = (500, 300)\n size of dense layers\n reduction : int, default = 16\n divides the number of dense neurons in the first layer of the attention block.\n n_epochs : int, default = 2000\n number of epochs to train the model\n batch_size : int, default = 16\n number of samples per update\n dropout : float, default = 0.5\n dropout rate, in the range [0, 1)\n dilation : int, default = 1\n dilation value\n activation : str, default = \"sigmoid\"\n activation function for the last output layer\n loss : str, default = \"binary_crossentropy\"\n loss function for the classifier\n optimizer : str or None, 
default = \"Adam(lr=0.01)\"\n gradient updating function for the classifer\n use_bias : bool, default = True\n whether to use bias in the output dense layer\n use_rp : bool, default = True\n whether to use random projections\n use_att : bool, default = True\n whether to use self attention\n use_lstm : bool, default = True\n whether to use an LSTM layer\n use_cnn : bool, default = True\n whether to use a CNN layer\n verbose : bool, default = False\n whether to output extra information\n random_state : int or None, default = None\n seed for random\n\n Attributes\n ----------\n n_classes : int\n number of classes extracted from the data\n\n References\n ----------\n .. [1] Zhang et al. Tapnet: Multivariate time series classification with\n attentional prototypical network,\n Proceedings of the AAAI Conference on Artificial Intelligence\n 34(4), 6845-6852, 2020\n\n Examples\n --------\n >>> from sktime.classification.deep_learning.tapnet import TapNetClassifier\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(split=\"train\", return_X_y=True)\n >>> X_test, y_test = load_unit_test(split=\"test\", return_X_y=True)\n >>> tapnet = TapNetClassifier() # doctest: +SKIP\n >>> tapnet.fit(X_train, y_train) # doctest: +SKIP\n TapNetClassifier(...)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n n_epochs=2000,\n batch_size=16,\n dropout=0.5,\n filter_sizes=(256, 256, 128),\n kernel_size=(8, 5, 3),\n dilation=1,\n layers=(500, 300),\n use_rp=True,\n rp_params=(-1, 3),\n activation=\"sigmoid\",\n use_bias=True,\n use_att=True,\n use_lstm=True,\n use_cnn=True,\n random_state=None,\n padding=\"same\",\n loss=\"binary_crossentropy\",\n optimizer=None,\n metrics=None,\n callbacks=None,\n verbose=False,\n ):\n _check_dl_dependencies(severity=\"error\")\n super(TapNetClassifier, self).__init__()\n\n self.batch_size = batch_size\n self.random_state = random_state\n self.kernel_size = kernel_size\n self.layers = layers\n self.rp_params = rp_params\n self.filter_sizes = filter_sizes\n self.activation = activation\n self.use_att = use_att\n self.use_bias = use_bias\n\n self.dilation = dilation\n self.padding = padding\n self.n_epochs = n_epochs\n self.loss = loss\n self.optimizer = optimizer\n self.metrics = metrics\n self.callbacks = callbacks\n self.verbose = verbose\n\n self._is_fitted = False\n\n self.dropout = dropout\n self.use_lstm = use_lstm\n self.use_cnn = use_cnn\n\n # parameters for random projection\n self.use_rp = use_rp\n self.rp_params = rp_params\n\n self._network = TapNetNetwork()\n\n def build_model(self, input_shape, n_classes, **kwargs):\n \"\"\"Construct a complied, un-trained, keras model that is ready for training.\n\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\n is the number of dimensions, m is the series length. Keras/tensorflow assume\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\n happen in fit.\n\n Parameters\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer, should be (m, d)\n n_classes : int\n The number of classes, which becomes the size of the output layer\n\n Returns\n -------\n output: a compiled Keras model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_state)\n\n if self.metrics is None:\n metrics = [\"accuracy\"]\n else:\n metrics = self.metrics\n\n input_layer, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(\n units=n_classes, activation=self.activation, use_bias=self.use_bias\n )(output_layer)\n\n self.optimizer_ = (\n keras.optimizers.Adam(learning_rate=0.01)\n if self.optimizer is None\n else self.optimizer\n )\n\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n model.compile(\n loss=self.loss,\n optimizer=self.optimizer_,\n metrics=metrics,\n )\n\n return model\n\n def _fit(self, X, y):\n \"\"\"Fit the classifier on the training set (X, y).\n\n Parameters\n ----------\n X : np.ndarray of shape = (n_instances(n), n_dimensions(d), series_length(m))\n Input training samples\n y : np.ndarray of shape n\n Input training class labels\n\n Returns\n -------\n self: object\n \"\"\"\n if self.callbacks is None:\n self._callbacks = []\n\n y_onehot = self.convert_y_to_keras(y)\n # Transpose to conform to expectation format by keras\n X = X.transpose(0, 2, 1)\n\n check_random_state(self.random_state)\n self.input_shape = X.shape[1:]\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\n if self.verbose:\n self.model_.summary()\n self.history = self.model_.fit(\n X,\n y_onehot,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n\n return self\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n return {\n \"n_epochs\": 50,\n \"batch_size\": 32,\n \"filter_sizes\": (128, 128, 64),\n \"dilation\": 2,\n \"layers\": (200, 100),\n }\n", "path": "sktime/classification/deep_learning/tapnet.py"}]}
| 2,751 | 437 |
gh_patches_debug_12120
|
rasdani/github-patches
|
git_diff
|
numpy__numpy-13177
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong std for implicitly complex input
<!-- Please describe the issue in detail here, and fill in the fields below -->
Standart deviation formula could give unexpected result (e.g. imaginary values) when input is implicitly complex.
### Reproducing code example:
<!-- A short code example that reproduces the problem/missing feature. It should be
self-contained, i.e., possible to run as-is via 'python myproblem.py' -->
```python
import numpy as np
a = np.array([None, 0])
a[0] = 1j
b = np.array([1j, 0])
print(f'res={np.std(a)}, dtype={a.dtype}') # gives imaginary std
print(f'res={np.std(b)}, dtype={b.dtype}')
```
<!-- Remove these sections for a feature request -->
### Error message:
<!-- If you are reporting a segfault please include a GDB traceback, which you
can generate by following
https://github.com/numpy/numpy/blob/master/doc/source/dev/development_environment.rst#debugging -->
<!-- Full error message, if any (starting from line Traceback: ...) -->
### Numpy/Python version information:
numpy 1.16.2
python 3.7
<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->
res=0.5j, dtype=object
res=0.5, dtype=complex128
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpy/core/_methods.py`
Content:
```
1 """
2 Array methods which are called by both the C-code for the method
3 and the Python code for the NumPy-namespace function
4
5 """
6 from __future__ import division, absolute_import, print_function
7
8 import warnings
9
10 from numpy.core import multiarray as mu
11 from numpy.core import umath as um
12 from numpy.core.numeric import asanyarray
13 from numpy.core import numerictypes as nt
14 from numpy._globals import _NoValue
15
16 # save those O(100) nanoseconds!
17 umr_maximum = um.maximum.reduce
18 umr_minimum = um.minimum.reduce
19 umr_sum = um.add.reduce
20 umr_prod = um.multiply.reduce
21 umr_any = um.logical_or.reduce
22 umr_all = um.logical_and.reduce
23
24 # avoid keyword arguments to speed up parsing, saves about 15%-20% for very
25 # small reductions
26 def _amax(a, axis=None, out=None, keepdims=False,
27 initial=_NoValue, where=True):
28 return umr_maximum(a, axis, None, out, keepdims, initial, where)
29
30 def _amin(a, axis=None, out=None, keepdims=False,
31 initial=_NoValue, where=True):
32 return umr_minimum(a, axis, None, out, keepdims, initial, where)
33
34 def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
35 initial=_NoValue, where=True):
36 return umr_sum(a, axis, dtype, out, keepdims, initial, where)
37
38 def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
39 initial=_NoValue, where=True):
40 return umr_prod(a, axis, dtype, out, keepdims, initial, where)
41
42 def _any(a, axis=None, dtype=None, out=None, keepdims=False):
43 return umr_any(a, axis, dtype, out, keepdims)
44
45 def _all(a, axis=None, dtype=None, out=None, keepdims=False):
46 return umr_all(a, axis, dtype, out, keepdims)
47
48 def _count_reduce_items(arr, axis):
49 if axis is None:
50 axis = tuple(range(arr.ndim))
51 if not isinstance(axis, tuple):
52 axis = (axis,)
53 items = 1
54 for ax in axis:
55 items *= arr.shape[ax]
56 return items
57
58 def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
59 arr = asanyarray(a)
60
61 is_float16_result = False
62 rcount = _count_reduce_items(arr, axis)
63 # Make this warning show up first
64 if rcount == 0:
65 warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
66
67 # Cast bool, unsigned int, and int to float64 by default
68 if dtype is None:
69 if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
70 dtype = mu.dtype('f8')
71 elif issubclass(arr.dtype.type, nt.float16):
72 dtype = mu.dtype('f4')
73 is_float16_result = True
74
75 ret = umr_sum(arr, axis, dtype, out, keepdims)
76 if isinstance(ret, mu.ndarray):
77 ret = um.true_divide(
78 ret, rcount, out=ret, casting='unsafe', subok=False)
79 if is_float16_result and out is None:
80 ret = arr.dtype.type(ret)
81 elif hasattr(ret, 'dtype'):
82 if is_float16_result:
83 ret = arr.dtype.type(ret / rcount)
84 else:
85 ret = ret.dtype.type(ret / rcount)
86 else:
87 ret = ret / rcount
88
89 return ret
90
91 def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
92 arr = asanyarray(a)
93
94 rcount = _count_reduce_items(arr, axis)
95 # Make this warning show up on top.
96 if ddof >= rcount:
97 warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
98 stacklevel=2)
99
100 # Cast bool, unsigned int, and int to float64 by default
101 if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
102 dtype = mu.dtype('f8')
103
104 # Compute the mean.
105 # Note that if dtype is not of inexact type then arraymean will
106 # not be either.
107 arrmean = umr_sum(arr, axis, dtype, keepdims=True)
108 if isinstance(arrmean, mu.ndarray):
109 arrmean = um.true_divide(
110 arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
111 else:
112 arrmean = arrmean.dtype.type(arrmean / rcount)
113
114 # Compute sum of squared deviations from mean
115 # Note that x may not be inexact and that we need it to be an array,
116 # not a scalar.
117 x = asanyarray(arr - arrmean)
118 if issubclass(arr.dtype.type, nt.complexfloating):
119 x = um.multiply(x, um.conjugate(x), out=x).real
120 else:
121 x = um.multiply(x, x, out=x)
122 ret = umr_sum(x, axis, dtype, out, keepdims)
123
124 # Compute degrees of freedom and make sure it is not negative.
125 rcount = max([rcount - ddof, 0])
126
127 # divide by degrees of freedom
128 if isinstance(ret, mu.ndarray):
129 ret = um.true_divide(
130 ret, rcount, out=ret, casting='unsafe', subok=False)
131 elif hasattr(ret, 'dtype'):
132 ret = ret.dtype.type(ret / rcount)
133 else:
134 ret = ret / rcount
135
136 return ret
137
138 def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
139 ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
140 keepdims=keepdims)
141
142 if isinstance(ret, mu.ndarray):
143 ret = um.sqrt(ret, out=ret)
144 elif hasattr(ret, 'dtype'):
145 ret = ret.dtype.type(um.sqrt(ret))
146 else:
147 ret = um.sqrt(ret)
148
149 return ret
150
151 def _ptp(a, axis=None, out=None, keepdims=False):
152 return um.subtract(
153 umr_maximum(a, axis, None, out, keepdims),
154 umr_minimum(a, axis, None, None, keepdims),
155 out
156 )
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -115,10 +115,11 @@
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
- if issubclass(arr.dtype.type, nt.complexfloating):
- x = um.multiply(x, um.conjugate(x), out=x).real
- else:
+ if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
x = um.multiply(x, x, out=x)
+ else:
+ x = um.multiply(x, um.conjugate(x), out=x).real
+
ret = umr_sum(x, axis, dtype, out, keepdims)
# Compute degrees of freedom and make sure it is not negative.
|
{"golden_diff": "diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py\n--- a/numpy/core/_methods.py\n+++ b/numpy/core/_methods.py\n@@ -115,10 +115,11 @@\n # Note that x may not be inexact and that we need it to be an array,\n # not a scalar.\n x = asanyarray(arr - arrmean)\n- if issubclass(arr.dtype.type, nt.complexfloating):\n- x = um.multiply(x, um.conjugate(x), out=x).real\n- else:\n+ if issubclass(arr.dtype.type, (nt.floating, nt.integer)):\n x = um.multiply(x, x, out=x)\n+ else:\n+ x = um.multiply(x, um.conjugate(x), out=x).real\n+\n ret = umr_sum(x, axis, dtype, out, keepdims)\n \n # Compute degrees of freedom and make sure it is not negative.\n", "issue": "Wrong std for implicitly complex input\n<!-- Please describe the issue in detail here, and fill in the fields below -->\r\n\r\nStandart deviation formula could give unexpected result (e.g. imaginary values) when input is implicitly complex.\r\n\r\n### Reproducing code example:\r\n\r\n<!-- A short code example that reproduces the problem/missing feature. It should be\r\nself-contained, i.e., possible to run as-is via 'python myproblem.py' -->\r\n\r\n```python\r\nimport numpy as np\r\na = np.array([None, 0])\r\na[0] = 1j\r\nb = np.array([1j, 0])\r\n\r\nprint(f'res={np.std(a)}, dtype={a.dtype}') # gives imaginary std\r\nprint(f'res={np.std(b)}, dtype={b.dtype}')\r\n\r\n```\r\n\r\n<!-- Remove these sections for a feature request -->\r\n\r\n### Error message:\r\n\r\n<!-- If you are reporting a segfault please include a GDB traceback, which you\r\ncan generate by following\r\nhttps://github.com/numpy/numpy/blob/master/doc/source/dev/development_environment.rst#debugging -->\r\n\r\n<!-- Full error message, if any (starting from line Traceback: ...) -->\r\n\r\n### Numpy/Python version information:\r\nnumpy 1.16.2\r\npython 3.7\r\n\r\n<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->\r\nres=0.5j, dtype=object\r\nres=0.5, dtype=complex128\r\n\n", "before_files": [{"content": "\"\"\"\nArray methods which are called by both the C-code for the method\nand the Python code for the NumPy-namespace function\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport warnings\n\nfrom numpy.core import multiarray as mu\nfrom numpy.core import umath as um\nfrom numpy.core.numeric import asanyarray\nfrom numpy.core import numerictypes as nt\nfrom numpy._globals import _NoValue\n\n# save those O(100) nanoseconds!\numr_maximum = um.maximum.reduce\numr_minimum = um.minimum.reduce\numr_sum = um.add.reduce\numr_prod = um.multiply.reduce\numr_any = um.logical_or.reduce\numr_all = um.logical_and.reduce\n\n# avoid keyword arguments to speed up parsing, saves about 15%-20% for very\n# small reductions\ndef _amax(a, axis=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_maximum(a, axis, None, out, keepdims, initial, where)\n\ndef _amin(a, axis=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_minimum(a, axis, None, out, keepdims, initial, where)\n\ndef _sum(a, axis=None, dtype=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_sum(a, axis, dtype, out, keepdims, initial, where)\n\ndef _prod(a, axis=None, dtype=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_prod(a, axis, dtype, out, keepdims, initial, where)\n\ndef _any(a, axis=None, dtype=None, out=None, keepdims=False):\n return umr_any(a, axis, dtype, out, keepdims)\n\ndef _all(a, axis=None, dtype=None, out=None, keepdims=False):\n 
return umr_all(a, axis, dtype, out, keepdims)\n\ndef _count_reduce_items(arr, axis):\n if axis is None:\n axis = tuple(range(arr.ndim))\n if not isinstance(axis, tuple):\n axis = (axis,)\n items = 1\n for ax in axis:\n items *= arr.shape[ax]\n return items\n\ndef _mean(a, axis=None, dtype=None, out=None, keepdims=False):\n arr = asanyarray(a)\n\n is_float16_result = False\n rcount = _count_reduce_items(arr, axis)\n # Make this warning show up first\n if rcount == 0:\n warnings.warn(\"Mean of empty slice.\", RuntimeWarning, stacklevel=2)\n\n # Cast bool, unsigned int, and int to float64 by default\n if dtype is None:\n if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):\n dtype = mu.dtype('f8')\n elif issubclass(arr.dtype.type, nt.float16):\n dtype = mu.dtype('f4')\n is_float16_result = True\n\n ret = umr_sum(arr, axis, dtype, out, keepdims)\n if isinstance(ret, mu.ndarray):\n ret = um.true_divide(\n ret, rcount, out=ret, casting='unsafe', subok=False)\n if is_float16_result and out is None:\n ret = arr.dtype.type(ret)\n elif hasattr(ret, 'dtype'):\n if is_float16_result:\n ret = arr.dtype.type(ret / rcount)\n else:\n ret = ret.dtype.type(ret / rcount)\n else:\n ret = ret / rcount\n\n return ret\n\ndef _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n arr = asanyarray(a)\n\n rcount = _count_reduce_items(arr, axis)\n # Make this warning show up on top.\n if ddof >= rcount:\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning,\n stacklevel=2)\n\n # Cast bool, unsigned int, and int to float64 by default\n if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):\n dtype = mu.dtype('f8')\n\n # Compute the mean.\n # Note that if dtype is not of inexact type then arraymean will\n # not be either.\n arrmean = umr_sum(arr, axis, dtype, keepdims=True)\n if isinstance(arrmean, mu.ndarray):\n arrmean = um.true_divide(\n arrmean, rcount, out=arrmean, casting='unsafe', subok=False)\n else:\n arrmean = arrmean.dtype.type(arrmean / rcount)\n\n # Compute sum of squared deviations from mean\n # Note that x may not be inexact and that we need it to be an array,\n # not a scalar.\n x = asanyarray(arr - arrmean)\n if issubclass(arr.dtype.type, nt.complexfloating):\n x = um.multiply(x, um.conjugate(x), out=x).real\n else:\n x = um.multiply(x, x, out=x)\n ret = umr_sum(x, axis, dtype, out, keepdims)\n\n # Compute degrees of freedom and make sure it is not negative.\n rcount = max([rcount - ddof, 0])\n\n # divide by degrees of freedom\n if isinstance(ret, mu.ndarray):\n ret = um.true_divide(\n ret, rcount, out=ret, casting='unsafe', subok=False)\n elif hasattr(ret, 'dtype'):\n ret = ret.dtype.type(ret / rcount)\n else:\n ret = ret / rcount\n\n return ret\n\ndef _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,\n keepdims=keepdims)\n\n if isinstance(ret, mu.ndarray):\n ret = um.sqrt(ret, out=ret)\n elif hasattr(ret, 'dtype'):\n ret = ret.dtype.type(um.sqrt(ret))\n else:\n ret = um.sqrt(ret)\n\n return ret\n\ndef _ptp(a, axis=None, out=None, keepdims=False):\n return um.subtract(\n umr_maximum(a, axis, None, out, keepdims),\n umr_minimum(a, axis, None, None, keepdims),\n out\n )\n", "path": "numpy/core/_methods.py"}], "after_files": [{"content": "\"\"\"\nArray methods which are called by both the C-code for the method\nand the Python code for the NumPy-namespace function\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport warnings\n\nfrom numpy.core 
import multiarray as mu\nfrom numpy.core import umath as um\nfrom numpy.core.numeric import asanyarray\nfrom numpy.core import numerictypes as nt\nfrom numpy._globals import _NoValue\n\n# save those O(100) nanoseconds!\numr_maximum = um.maximum.reduce\numr_minimum = um.minimum.reduce\numr_sum = um.add.reduce\numr_prod = um.multiply.reduce\numr_any = um.logical_or.reduce\numr_all = um.logical_and.reduce\n\n# avoid keyword arguments to speed up parsing, saves about 15%-20% for very\n# small reductions\ndef _amax(a, axis=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_maximum(a, axis, None, out, keepdims, initial, where)\n\ndef _amin(a, axis=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_minimum(a, axis, None, out, keepdims, initial, where)\n\ndef _sum(a, axis=None, dtype=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_sum(a, axis, dtype, out, keepdims, initial, where)\n\ndef _prod(a, axis=None, dtype=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_prod(a, axis, dtype, out, keepdims, initial, where)\n\ndef _any(a, axis=None, dtype=None, out=None, keepdims=False):\n return umr_any(a, axis, dtype, out, keepdims)\n\ndef _all(a, axis=None, dtype=None, out=None, keepdims=False):\n return umr_all(a, axis, dtype, out, keepdims)\n\ndef _count_reduce_items(arr, axis):\n if axis is None:\n axis = tuple(range(arr.ndim))\n if not isinstance(axis, tuple):\n axis = (axis,)\n items = 1\n for ax in axis:\n items *= arr.shape[ax]\n return items\n\ndef _mean(a, axis=None, dtype=None, out=None, keepdims=False):\n arr = asanyarray(a)\n\n is_float16_result = False\n rcount = _count_reduce_items(arr, axis)\n # Make this warning show up first\n if rcount == 0:\n warnings.warn(\"Mean of empty slice.\", RuntimeWarning, stacklevel=2)\n\n # Cast bool, unsigned int, and int to float64 by default\n if dtype is None:\n if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):\n dtype = mu.dtype('f8')\n elif issubclass(arr.dtype.type, nt.float16):\n dtype = mu.dtype('f4')\n is_float16_result = True\n\n ret = umr_sum(arr, axis, dtype, out, keepdims)\n if isinstance(ret, mu.ndarray):\n ret = um.true_divide(\n ret, rcount, out=ret, casting='unsafe', subok=False)\n if is_float16_result and out is None:\n ret = arr.dtype.type(ret)\n elif hasattr(ret, 'dtype'):\n if is_float16_result:\n ret = arr.dtype.type(ret / rcount)\n else:\n ret = ret.dtype.type(ret / rcount)\n else:\n ret = ret / rcount\n\n return ret\n\ndef _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n arr = asanyarray(a)\n\n rcount = _count_reduce_items(arr, axis)\n # Make this warning show up on top.\n if ddof >= rcount:\n warnings.warn(\"Degrees of freedom <= 0 for slice\", RuntimeWarning,\n stacklevel=2)\n\n # Cast bool, unsigned int, and int to float64 by default\n if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):\n dtype = mu.dtype('f8')\n\n # Compute the mean.\n # Note that if dtype is not of inexact type then arraymean will\n # not be either.\n arrmean = umr_sum(arr, axis, dtype, keepdims=True)\n if isinstance(arrmean, mu.ndarray):\n arrmean = um.true_divide(\n arrmean, rcount, out=arrmean, casting='unsafe', subok=False)\n else:\n arrmean = arrmean.dtype.type(arrmean / rcount)\n\n # Compute sum of squared deviations from mean\n # Note that x may not be inexact and that we need it to be an array,\n # not a scalar.\n x = asanyarray(arr - arrmean)\n if issubclass(arr.dtype.type, (nt.floating, 
nt.integer)):\n x = um.multiply(x, x, out=x)\n else:\n x = um.multiply(x, um.conjugate(x), out=x).real\n\n ret = umr_sum(x, axis, dtype, out, keepdims)\n\n # Compute degrees of freedom and make sure it is not negative.\n rcount = max([rcount - ddof, 0])\n\n # divide by degrees of freedom\n if isinstance(ret, mu.ndarray):\n ret = um.true_divide(\n ret, rcount, out=ret, casting='unsafe', subok=False)\n elif hasattr(ret, 'dtype'):\n ret = ret.dtype.type(ret / rcount)\n else:\n ret = ret / rcount\n\n return ret\n\ndef _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,\n keepdims=keepdims)\n\n if isinstance(ret, mu.ndarray):\n ret = um.sqrt(ret, out=ret)\n elif hasattr(ret, 'dtype'):\n ret = ret.dtype.type(um.sqrt(ret))\n else:\n ret = um.sqrt(ret)\n\n return ret\n\ndef _ptp(a, axis=None, out=None, keepdims=False):\n return um.subtract(\n umr_maximum(a, axis, None, out, keepdims),\n umr_minimum(a, axis, None, None, keepdims),\n out\n )\n", "path": "numpy/core/_methods.py"}]}
| 2,335 | 212 |
gh_patches_debug_26390
|
rasdani/github-patches
|
git_diff
|
jupyterhub__jupyterhub-835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove or edit unreachable code block
@minrk It looks like `_check_user_model` has a few code lines that don't seem to be reachable. Should we remove the lines after the return statement? https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/apihandlers/base.py#L144
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/apihandlers/base.py`
Content:
```
1 """Base API handlers"""
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4
5 import json
6
7 from http.client import responses
8
9 from tornado import web
10
11 from ..handlers import BaseHandler
12 from ..utils import url_path_join
13
14 class APIHandler(BaseHandler):
15
16 def check_referer(self):
17 """Check Origin for cross-site API requests.
18
19 Copied from WebSocket with changes:
20
21 - allow unspecified host/referer (e.g. scripts)
22 """
23 host = self.request.headers.get("Host")
24 referer = self.request.headers.get("Referer")
25
26 # If no header is provided, assume it comes from a script/curl.
27 # We are only concerned with cross-site browser stuff here.
28 if not host:
29 self.log.warning("Blocking API request with no host")
30 return False
31 if not referer:
32 self.log.warning("Blocking API request with no referer")
33 return False
34
35 host_path = url_path_join(host, self.hub.server.base_url)
36 referer_path = referer.split('://', 1)[-1]
37 if not (referer_path + '/').startswith(host_path):
38 self.log.warning("Blocking Cross Origin API request. Referer: %s, Host: %s",
39 referer, host_path)
40 return False
41 return True
42
43 def get_current_user_cookie(self):
44 """Override get_user_cookie to check Referer header"""
45 cookie_user = super().get_current_user_cookie()
46 # check referer only if there is a cookie user,
47 # avoiding misleading "Blocking Cross Origin" messages
48 # when there's no cookie set anyway.
49 if cookie_user and not self.check_referer():
50 return None
51 return cookie_user
52
53 def get_json_body(self):
54 """Return the body of the request as JSON data."""
55 if not self.request.body:
56 return None
57 body = self.request.body.strip().decode('utf-8')
58 try:
59 model = json.loads(body)
60 except Exception:
61 self.log.debug("Bad JSON: %r", body)
62 self.log.error("Couldn't parse JSON", exc_info=True)
63 raise web.HTTPError(400, 'Invalid JSON in body of request')
64 return model
65
66 def write_error(self, status_code, **kwargs):
67 """Write JSON errors instead of HTML"""
68 exc_info = kwargs.get('exc_info')
69 message = ''
70 status_message = responses.get(status_code, 'Unknown Error')
71 if exc_info:
72 exception = exc_info[1]
73 # get the custom message, if defined
74 try:
75 message = exception.log_message % exception.args
76 except Exception:
77 pass
78
79 # construct the custom reason, if defined
80 reason = getattr(exception, 'reason', '')
81 if reason:
82 status_message = reason
83 self.set_header('Content-Type', 'application/json')
84 self.write(json.dumps({
85 'status': status_code,
86 'message': message or status_message,
87 }))
88
89 def user_model(self, user):
90 """Get the JSON model for a User object"""
91 model = {
92 'name': user.name,
93 'admin': user.admin,
94 'groups': [ g.name for g in user.groups ],
95 'server': user.url if user.running else None,
96 'pending': None,
97 'last_activity': user.last_activity.isoformat(),
98 }
99 if user.spawn_pending:
100 model['pending'] = 'spawn'
101 elif user.stop_pending:
102 model['pending'] = 'stop'
103 return model
104
105 def group_model(self, group):
106 """Get the JSON model for a Group object"""
107 return {
108 'name': group.name,
109 'users': [ u.name for u in group.users ]
110 }
111
112 _user_model_types = {
113 'name': str,
114 'admin': bool,
115 'groups': list,
116 }
117
118 _group_model_types = {
119 'name': str,
120 'users': list,
121 }
122
123 def _check_model(self, model, model_types, name):
124 """Check a model provided by a REST API request
125
126 Args:
127 model (dict): user-provided model
128 model_types (dict): dict of key:type used to validate types and keys
129 name (str): name of the model, used in error messages
130 """
131 if not isinstance(model, dict):
132 raise web.HTTPError(400, "Invalid JSON data: %r" % model)
133 if not set(model).issubset(set(model_types)):
134 raise web.HTTPError(400, "Invalid JSON keys: %r" % model)
135 for key, value in model.items():
136 if not isinstance(value, model_types[key]):
137 raise web.HTTPError(400, "%s.%s must be %s, not: %r" % (
138 name, key, model_types[key], type(value)
139 ))
140
141 def _check_user_model(self, model):
142 """Check a request-provided user model from a REST API"""
143 return self._check_model(model, self._user_model_types, 'user')
144 for groupname in model.get('groups', []):
145 if not isinstance(groupname, str):
146 raise web.HTTPError(400, "group names must be str, not %r" % type(groupname))
147
148 def _check_group_model(self, model):
149 """Check a request-provided user model from a REST API"""
150 self._check_model(model, self._group_model_types, 'group')
151 for username in model.get('users', []):
152 if not isinstance(username, str):
153 raise web.HTTPError(400, "usernames must be str, not %r" % type(groupname))
154
155 def options(self, *args, **kwargs):
156 self.set_header('Access-Control-Allow-Headers', 'accept, content-type')
157 self.finish()
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py
--- a/jupyterhub/apihandlers/base.py
+++ b/jupyterhub/apihandlers/base.py
@@ -140,19 +140,18 @@
def _check_user_model(self, model):
"""Check a request-provided user model from a REST API"""
- return self._check_model(model, self._user_model_types, 'user')
- for groupname in model.get('groups', []):
- if not isinstance(groupname, str):
- raise web.HTTPError(400, "group names must be str, not %r" % type(groupname))
+ self._check_model(model, self._user_model_types, 'user')
+ for username in model.get('users', []):
+ if not isinstance(username, str):
+ raise web.HTTPError(400, ("usernames must be str, not %r", type(username)))
def _check_group_model(self, model):
- """Check a request-provided user model from a REST API"""
+ """Check a request-provided group model from a REST API"""
self._check_model(model, self._group_model_types, 'group')
- for username in model.get('users', []):
- if not isinstance(username, str):
- raise web.HTTPError(400, "usernames must be str, not %r" % type(groupname))
+ for groupname in model.get('groups', []):
+ if not isinstance(groupname, str):
+ raise web.HTTPError(400, ("group names must be str, not %r", type(groupname)))
def options(self, *args, **kwargs):
self.set_header('Access-Control-Allow-Headers', 'accept, content-type')
self.finish()
-
\ No newline at end of file
|
{"golden_diff": "diff --git a/jupyterhub/apihandlers/base.py b/jupyterhub/apihandlers/base.py\n--- a/jupyterhub/apihandlers/base.py\n+++ b/jupyterhub/apihandlers/base.py\n@@ -140,19 +140,18 @@\n \n def _check_user_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n- return self._check_model(model, self._user_model_types, 'user')\n- for groupname in model.get('groups', []):\n- if not isinstance(groupname, str):\n- raise web.HTTPError(400, \"group names must be str, not %r\" % type(groupname))\n+ self._check_model(model, self._user_model_types, 'user')\n+ for username in model.get('users', []):\n+ if not isinstance(username, str):\n+ raise web.HTTPError(400, (\"usernames must be str, not %r\", type(username)))\n \n def _check_group_model(self, model):\n- \"\"\"Check a request-provided user model from a REST API\"\"\"\n+ \"\"\"Check a request-provided group model from a REST API\"\"\"\n self._check_model(model, self._group_model_types, 'group')\n- for username in model.get('users', []):\n- if not isinstance(username, str):\n- raise web.HTTPError(400, \"usernames must be str, not %r\" % type(groupname))\n+ for groupname in model.get('groups', []):\n+ if not isinstance(groupname, str):\n+ raise web.HTTPError(400, (\"group names must be str, not %r\", type(groupname)))\n \n def options(self, *args, **kwargs):\n self.set_header('Access-Control-Allow-Headers', 'accept, content-type')\n self.finish()\n- \n\\ No newline at end of file\n", "issue": "Remove or edit unreachable code block\n@minrk It looks like `_check_user_model` has a few code lines that don't seem to be reachable. Should we remove the lines after the return statement? https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/apihandlers/base.py#L144\n\n", "before_files": [{"content": "\"\"\"Base API handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom http.client import responses\n\nfrom tornado import web\n\nfrom ..handlers import BaseHandler\nfrom ..utils import url_path_join\n\nclass APIHandler(BaseHandler):\n\n def check_referer(self):\n \"\"\"Check Origin for cross-site API requests.\n \n Copied from WebSocket with changes:\n \n - allow unspecified host/referer (e.g. scripts)\n \"\"\"\n host = self.request.headers.get(\"Host\")\n referer = self.request.headers.get(\"Referer\")\n\n # If no header is provided, assume it comes from a script/curl.\n # We are only concerned with cross-site browser stuff here.\n if not host:\n self.log.warning(\"Blocking API request with no host\")\n return False\n if not referer:\n self.log.warning(\"Blocking API request with no referer\")\n return False\n \n host_path = url_path_join(host, self.hub.server.base_url)\n referer_path = referer.split('://', 1)[-1]\n if not (referer_path + '/').startswith(host_path):\n self.log.warning(\"Blocking Cross Origin API request. 
Referer: %s, Host: %s\",\n referer, host_path)\n return False\n return True\n \n def get_current_user_cookie(self):\n \"\"\"Override get_user_cookie to check Referer header\"\"\"\n cookie_user = super().get_current_user_cookie()\n # check referer only if there is a cookie user,\n # avoiding misleading \"Blocking Cross Origin\" messages\n # when there's no cookie set anyway.\n if cookie_user and not self.check_referer():\n return None\n return cookie_user\n\n def get_json_body(self):\n \"\"\"Return the body of the request as JSON data.\"\"\"\n if not self.request.body:\n return None\n body = self.request.body.strip().decode('utf-8')\n try:\n model = json.loads(body)\n except Exception:\n self.log.debug(\"Bad JSON: %r\", body)\n self.log.error(\"Couldn't parse JSON\", exc_info=True)\n raise web.HTTPError(400, 'Invalid JSON in body of request')\n return model\n \n def write_error(self, status_code, **kwargs):\n \"\"\"Write JSON errors instead of HTML\"\"\"\n exc_info = kwargs.get('exc_info')\n message = ''\n status_message = responses.get(status_code, 'Unknown Error')\n if exc_info:\n exception = exc_info[1]\n # get the custom message, if defined\n try:\n message = exception.log_message % exception.args\n except Exception:\n pass\n\n # construct the custom reason, if defined\n reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n self.set_header('Content-Type', 'application/json')\n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\n }))\n\n def user_model(self, user):\n \"\"\"Get the JSON model for a User object\"\"\"\n model = {\n 'name': user.name,\n 'admin': user.admin,\n 'groups': [ g.name for g in user.groups ],\n 'server': user.url if user.running else None,\n 'pending': None,\n 'last_activity': user.last_activity.isoformat(),\n }\n if user.spawn_pending:\n model['pending'] = 'spawn'\n elif user.stop_pending:\n model['pending'] = 'stop'\n return model\n\n def group_model(self, group):\n \"\"\"Get the JSON model for a Group object\"\"\"\n return {\n 'name': group.name,\n 'users': [ u.name for u in group.users ]\n }\n\n _user_model_types = {\n 'name': str,\n 'admin': bool,\n 'groups': list,\n }\n\n _group_model_types = {\n 'name': str,\n 'users': list,\n }\n\n def _check_model(self, model, model_types, name):\n \"\"\"Check a model provided by a REST API request\n \n Args:\n model (dict): user-provided model\n model_types (dict): dict of key:type used to validate types and keys\n name (str): name of the model, used in error messages\n \"\"\"\n if not isinstance(model, dict):\n raise web.HTTPError(400, \"Invalid JSON data: %r\" % model)\n if not set(model).issubset(set(model_types)):\n raise web.HTTPError(400, \"Invalid JSON keys: %r\" % model)\n for key, value in model.items():\n if not isinstance(value, model_types[key]):\n raise web.HTTPError(400, \"%s.%s must be %s, not: %r\" % (\n name, key, model_types[key], type(value)\n ))\n\n def _check_user_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n return self._check_model(model, self._user_model_types, 'user')\n for groupname in model.get('groups', []):\n if not isinstance(groupname, str):\n raise web.HTTPError(400, \"group names must be str, not %r\" % type(groupname))\n\n def _check_group_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n self._check_model(model, self._group_model_types, 'group')\n for username in model.get('users', []):\n if not isinstance(username, str):\n raise 
web.HTTPError(400, \"usernames must be str, not %r\" % type(groupname))\n\n def options(self, *args, **kwargs):\n self.set_header('Access-Control-Allow-Headers', 'accept, content-type')\n self.finish()\n ", "path": "jupyterhub/apihandlers/base.py"}], "after_files": [{"content": "\"\"\"Base API handlers\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport json\n\nfrom http.client import responses\n\nfrom tornado import web\n\nfrom ..handlers import BaseHandler\nfrom ..utils import url_path_join\n\nclass APIHandler(BaseHandler):\n\n def check_referer(self):\n \"\"\"Check Origin for cross-site API requests.\n \n Copied from WebSocket with changes:\n \n - allow unspecified host/referer (e.g. scripts)\n \"\"\"\n host = self.request.headers.get(\"Host\")\n referer = self.request.headers.get(\"Referer\")\n\n # If no header is provided, assume it comes from a script/curl.\n # We are only concerned with cross-site browser stuff here.\n if not host:\n self.log.warning(\"Blocking API request with no host\")\n return False\n if not referer:\n self.log.warning(\"Blocking API request with no referer\")\n return False\n \n host_path = url_path_join(host, self.hub.server.base_url)\n referer_path = referer.split('://', 1)[-1]\n if not (referer_path + '/').startswith(host_path):\n self.log.warning(\"Blocking Cross Origin API request. Referer: %s, Host: %s\",\n referer, host_path)\n return False\n return True\n \n def get_current_user_cookie(self):\n \"\"\"Override get_user_cookie to check Referer header\"\"\"\n cookie_user = super().get_current_user_cookie()\n # check referer only if there is a cookie user,\n # avoiding misleading \"Blocking Cross Origin\" messages\n # when there's no cookie set anyway.\n if cookie_user and not self.check_referer():\n return None\n return cookie_user\n\n def get_json_body(self):\n \"\"\"Return the body of the request as JSON data.\"\"\"\n if not self.request.body:\n return None\n body = self.request.body.strip().decode('utf-8')\n try:\n model = json.loads(body)\n except Exception:\n self.log.debug(\"Bad JSON: %r\", body)\n self.log.error(\"Couldn't parse JSON\", exc_info=True)\n raise web.HTTPError(400, 'Invalid JSON in body of request')\n return model\n \n def write_error(self, status_code, **kwargs):\n \"\"\"Write JSON errors instead of HTML\"\"\"\n exc_info = kwargs.get('exc_info')\n message = ''\n status_message = responses.get(status_code, 'Unknown Error')\n if exc_info:\n exception = exc_info[1]\n # get the custom message, if defined\n try:\n message = exception.log_message % exception.args\n except Exception:\n pass\n\n # construct the custom reason, if defined\n reason = getattr(exception, 'reason', '')\n if reason:\n status_message = reason\n self.set_header('Content-Type', 'application/json')\n self.write(json.dumps({\n 'status': status_code,\n 'message': message or status_message,\n }))\n\n def user_model(self, user):\n \"\"\"Get the JSON model for a User object\"\"\"\n model = {\n 'name': user.name,\n 'admin': user.admin,\n 'groups': [ g.name for g in user.groups ],\n 'server': user.url if user.running else None,\n 'pending': None,\n 'last_activity': user.last_activity.isoformat(),\n }\n if user.spawn_pending:\n model['pending'] = 'spawn'\n elif user.stop_pending:\n model['pending'] = 'stop'\n return model\n\n def group_model(self, group):\n \"\"\"Get the JSON model for a Group object\"\"\"\n return {\n 'name': group.name,\n 'users': [ u.name for u in group.users ]\n }\n\n _user_model_types = {\n 
'name': str,\n 'admin': bool,\n 'groups': list,\n }\n\n _group_model_types = {\n 'name': str,\n 'users': list,\n }\n\n def _check_model(self, model, model_types, name):\n \"\"\"Check a model provided by a REST API request\n \n Args:\n model (dict): user-provided model\n model_types (dict): dict of key:type used to validate types and keys\n name (str): name of the model, used in error messages\n \"\"\"\n if not isinstance(model, dict):\n raise web.HTTPError(400, \"Invalid JSON data: %r\" % model)\n if not set(model).issubset(set(model_types)):\n raise web.HTTPError(400, \"Invalid JSON keys: %r\" % model)\n for key, value in model.items():\n if not isinstance(value, model_types[key]):\n raise web.HTTPError(400, \"%s.%s must be %s, not: %r\" % (\n name, key, model_types[key], type(value)\n ))\n\n def _check_user_model(self, model):\n \"\"\"Check a request-provided user model from a REST API\"\"\"\n self._check_model(model, self._user_model_types, 'user')\n for username in model.get('users', []):\n if not isinstance(username, str):\n raise web.HTTPError(400, (\"usernames must be str, not %r\", type(username)))\n\n def _check_group_model(self, model):\n \"\"\"Check a request-provided group model from a REST API\"\"\"\n self._check_model(model, self._group_model_types, 'group')\n for groupname in model.get('groups', []):\n if not isinstance(groupname, str):\n raise web.HTTPError(400, (\"group names must be str, not %r\", type(groupname)))\n\n def options(self, *args, **kwargs):\n self.set_header('Access-Control-Allow-Headers', 'accept, content-type')\n self.finish()\n", "path": "jupyterhub/apihandlers/base.py"}]}
| 1,952 | 408 |
gh_patches_debug_25209
|
rasdani/github-patches
|
git_diff
|
urllib3__urllib3-516
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support body param in RequestMethods.request
Easiest way to do this is by avoiding defining a body kw when no fields are given, then if both are given it will naturally raise a "passed twice" error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `urllib3/request.py`
Content:
```
1 try:
2 from urllib.parse import urlencode
3 except ImportError:
4 from urllib import urlencode
5
6 from .filepost import encode_multipart_formdata
7
8
9 __all__ = ['RequestMethods']
10
11
12 class RequestMethods(object):
13 """
14 Convenience mixin for classes who implement a :meth:`urlopen` method, such
15 as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
16 :class:`~urllib3.poolmanager.PoolManager`.
17
18 Provides behavior for making common types of HTTP request methods and
19 decides which type of request field encoding to use.
20
21 Specifically,
22
23 :meth:`.request_encode_url` is for sending requests whose fields are
24 encoded in the URL (such as GET, HEAD, DELETE).
25
26 :meth:`.request_encode_body` is for sending requests whose fields are
27 encoded in the *body* of the request using multipart or www-form-urlencoded
28 (such as for POST, PUT, PATCH).
29
30 :meth:`.request` is for making any kind of request, it will look up the
31 appropriate encoding format and use one of the above two methods to make
32 the request.
33
34 Initializer parameters:
35
36 :param headers:
37 Headers to include with all requests, unless other headers are given
38 explicitly.
39 """
40
41 _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
42
43 def __init__(self, headers=None):
44 self.headers = headers or {}
45
46 def urlopen(self, method, url, body=None, headers=None,
47 encode_multipart=True, multipart_boundary=None,
48 **kw): # Abstract
49 raise NotImplemented("Classes extending RequestMethods must implement "
50 "their own ``urlopen`` method.")
51
52 def request(self, method, url, fields=None, headers=None, **urlopen_kw):
53 """
54 Make a request using :meth:`urlopen` with the appropriate encoding of
55 ``fields`` based on the ``method`` used.
56
57 This is a convenience method that requires the least amount of manual
58 effort. It can be used in most situations, while still having the
59 option to drop down to more specific methods when necessary, such as
60 :meth:`request_encode_url`, :meth:`request_encode_body`,
61 or even the lowest level :meth:`urlopen`.
62 """
63 method = method.upper()
64
65 if method in self._encode_url_methods:
66 return self.request_encode_url(method, url, fields=fields,
67 headers=headers,
68 **urlopen_kw)
69 else:
70 return self.request_encode_body(method, url, fields=fields,
71 headers=headers,
72 **urlopen_kw)
73
74 def request_encode_url(self, method, url, fields=None, **urlopen_kw):
75 """
76 Make a request using :meth:`urlopen` with the ``fields`` encoded in
77 the url. This is useful for request methods like GET, HEAD, DELETE, etc.
78 """
79 if fields:
80 url += '?' + urlencode(fields)
81 return self.urlopen(method, url, **urlopen_kw)
82
83 def request_encode_body(self, method, url, fields=None, headers=None,
84 encode_multipart=True, multipart_boundary=None,
85 **urlopen_kw):
86 """
87 Make a request using :meth:`urlopen` with the ``fields`` encoded in
88 the body. This is useful for request methods like POST, PUT, PATCH, etc.
89
90 When ``encode_multipart=True`` (default), then
91 :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
92 the payload with the appropriate content type. Otherwise
93 :meth:`urllib.urlencode` is used with the
94 'application/x-www-form-urlencoded' content type.
95
96 Multipart encoding must be used when posting files, and it's reasonably
97 safe to use it in other times too. However, it may break request
98 signing, such as with OAuth.
99
100 Supports an optional ``fields`` parameter of key/value strings AND
101 key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
102 the MIME type is optional. For example::
103
104 fields = {
105 'foo': 'bar',
106 'fakefile': ('foofile.txt', 'contents of foofile'),
107 'realfile': ('barfile.txt', open('realfile').read()),
108 'typedfile': ('bazfile.bin', open('bazfile').read(),
109 'image/jpeg'),
110 'nonamefile': 'contents of nonamefile field',
111 }
112
113 When uploading a file, providing a filename (the first parameter of the
114 tuple) is optional but recommended to best mimick behavior of browsers.
115
116 Note that if ``headers`` are supplied, the 'Content-Type' header will
117 be overwritten because it depends on the dynamic random boundary string
118 which is used to compose the body of the request. The random boundary
119 string can be explicitly set with the ``multipart_boundary`` parameter.
120 """
121 if encode_multipart:
122 body, content_type = encode_multipart_formdata(
123 fields or {}, boundary=multipart_boundary)
124 else:
125 body, content_type = (urlencode(fields or {}),
126 'application/x-www-form-urlencoded')
127
128 if headers is None:
129 headers = self.headers
130
131 headers_ = {'Content-Type': content_type}
132 headers_.update(headers)
133
134 return self.urlopen(method, url, body=body, headers=headers_,
135 **urlopen_kw)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/urllib3/request.py b/urllib3/request.py
--- a/urllib3/request.py
+++ b/urllib3/request.py
@@ -118,18 +118,24 @@
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
- if encode_multipart:
- body, content_type = encode_multipart_formdata(
- fields or {}, boundary=multipart_boundary)
- else:
- body, content_type = (urlencode(fields or {}),
- 'application/x-www-form-urlencoded')
-
if headers is None:
headers = self.headers
- headers_ = {'Content-Type': content_type}
- headers_.update(headers)
+ extra_kw = {'headers': {}}
+
+ if fields:
+ if 'body' in urlopen_kw:
+ raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.')
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+ else:
+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+ extra_kw['body'] = body
+ extra_kw['headers'] = {'Content-Type': content_type}
+
+ extra_kw['headers'].update(headers)
+ extra_kw.update(urlopen_kw)
- return self.urlopen(method, url, body=body, headers=headers_,
- **urlopen_kw)
+ return self.urlopen(method, url, **extra_kw)
|
{"golden_diff": "diff --git a/urllib3/request.py b/urllib3/request.py\n--- a/urllib3/request.py\n+++ b/urllib3/request.py\n@@ -118,18 +118,24 @@\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n- if encode_multipart:\n- body, content_type = encode_multipart_formdata(\n- fields or {}, boundary=multipart_boundary)\n- else:\n- body, content_type = (urlencode(fields or {}),\n- 'application/x-www-form-urlencoded')\n-\n if headers is None:\n headers = self.headers\n \n- headers_ = {'Content-Type': content_type}\n- headers_.update(headers)\n+ extra_kw = {'headers': {}}\n+\n+ if fields:\n+ if 'body' in urlopen_kw:\n+ raise TypeError('request got values for both \\'fields\\' and \\'body\\', can only specify one.')\n+\n+ if encode_multipart:\n+ body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)\n+ else:\n+ body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'\n+\n+ extra_kw['body'] = body\n+ extra_kw['headers'] = {'Content-Type': content_type}\n+\n+ extra_kw['headers'].update(headers)\n+ extra_kw.update(urlopen_kw)\n \n- return self.urlopen(method, url, body=body, headers=headers_,\n- **urlopen_kw)\n+ return self.urlopen(method, url, **extra_kw)\n", "issue": "Support body param in RequestMethods.request\nEasiest way to do this is by avoiding defining a body kw when no fields are given, then if both are given it will naturally raise a \"passed twice\" error.\n\n", "before_files": [{"content": "try:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\nfrom .filepost import encode_multipart_formdata\n\n\n__all__ = ['RequestMethods']\n\n\nclass RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`~urllib3.connectionpool.HTTPConnectionPool` and\n :class:`~urllib3.poolmanager.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(self, method, url, body=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **kw): # Abstract\n raise NotImplemented(\"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\")\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. 
It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if method in self._encode_url_methods:\n return self.request_encode_url(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n else:\n return self.request_encode_body(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n\n def request_encode_url(self, method, url, fields=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if fields:\n url += '?' + urlencode(fields)\n return self.urlopen(method, url, **urlopen_kw)\n\n def request_encode_body(self, method, url, fields=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :meth:`urllib.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimick behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. 
The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields or {}, boundary=multipart_boundary)\n else:\n body, content_type = (urlencode(fields or {}),\n 'application/x-www-form-urlencoded')\n\n if headers is None:\n headers = self.headers\n\n headers_ = {'Content-Type': content_type}\n headers_.update(headers)\n\n return self.urlopen(method, url, body=body, headers=headers_,\n **urlopen_kw)\n", "path": "urllib3/request.py"}], "after_files": [{"content": "try:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\nfrom .filepost import encode_multipart_formdata\n\n\n__all__ = ['RequestMethods']\n\n\nclass RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`~urllib3.connectionpool.HTTPConnectionPool` and\n :class:`~urllib3.poolmanager.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(self, method, url, body=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **kw): # Abstract\n raise NotImplemented(\"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\")\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if method in self._encode_url_methods:\n return self.request_encode_url(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n else:\n return self.request_encode_body(method, url, fields=fields,\n headers=headers,\n **urlopen_kw)\n\n def request_encode_url(self, method, url, fields=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if fields:\n url += '?' + urlencode(fields)\n return self.urlopen(method, url, **urlopen_kw)\n\n def request_encode_body(self, method, url, fields=None, headers=None,\n encode_multipart=True, multipart_boundary=None,\n **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. 
This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :meth:`urllib.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimick behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {'headers': {}}\n\n if fields:\n if 'body' in urlopen_kw:\n raise TypeError('request got values for both \\'fields\\' and \\'body\\', can only specify one.')\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)\n else:\n body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'\n\n extra_kw['body'] = body\n extra_kw['headers'] = {'Content-Type': content_type}\n\n extra_kw['headers'].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)\n", "path": "urllib3/request.py"}]}
| 1,766 | 351 |
gh_patches_debug_38491 | rasdani/github-patches | git_diff | cleanlab__cleanlab-423 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Token Classification: given_labels -> labels
Rename argument `given_labels` to just `labels` throughout the module to be more consistent with rest of package.
Example: https://github.com/cleanlab/cleanlab/blob/master/cleanlab/token_classification/summary.py#L33
Need to correspondingly update tutorial notebook as well, possibly also example notebook but I don't think so.
--- END ISSUE ---
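For concreteness, here is a minimal sketch of how a caller would pass the renamed keyword once `given_labels` becomes `labels`. The toy data, variable names, and the assumption that the rename has already landed in the installed cleanlab version are illustrative, not taken from the repository:

```python
import numpy as np
from cleanlab.token_classification.summary import display_issues

# Toy data: one sentence, two tokens, three classes (illustrative only).
given_words = [["Hello", "world"]]
pred_probs = [np.array([[0.9, 0.05, 0.05], [0.2, 0.7, 0.1]])]
per_sentence_labels = [[0, 2]]
issues = [(0, 1)]  # flag the 2nd token of the 1st sentence

# After the rename, the keyword is `labels` (previously `given_labels`).
display_issues(
    issues,
    given_words,
    pred_probs=pred_probs,
    labels=per_sentence_labels,
)
```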
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cleanlab/token_classification/summary.py`
Content:
```
1 # Copyright (C) 2017-2022 Cleanlab Inc.
2 # This file is part of cleanlab.
3 #
4 # cleanlab is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as published
6 # by the Free Software Foundation, either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # cleanlab is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Affero General Public License for more details.
13 #
14 # You should have received a copy of the GNU Affero General Public License
15 # along with cleanlab. If not, see <https://www.gnu.org/licenses/>.
16
17 """
18 Methods to display sentences and their label issues in a token classification dataset (text data), as well as summarize the types of issues identified.
19 """
20
21 import numpy as np
22 import pandas as pd
23 from typing import List, Optional, Tuple, Dict, Any
24
25 from cleanlab.internal.token_classification_utils import get_sentence, color_sentence
26
27
28 def display_issues(
29 issues: list,
30 given_words: List[List[str]],
31 *,
32 pred_probs: Optional[list] = None,
33 given_labels: Optional[list] = None,
34 exclude: List[Tuple[int, int]] = [],
35 class_names: Optional[List[str]] = None,
36 top: int = 20
37 ) -> None:
38 """
39 Display issues, including sentence with issue token highlighted. Also shows given and predicted label
40 if possible.
41
42 Parameters
43 ----------
44 issues:
45 list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.
46
47 given_words:
48 tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from
49 the original file.
50
51 pred_probs:
52 list of model-predicted probability, such that `pred_probs[i]` contains the model-predicted probability of
53 the tokens in the i'th sentence, and has shape `(N, K)`, where `N` is the number of given tokens of the i'th
54 sentence, and `K` is the number of classes predicted by the model. If provided, also displays the predicted
55 label of the token.
56
57 given_labels:
58 list of given labels, such that `given_labels[i]` is a list containing the given labels of the tokens in the
59 i'th sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also
60 displays the given label of the token.
61
62 exclude:
63 list of given/predicted label swaps to be excluded. For example, if `exclude=[(0, 1), (1, 0)]`, swaps between
64 class 0 and 1 are not displayed.
65
66 class_names:
67 name of classes. If not provided, display the integer index for predicted and given labels.
68
69 top: int, default=20
70 maximum number of outputs to be printed.
71
72 """
73 if not class_names:
74 print(
75 "Classes will be printed in terms of their integer index since `class_names` was not provided. "
76 )
77 print("Specify this argument to see the string names of each class. \n")
78
79 shown = 0
80 is_tuple = isinstance(issues[0], tuple)
81
82 for issue in issues:
83 if is_tuple:
84 i, j = issue
85 sentence = get_sentence(given_words[i])
86 word = given_words[i][j]
87
88 if pred_probs:
89 prediction = pred_probs[i][j].argmax()
90 if given_labels:
91 given = given_labels[i][j]
92 if pred_probs and given_labels:
93 if (given, prediction) in exclude:
94 continue
95
96 if pred_probs and class_names:
97 prediction = class_names[prediction]
98 if given_labels and class_names:
99 given = class_names[given]
100
101 shown += 1
102 print("Sentence %d, token %d: \n%s" % (i, j, color_sentence(sentence, word)))
103 if given_labels and not pred_probs:
104 print("Given label: %s\n" % str(given))
105 elif not given_labels and pred_probs:
106 print("Predicted label according to provided pred_probs: %s\n" % str(prediction))
107 elif given_labels and pred_probs:
108 print(
109 "Given label: %s, predicted label according to provided pred_probs: %s\n"
110 % (str(given), str(prediction))
111 )
112 else:
113 print()
114 else:
115 shown += 1
116 sentence = get_sentence(given_words[issue])
117 print("Sentence %d: %s\n" % (issue, sentence))
118 if shown == top:
119 break
120
121
122 def common_label_issues(
123 issues: List[Tuple[int, int]],
124 given_words: List[List[str]],
125 *,
126 labels: Optional[list] = None,
127 pred_probs: Optional[list] = None,
128 class_names: Optional[List[str]] = None,
129 top: int = 10,
130 exclude: List[Tuple[int, int]] = [],
131 verbose: bool = True
132 ) -> pd.DataFrame:
133 """
134 Display the most common tokens that are potentially mislabeled.
135
136 Parameters
137 ----------
138 issues:
139 list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.
140
141 given_words:
142 tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from
143 the original file.
144
145 labels:
146 list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the i'th
147 sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also
148 displays the given label of the token.
149
150 pred_probs:
151 list of model-predicted probability, such that `pred_probs[i]` contains the model-predicted probability of
152 the tokens in the i'th sentence, and has shape `(N, K)`, where `N` is the number of given tokens of the i'th
153 sentence, and `K` is the number of classes predicted by the model. If both `labels` and `pred_probs` are
154 provided, also evaluate each type of given/predicted label swap.
155
156 class_names:
157 name of classes. If not provided, display the integer index for predicted and given labels.
158
159 top:
160 maximum number of outputs to be printed.
161
162 exclude:
163 list of given/predicted label swaps to be excluded. For example, if `exclude=[(0, 1), (1, 0)]`, swaps between
164 class 0 and 1 are not displayed.
165
166 verbose:
167 if set to True, also display each type of given/predicted label swap for each token.
168
169 Returns
170 ---------
171 df:
172 if both `labels` and `pred_probs` are provided, return a data frame with columns ['token', 'given_label',
173 'predicted_label', 'num_label_issues'], and each row contains the information for a specific token and
174 given/predicted label swap, ordered by the number of label issues in descending order. Otherwise, return
175 a data frame with columns ['token', 'num_label_issues'], and each row contains the information for a specific
176 token, ordered by the number of label issues in descending order.
177
178 """
179 count: Dict[str, Any] = {}
180 if not labels or not pred_probs:
181 for issue in issues:
182 i, j = issue
183 word = given_words[i][j]
184 if word not in count:
185 count[word] = 0
186 count[word] += 1
187
188 words = [word for word in count.keys()]
189 freq = [count[word] for word in words]
190 rank = np.argsort(freq)[::-1][:top]
191
192 for r in rank:
193 print(
194 "Token '%s' is potentially mislabeled %d times throughout the dataset\n"
195 % (words[r], freq[r])
196 )
197
198 info = [[word, f] for word, f in zip(words, freq)]
199 info = sorted(info, key=lambda x: x[1], reverse=True)
200 return pd.DataFrame(info, columns=["token", "num_label_issues"])
201
202 if not class_names:
203 print(
204 "Classes will be printed in terms of their integer index since `class_names` was not provided. "
205 )
206 print("Specify this argument to see the string names of each class. \n")
207
208 n = pred_probs[0].shape[1]
209 for issue in issues:
210 i, j = issue
211 word = given_words[i][j]
212 label = labels[i][j]
213 pred = pred_probs[i][j].argmax()
214 if word not in count:
215 count[word] = np.zeros([n, n], dtype=int)
216 if (label, pred) not in exclude:
217 count[word][label][pred] += 1
218 words = [word for word in count.keys()]
219 freq = [np.sum(count[word]) for word in words]
220 rank = np.argsort(freq)[::-1][:top]
221
222 for r in rank:
223 matrix = count[words[r]]
224 most_frequent = np.argsort(count[words[r]].flatten())[::-1]
225 print(
226 "Token '%s' is potentially mislabeled %d times throughout the dataset"
227 % (words[r], freq[r])
228 )
229 if verbose:
230 print(
231 "---------------------------------------------------------------------------------------"
232 )
233 for f in most_frequent:
234 i, j = f // n, f % n
235 if matrix[i][j] == 0:
236 break
237 if class_names:
238 print(
239 "labeled as class `%s` but predicted to actually be class `%s` %d times"
240 % (class_names[i], class_names[j], matrix[i][j])
241 )
242 else:
243 print(
244 "labeled as class %d but predicted to actually be class %d %d times"
245 % (i, j, matrix[i][j])
246 )
247 print()
248 info = []
249 for word in words:
250 for i in range(n):
251 for j in range(n):
252 num = count[word][i][j]
253 if num > 0:
254 if not class_names:
255 info.append([word, i, j, num])
256 else:
257 info.append([word, class_names[i], class_names[j], num])
258 info = sorted(info, key=lambda x: x[3], reverse=True)
259 return pd.DataFrame(
260 info, columns=["token", "given_label", "predicted_label", "num_label_issues"]
261 )
262
263
264 def filter_by_token(
265 token: str, issues: List[Tuple[int, int]], given_words: List[List[str]]
266 ) -> List[Tuple[int, int]]:
267 """
268 Searches a specific token within all issue tokens
269
270 Parameters
271 ----------
272 token:
273 the specific token the user is looking for
274
275 issues:
276 list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.
277
278 given_words:
279 tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from
280 the original file.
281
282 Returns
283 ----------
284 returned_issues:
285 list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.
286
287 """
288 returned_issues = []
289 for issue in issues:
290 i, j = issue
291 if token.lower() == given_words[i][j].lower():
292 returned_issues.append(issue)
293 return returned_issues
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cleanlab/token_classification/summary.py b/cleanlab/token_classification/summary.py
--- a/cleanlab/token_classification/summary.py
+++ b/cleanlab/token_classification/summary.py
@@ -9,7 +9,7 @@
given_words: List[List[str]],
*,
pred_probs: Optional[list] = None,
- given_labels: Optional[list] = None,
+ labels: Optional[list] = None,
exclude: List[Tuple[int, int]] = [],
class_names: Optional[List[str]] = None,
top: int = 20
@@ -33,8 +33,8 @@
sentence, and `K` is the number of classes predicted by the model. If provided, also displays the predicted
label of the token.
- given_labels:
- list of given labels, such that `given_labels[i]` is a list containing the given labels of the tokens in the
+ labels:
+ list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the
i'th sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also
displays the given label of the token.
@@ -66,24 +66,24 @@
if pred_probs:
prediction = pred_probs[i][j].argmax()
- if given_labels:
- given = given_labels[i][j]
- if pred_probs and given_labels:
+ if labels:
+ given = labels[i][j]
+ if pred_probs and labels:
if (given, prediction) in exclude:
continue
if pred_probs and class_names:
prediction = class_names[prediction]
- if given_labels and class_names:
+ if labels and class_names:
given = class_names[given]
shown += 1
print("Sentence %d, token %d: \n%s" % (i, j, color_sentence(sentence, word)))
- if given_labels and not pred_probs:
+ if labels and not pred_probs:
print("Given label: %s\n" % str(given))
- elif not given_labels and pred_probs:
+ elif not labels and pred_probs:
print("Predicted label according to provided pred_probs: %s\n" % str(prediction))
- elif given_labels and pred_probs:
+ elif labels and pred_probs:
print(
"Given label: %s, predicted label according to provided pred_probs: %s\n"
% (str(given), str(prediction))
|
{"golden_diff": "diff --git a/cleanlab/token_classification/summary.py b/cleanlab/token_classification/summary.py\n--- a/cleanlab/token_classification/summary.py\n+++ b/cleanlab/token_classification/summary.py\n@@ -9,7 +9,7 @@\n given_words: List[List[str]],\n *,\n pred_probs: Optional[list] = None,\n- given_labels: Optional[list] = None,\n+ labels: Optional[list] = None,\n exclude: List[Tuple[int, int]] = [],\n class_names: Optional[List[str]] = None,\n top: int = 20\n@@ -33,8 +33,8 @@\n sentence, and `K` is the number of classes predicted by the model. If provided, also displays the predicted\n label of the token.\n \n- given_labels:\n- list of given labels, such that `given_labels[i]` is a list containing the given labels of the tokens in the\n+ labels:\n+ list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the\n i'th sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also\n displays the given label of the token.\n \n@@ -66,24 +66,24 @@\n \n if pred_probs:\n prediction = pred_probs[i][j].argmax()\n- if given_labels:\n- given = given_labels[i][j]\n- if pred_probs and given_labels:\n+ if labels:\n+ given = labels[i][j]\n+ if pred_probs and labels:\n if (given, prediction) in exclude:\n continue\n \n if pred_probs and class_names:\n prediction = class_names[prediction]\n- if given_labels and class_names:\n+ if labels and class_names:\n given = class_names[given]\n \n shown += 1\n print(\"Sentence %d, token %d: \\n%s\" % (i, j, color_sentence(sentence, word)))\n- if given_labels and not pred_probs:\n+ if labels and not pred_probs:\n print(\"Given label: %s\\n\" % str(given))\n- elif not given_labels and pred_probs:\n+ elif not labels and pred_probs:\n print(\"Predicted label according to provided pred_probs: %s\\n\" % str(prediction))\n- elif given_labels and pred_probs:\n+ elif labels and pred_probs:\n print(\n \"Given label: %s, predicted label according to provided pred_probs: %s\\n\"\n % (str(given), str(prediction))\n", "issue": "Token Classification: given_labels -> labels\nRename argument `given_labels` to just `labels` throughout the module to be more consistent with rest of package.\n\nExample: https://github.com/cleanlab/cleanlab/blob/master/cleanlab/token_classification/summary.py#L33\n\nNeed to correspondingly update tutorial notebook as well, possibly also example notebook but I don't think so.\n", "before_files": [{"content": "# Copyright (C) 2017-2022 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. 
If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"\nMethods to display sentences and their label issues in a token classification dataset (text data), as well as summarize the types of issues identified.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom typing import List, Optional, Tuple, Dict, Any\n\nfrom cleanlab.internal.token_classification_utils import get_sentence, color_sentence\n\n\ndef display_issues(\n issues: list,\n given_words: List[List[str]],\n *,\n pred_probs: Optional[list] = None,\n given_labels: Optional[list] = None,\n exclude: List[Tuple[int, int]] = [],\n class_names: Optional[List[str]] = None,\n top: int = 20\n) -> None:\n \"\"\"\n Display issues, including sentence with issue token highlighted. Also shows given and predicted label\n if possible.\n\n Parameters\n ----------\n issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n given_words:\n tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from\n the original file.\n\n pred_probs:\n list of model-predicted probability, such that `pred_probs[i]` contains the model-predicted probability of\n the tokens in the i'th sentence, and has shape `(N, K)`, where `N` is the number of given tokens of the i'th\n sentence, and `K` is the number of classes predicted by the model. If provided, also displays the predicted\n label of the token.\n\n given_labels:\n list of given labels, such that `given_labels[i]` is a list containing the given labels of the tokens in the\n i'th sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also\n displays the given label of the token.\n\n exclude:\n list of given/predicted label swaps to be excluded. For example, if `exclude=[(0, 1), (1, 0)]`, swaps between\n class 0 and 1 are not displayed.\n\n class_names:\n name of classes. If not provided, display the integer index for predicted and given labels.\n\n top: int, default=20\n maximum number of outputs to be printed.\n\n \"\"\"\n if not class_names:\n print(\n \"Classes will be printed in terms of their integer index since `class_names` was not provided. \"\n )\n print(\"Specify this argument to see the string names of each class. 
\\n\")\n\n shown = 0\n is_tuple = isinstance(issues[0], tuple)\n\n for issue in issues:\n if is_tuple:\n i, j = issue\n sentence = get_sentence(given_words[i])\n word = given_words[i][j]\n\n if pred_probs:\n prediction = pred_probs[i][j].argmax()\n if given_labels:\n given = given_labels[i][j]\n if pred_probs and given_labels:\n if (given, prediction) in exclude:\n continue\n\n if pred_probs and class_names:\n prediction = class_names[prediction]\n if given_labels and class_names:\n given = class_names[given]\n\n shown += 1\n print(\"Sentence %d, token %d: \\n%s\" % (i, j, color_sentence(sentence, word)))\n if given_labels and not pred_probs:\n print(\"Given label: %s\\n\" % str(given))\n elif not given_labels and pred_probs:\n print(\"Predicted label according to provided pred_probs: %s\\n\" % str(prediction))\n elif given_labels and pred_probs:\n print(\n \"Given label: %s, predicted label according to provided pred_probs: %s\\n\"\n % (str(given), str(prediction))\n )\n else:\n print()\n else:\n shown += 1\n sentence = get_sentence(given_words[issue])\n print(\"Sentence %d: %s\\n\" % (issue, sentence))\n if shown == top:\n break\n\n\ndef common_label_issues(\n issues: List[Tuple[int, int]],\n given_words: List[List[str]],\n *,\n labels: Optional[list] = None,\n pred_probs: Optional[list] = None,\n class_names: Optional[List[str]] = None,\n top: int = 10,\n exclude: List[Tuple[int, int]] = [],\n verbose: bool = True\n) -> pd.DataFrame:\n \"\"\"\n Display the most common tokens that are potentially mislabeled.\n\n Parameters\n ----------\n issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n given_words:\n tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from\n the original file.\n\n labels:\n list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the i'th\n sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also\n displays the given label of the token.\n\n pred_probs:\n list of model-predicted probability, such that `pred_probs[i]` contains the model-predicted probability of\n the tokens in the i'th sentence, and has shape `(N, K)`, where `N` is the number of given tokens of the i'th\n sentence, and `K` is the number of classes predicted by the model. If both `labels` and `pred_probs` are\n provided, also evaluate each type of given/predicted label swap.\n\n class_names:\n name of classes. If not provided, display the integer index for predicted and given labels.\n\n top:\n maximum number of outputs to be printed.\n\n exclude:\n list of given/predicted label swaps to be excluded. For example, if `exclude=[(0, 1), (1, 0)]`, swaps between\n class 0 and 1 are not displayed.\n\n verbose:\n if set to True, also display each type of given/predicted label swap for each token.\n\n Returns\n ---------\n df:\n if both `labels` and `pred_probs` are provided, return a data frame with columns ['token', 'given_label',\n 'predicted_label', 'num_label_issues'], and each row contains the information for a specific token and\n given/predicted label swap, ordered by the number of label issues in descending order. 
Otherwise, return\n a data frame with columns ['token', 'num_label_issues'], and each row contains the information for a specific\n token, ordered by the number of label issues in descending order.\n\n \"\"\"\n count: Dict[str, Any] = {}\n if not labels or not pred_probs:\n for issue in issues:\n i, j = issue\n word = given_words[i][j]\n if word not in count:\n count[word] = 0\n count[word] += 1\n\n words = [word for word in count.keys()]\n freq = [count[word] for word in words]\n rank = np.argsort(freq)[::-1][:top]\n\n for r in rank:\n print(\n \"Token '%s' is potentially mislabeled %d times throughout the dataset\\n\"\n % (words[r], freq[r])\n )\n\n info = [[word, f] for word, f in zip(words, freq)]\n info = sorted(info, key=lambda x: x[1], reverse=True)\n return pd.DataFrame(info, columns=[\"token\", \"num_label_issues\"])\n\n if not class_names:\n print(\n \"Classes will be printed in terms of their integer index since `class_names` was not provided. \"\n )\n print(\"Specify this argument to see the string names of each class. \\n\")\n\n n = pred_probs[0].shape[1]\n for issue in issues:\n i, j = issue\n word = given_words[i][j]\n label = labels[i][j]\n pred = pred_probs[i][j].argmax()\n if word not in count:\n count[word] = np.zeros([n, n], dtype=int)\n if (label, pred) not in exclude:\n count[word][label][pred] += 1\n words = [word for word in count.keys()]\n freq = [np.sum(count[word]) for word in words]\n rank = np.argsort(freq)[::-1][:top]\n\n for r in rank:\n matrix = count[words[r]]\n most_frequent = np.argsort(count[words[r]].flatten())[::-1]\n print(\n \"Token '%s' is potentially mislabeled %d times throughout the dataset\"\n % (words[r], freq[r])\n )\n if verbose:\n print(\n \"---------------------------------------------------------------------------------------\"\n )\n for f in most_frequent:\n i, j = f // n, f % n\n if matrix[i][j] == 0:\n break\n if class_names:\n print(\n \"labeled as class `%s` but predicted to actually be class `%s` %d times\"\n % (class_names[i], class_names[j], matrix[i][j])\n )\n else:\n print(\n \"labeled as class %d but predicted to actually be class %d %d times\"\n % (i, j, matrix[i][j])\n )\n print()\n info = []\n for word in words:\n for i in range(n):\n for j in range(n):\n num = count[word][i][j]\n if num > 0:\n if not class_names:\n info.append([word, i, j, num])\n else:\n info.append([word, class_names[i], class_names[j], num])\n info = sorted(info, key=lambda x: x[3], reverse=True)\n return pd.DataFrame(\n info, columns=[\"token\", \"given_label\", \"predicted_label\", \"num_label_issues\"]\n )\n\n\ndef filter_by_token(\n token: str, issues: List[Tuple[int, int]], given_words: List[List[str]]\n) -> List[Tuple[int, int]]:\n \"\"\"\n Searches a specific token within all issue tokens\n\n Parameters\n ----------\n token:\n the specific token the user is looking for\n\n issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n given_words:\n tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from\n the original file.\n\n Returns\n ----------\n returned_issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n \"\"\"\n returned_issues = []\n for issue in issues:\n i, j = issue\n if token.lower() == given_words[i][j].lower():\n returned_issues.append(issue)\n return returned_issues\n", "path": "cleanlab/token_classification/summary.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom 
cleanlab.internal.token_classification_utils import get_sentence, color_sentence\nfrom typing import List, Optional, Tuple, Dict, Any\n\n\ndef display_issues(\n issues: list,\n given_words: List[List[str]],\n *,\n pred_probs: Optional[list] = None,\n labels: Optional[list] = None,\n exclude: List[Tuple[int, int]] = [],\n class_names: Optional[List[str]] = None,\n top: int = 20\n) -> None:\n \"\"\"\n Display issues, including sentence with issue token highlighted. Also shows given and predicted label\n if possible.\n\n Parameters\n ----------\n issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n given_words:\n tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from\n the original file.\n\n pred_probs:\n list of model-predicted probability, such that `pred_probs[i]` contains the model-predicted probability of\n the tokens in the i'th sentence, and has shape `(N, K)`, where `N` is the number of given tokens of the i'th\n sentence, and `K` is the number of classes predicted by the model. If provided, also displays the predicted\n label of the token.\n\n labels:\n list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the\n i'th sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also\n displays the given label of the token.\n\n exclude:\n list of given/predicted label swaps to be excluded. For example, if `exclude=[(0, 1), (1, 0)]`, swaps between\n class 0 and 1 are not displayed.\n\n class_names:\n name of classes. If not provided, display the integer index for predicted and given labels.\n\n top: int, default=20\n maximum number of outputs to be printed.\n\n \"\"\"\n if not class_names:\n print(\n \"Classes will be printed in terms of their integer index since `class_names` was not provided. \"\n )\n print(\"Specify this argument to see the string names of each class. 
\\n\")\n\n shown = 0\n is_tuple = isinstance(issues[0], tuple)\n\n for issue in issues:\n if is_tuple:\n i, j = issue\n sentence = get_sentence(given_words[i])\n word = given_words[i][j]\n\n if pred_probs:\n prediction = pred_probs[i][j].argmax()\n if labels:\n given = labels[i][j]\n if pred_probs and labels:\n if (given, prediction) in exclude:\n continue\n\n if pred_probs and class_names:\n prediction = class_names[prediction]\n if labels and class_names:\n given = class_names[given]\n\n shown += 1\n print(\"Sentence %d, token %d: \\n%s\" % (i, j, color_sentence(sentence, word)))\n if labels and not pred_probs:\n print(\"Given label: %s\\n\" % str(given))\n elif not labels and pred_probs:\n print(\"Predicted label according to provided pred_probs: %s\\n\" % str(prediction))\n elif labels and pred_probs:\n print(\n \"Given label: %s, predicted label according to provided pred_probs: %s\\n\"\n % (str(given), str(prediction))\n )\n else:\n print()\n else:\n shown += 1\n sentence = get_sentence(given_words[issue])\n print(\"Sentence %d: %s\\n\" % (issue, sentence))\n if shown == top:\n break\n\n\ndef common_label_issues(\n issues: List[Tuple[int, int]],\n given_words: List[List[str]],\n *,\n labels: Optional[list] = None,\n pred_probs: Optional[list] = None,\n class_names: Optional[List[str]] = None,\n top: int = 10,\n exclude: List[Tuple[int, int]] = [],\n verbose: bool = True\n) -> pd.DataFrame:\n \"\"\"\n Display the most common tokens that are potentially mislabeled.\n\n Parameters\n ----------\n issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n given_words:\n tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from\n the original file.\n\n labels:\n list of given labels, such that `labels[i]` is a list containing the given labels of the tokens in the i'th\n sentence, and has length equal to the number of given tokens of the i'th sentence. If provided, also\n displays the given label of the token.\n\n pred_probs:\n list of model-predicted probability, such that `pred_probs[i]` contains the model-predicted probability of\n the tokens in the i'th sentence, and has shape `(N, K)`, where `N` is the number of given tokens of the i'th\n sentence, and `K` is the number of classes predicted by the model. If both `labels` and `pred_probs` are\n provided, also evaluate each type of given/predicted label swap.\n\n class_names:\n name of classes. If not provided, display the integer index for predicted and given labels.\n\n top:\n maximum number of outputs to be printed.\n\n exclude:\n list of given/predicted label swaps to be excluded. For example, if `exclude=[(0, 1), (1, 0)]`, swaps between\n class 0 and 1 are not displayed.\n\n verbose:\n if set to True, also display each type of given/predicted label swap for each token.\n\n Returns\n ---------\n df:\n if both `labels` and `pred_probs` are provided, return a data frame with columns ['token', 'given_label',\n 'predicted_label', 'num_label_issues'], and each row contains the information for a specific token and\n given/predicted label swap, ordered by the number of label issues in descending order. 
Otherwise, return\n a data frame with columns ['token', 'num_label_issues'], and each row contains the information for a specific\n token, ordered by the number of label issues in descending order.\n\n \"\"\"\n count: Dict[str, Any] = {}\n if not labels or not pred_probs:\n for issue in issues:\n i, j = issue\n word = given_words[i][j]\n if word not in count:\n count[word] = 0\n count[word] += 1\n\n words = [word for word in count.keys()]\n freq = [count[word] for word in words]\n rank = np.argsort(freq)[::-1][:top]\n\n for r in rank:\n print(\n \"Token '%s' is potentially mislabeled %d times throughout the dataset\\n\"\n % (words[r], freq[r])\n )\n\n info = [[word, f] for word, f in zip(words, freq)]\n info = sorted(info, key=lambda x: x[1], reverse=True)\n return pd.DataFrame(info, columns=[\"token\", \"num_label_issues\"])\n\n if not class_names:\n print(\n \"Classes will be printed in terms of their integer index since `class_names` was not provided. \"\n )\n print(\"Specify this argument to see the string names of each class. \\n\")\n\n n = pred_probs[0].shape[1]\n for issue in issues:\n i, j = issue\n word = given_words[i][j]\n label = labels[i][j]\n pred = pred_probs[i][j].argmax()\n if word not in count:\n count[word] = np.zeros([n, n], dtype=int)\n if (label, pred) not in exclude:\n count[word][label][pred] += 1\n words = [word for word in count.keys()]\n freq = [np.sum(count[word]) for word in words]\n rank = np.argsort(freq)[::-1][:top]\n\n for r in rank:\n matrix = count[words[r]]\n most_frequent = np.argsort(count[words[r]].flatten())[::-1]\n print(\n \"Token '%s' is potentially mislabeled %d times throughout the dataset\"\n % (words[r], freq[r])\n )\n if verbose:\n print(\n \"---------------------------------------------------------------------------------------\"\n )\n for f in most_frequent:\n i, j = f // n, f % n\n if matrix[i][j] == 0:\n break\n if class_names:\n print(\n \"labeled as class `%s` but predicted to actually be class `%s` %d times\"\n % (class_names[i], class_names[j], matrix[i][j])\n )\n else:\n print(\n \"labeled as class %d but predicted to actually be class %d %d times\"\n % (i, j, matrix[i][j])\n )\n print()\n info = []\n for word in words:\n for i in range(n):\n for j in range(n):\n num = count[word][i][j]\n if num > 0:\n if not class_names:\n info.append([word, i, j, num])\n else:\n info.append([word, class_names[i], class_names[j], num])\n info = sorted(info, key=lambda x: x[3], reverse=True)\n return pd.DataFrame(\n info, columns=[\"token\", \"given_label\", \"predicted_label\", \"num_label_issues\"]\n )\n\n\ndef filter_by_token(\n token: str, issues: List[Tuple[int, int]], given_words: List[List[str]]\n) -> List[Tuple[int, int]]:\n \"\"\"\n Searches a specific token within all issue tokens\n\n Parameters\n ----------\n token:\n the specific token the user is looking for\n\n issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n given_words:\n tokens in a nested-list format, such that `given_words[i]` contains the words of the i'th sentence from\n the original file.\n\n Returns\n ----------\n returned_issues:\n list of tuples `(i, j)`, which represents the j'th token of the i'th sentence.\n\n \"\"\"\n returned_issues = []\n for issue in issues:\n i, j = issue\n if token.lower() == given_words[i][j].lower():\n returned_issues.append(issue)\n return returned_issues\n", "path": "cleanlab/token_classification/summary.py"}]}
| 3,676 | 564 |
gh_patches_debug_23347 | rasdani/github-patches | git_diff | netbox-community__netbox-15758 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the `annotated_date()` and `annotated_now()` template filters
### Proposed Changes
Remove the following template filters from `utilities.templatetags.helpers`:
- `annotated_date()`
- `annotated_now()`
### Justification
With the shift to using ISO 8601-formatted dates & times everywhere in #15735, these filters are no longer needed.
### Impact
All uses of these filters must be updated. These can generally be replaced with the new `isodatetime()` template filter.
--- END ISSUE ---
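To make the replacement direction concrete, below is a rough, purely illustrative sketch of an ISO 8601 renderer in the spirit of the `isodatetime()` filter the issue points to; it is not NetBox's actual implementation, and the template-level change would simply swap `|annotated_date` for `|isodatetime`.

```python
import datetime

def isodatetime_sketch(value):
    """Illustrative only -- not NetBox's actual isodatetime() filter."""
    if not value:
        return ''
    if isinstance(value, (datetime.date, datetime.datetime)):
        return value.isoformat()
    return str(value)

print(isodatetime_sketch(datetime.date(2024, 4, 16)))              # -> 2024-04-16
print(isodatetime_sketch(datetime.datetime(2024, 4, 16, 9, 30)))   # -> 2024-04-16T09:30:00
```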
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/utilities/templatetags/helpers.py`
Content:
```
1 import datetime
2 import json
3 from typing import Dict, Any
4 from urllib.parse import quote
5
6 from django import template
7 from django.conf import settings
8 from django.template.defaultfilters import date
9 from django.urls import NoReverseMatch, reverse
10 from django.utils import timezone
11 from django.utils.safestring import mark_safe
12
13 from core.models import ObjectType
14 from utilities.forms import get_selected_values, TableConfigForm
15 from utilities.views import get_viewname
16
17 __all__ = (
18 'annotated_date',
19 'annotated_now',
20 'applied_filters',
21 'as_range',
22 'divide',
23 'get_item',
24 'get_key',
25 'humanize_megabytes',
26 'humanize_speed',
27 'icon_from_status',
28 'kg_to_pounds',
29 'meters_to_feet',
30 'percentage',
31 'querystring',
32 'startswith',
33 'status_from_tag',
34 'table_config_form',
35 'utilization_graph',
36 'validated_viewname',
37 'viewname',
38 )
39
40 register = template.Library()
41
42
43 #
44 # Filters
45 #
46
47
48 @register.filter()
49 def viewname(model, action):
50 """
51 Return the view name for the given model and action. Does not perform any validation.
52 """
53 return get_viewname(model, action)
54
55
56 @register.filter()
57 def validated_viewname(model, action):
58 """
59 Return the view name for the given model and action if valid, or None if invalid.
60 """
61 viewname = get_viewname(model, action)
62
63 # Validate the view name
64 try:
65 reverse(viewname)
66 return viewname
67 except NoReverseMatch:
68 return None
69
70
71 @register.filter()
72 def humanize_speed(speed):
73 """
74 Humanize speeds given in Kbps. Examples:
75
76 1544 => "1.544 Mbps"
77 100000 => "100 Mbps"
78 10000000 => "10 Gbps"
79 """
80 if not speed:
81 return ''
82 if speed >= 1000000000 and speed % 1000000000 == 0:
83 return '{} Tbps'.format(int(speed / 1000000000))
84 elif speed >= 1000000 and speed % 1000000 == 0:
85 return '{} Gbps'.format(int(speed / 1000000))
86 elif speed >= 1000 and speed % 1000 == 0:
87 return '{} Mbps'.format(int(speed / 1000))
88 elif speed >= 1000:
89 return '{} Mbps'.format(float(speed) / 1000)
90 else:
91 return '{} Kbps'.format(speed)
92
93
94 @register.filter()
95 def humanize_megabytes(mb):
96 """
97 Express a number of megabytes in the most suitable unit (e.g. gigabytes or terabytes).
98 """
99 if not mb:
100 return ''
101 if not mb % 1048576: # 1024^2
102 return f'{int(mb / 1048576)} TB'
103 if not mb % 1024:
104 return f'{int(mb / 1024)} GB'
105 return f'{mb} MB'
106
107
108 @register.filter(expects_localtime=True)
109 def annotated_date(date_value):
110 """
111 Returns date as HTML span with short date format as the content and the
112 (long) date format as the title.
113 """
114 if not date_value:
115 return ''
116
117 if type(date_value) is datetime.date:
118 long_ts = date(date_value, 'DATE_FORMAT')
119 short_ts = date(date_value, 'SHORT_DATE_FORMAT')
120 else:
121 long_ts = date(date_value, 'DATETIME_FORMAT')
122 short_ts = date(date_value, 'SHORT_DATETIME_FORMAT')
123
124 return mark_safe(f'<span title="{long_ts}">{short_ts}</span>')
125
126
127 @register.simple_tag
128 def annotated_now():
129 """
130 Returns the current date piped through the annotated_date filter.
131 """
132 tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
133 return annotated_date(datetime.datetime.now(tz=tzinfo))
134
135
136 @register.filter()
137 def divide(x, y):
138 """
139 Return x/y (rounded).
140 """
141 if x is None or y is None:
142 return None
143 return round(x / y)
144
145
146 @register.filter()
147 def percentage(x, y):
148 """
149 Return x/y as a percentage.
150 """
151 if x is None or y is None:
152 return None
153
154 return round(x / y * 100, 1)
155
156
157 @register.filter()
158 def as_range(n):
159 """
160 Return a range of n items.
161 """
162 try:
163 int(n)
164 except TypeError:
165 return list()
166 return range(n)
167
168
169 @register.filter()
170 def meters_to_feet(n):
171 """
172 Convert a length from meters to feet.
173 """
174 return float(n) * 3.28084
175
176
177 @register.filter()
178 def kg_to_pounds(n):
179 """
180 Convert a weight from kilograms to pounds.
181 """
182 return float(n) * 2.204623
183
184
185 @register.filter("startswith")
186 def startswith(text: str, starts: str) -> bool:
187 """
188 Template implementation of `str.startswith()`.
189 """
190 if isinstance(text, str):
191 return text.startswith(starts)
192 return False
193
194
195 @register.filter
196 def get_key(value: Dict, arg: str) -> Any:
197 """
198 Template implementation of `dict.get()`, for accessing dict values
199 by key when the key is not able to be used in a template. For
200 example, `{"ui.colormode": "dark"}`.
201 """
202 return value.get(arg, None)
203
204
205 @register.filter
206 def get_item(value: object, attr: str) -> Any:
207 """
208 Template implementation of `__getitem__`, for accessing the `__getitem__` method
209 of a class from a template.
210 """
211 return value[attr]
212
213
214 @register.filter
215 def status_from_tag(tag: str = "info") -> str:
216 """
217 Determine Bootstrap theme status/level from Django's Message.level_tag.
218 """
219 status_map = {
220 'warning': 'warning',
221 'success': 'success',
222 'error': 'danger',
223 'danger': 'danger',
224 'debug': 'info',
225 'info': 'info',
226 }
227 return status_map.get(tag.lower(), 'info')
228
229
230 @register.filter
231 def icon_from_status(status: str = "info") -> str:
232 """
233 Determine icon class name from Bootstrap theme status/level.
234 """
235 icon_map = {
236 'warning': 'alert',
237 'success': 'check-circle',
238 'danger': 'alert',
239 'info': 'information',
240 }
241 return icon_map.get(status.lower(), 'information')
242
243
244 #
245 # Tags
246 #
247
248 @register.simple_tag()
249 def querystring(request, **kwargs):
250 """
251 Append or update the page number in a querystring.
252 """
253 querydict = request.GET.copy()
254 for k, v in kwargs.items():
255 if v is not None:
256 querydict[k] = str(v)
257 elif k in querydict:
258 querydict.pop(k)
259 querystring = querydict.urlencode(safe='/')
260 if querystring:
261 return '?' + querystring
262 else:
263 return ''
264
265
266 @register.inclusion_tag('helpers/utilization_graph.html')
267 def utilization_graph(utilization, warning_threshold=75, danger_threshold=90):
268 """
269 Display a horizontal bar graph indicating a percentage of utilization.
270 """
271 if utilization == 100:
272 bar_class = 'bg-secondary'
273 elif danger_threshold and utilization >= danger_threshold:
274 bar_class = 'bg-danger'
275 elif warning_threshold and utilization >= warning_threshold:
276 bar_class = 'bg-warning'
277 elif warning_threshold or danger_threshold:
278 bar_class = 'bg-success'
279 else:
280 bar_class = 'bg-gray'
281 return {
282 'utilization': utilization,
283 'bar_class': bar_class,
284 }
285
286
287 @register.inclusion_tag('helpers/table_config_form.html')
288 def table_config_form(table, table_name=None):
289 return {
290 'table_name': table_name or table.__class__.__name__,
291 'form': TableConfigForm(table=table),
292 }
293
294
295 @register.inclusion_tag('helpers/applied_filters.html', takes_context=True)
296 def applied_filters(context, model, form, query_params):
297 """
298 Display the active filters for a given filter form.
299 """
300 user = context['request'].user
301 form.is_valid() # Ensure cleaned_data has been set
302
303 applied_filters = []
304 for filter_name in form.changed_data:
305 if filter_name not in form.cleaned_data:
306 continue
307
308 querydict = query_params.copy()
309 if filter_name not in querydict:
310 continue
311
312 bound_field = form.fields[filter_name].get_bound_field(form, filter_name)
313 querydict.pop(filter_name)
314 display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])
315
316 applied_filters.append({
317 'name': filter_name,
318 'value': form.cleaned_data[filter_name],
319 'link_url': f'?{querydict.urlencode()}',
320 'link_text': f'{bound_field.label}: {display_value}',
321 })
322
323 save_link = None
324 if user.has_perm('extras.add_savedfilter') and 'filter_id' not in context['request'].GET:
325 object_type = ObjectType.objects.get_for_model(model).pk
326 parameters = json.dumps(dict(context['request'].GET.lists()))
327 url = reverse('extras:savedfilter_add')
328 save_link = f"{url}?object_types={object_type}&parameters={quote(parameters)}"
329
330 return {
331 'applied_filters': applied_filters,
332 'save_link': save_link,
333 }
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/utilities/templatetags/helpers.py b/netbox/utilities/templatetags/helpers.py
--- a/netbox/utilities/templatetags/helpers.py
+++ b/netbox/utilities/templatetags/helpers.py
@@ -15,8 +15,6 @@
from utilities.views import get_viewname
__all__ = (
- 'annotated_date',
- 'annotated_now',
'applied_filters',
'as_range',
'divide',
@@ -105,34 +103,6 @@
return f'{mb} MB'
[email protected](expects_localtime=True)
-def annotated_date(date_value):
- """
- Returns date as HTML span with short date format as the content and the
- (long) date format as the title.
- """
- if not date_value:
- return ''
-
- if type(date_value) is datetime.date:
- long_ts = date(date_value, 'DATE_FORMAT')
- short_ts = date(date_value, 'SHORT_DATE_FORMAT')
- else:
- long_ts = date(date_value, 'DATETIME_FORMAT')
- short_ts = date(date_value, 'SHORT_DATETIME_FORMAT')
-
- return mark_safe(f'<span title="{long_ts}">{short_ts}</span>')
-
-
[email protected]_tag
-def annotated_now():
- """
- Returns the current date piped through the annotated_date filter.
- """
- tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None
- return annotated_date(datetime.datetime.now(tz=tzinfo))
-
-
@register.filter()
def divide(x, y):
"""
|
{"golden_diff": "diff --git a/netbox/utilities/templatetags/helpers.py b/netbox/utilities/templatetags/helpers.py\n--- a/netbox/utilities/templatetags/helpers.py\n+++ b/netbox/utilities/templatetags/helpers.py\n@@ -15,8 +15,6 @@\n from utilities.views import get_viewname\n \n __all__ = (\n- 'annotated_date',\n- 'annotated_now',\n 'applied_filters',\n 'as_range',\n 'divide',\n@@ -105,34 +103,6 @@\n return f'{mb} MB'\n \n \[email protected](expects_localtime=True)\n-def annotated_date(date_value):\n- \"\"\"\n- Returns date as HTML span with short date format as the content and the\n- (long) date format as the title.\n- \"\"\"\n- if not date_value:\n- return ''\n-\n- if type(date_value) is datetime.date:\n- long_ts = date(date_value, 'DATE_FORMAT')\n- short_ts = date(date_value, 'SHORT_DATE_FORMAT')\n- else:\n- long_ts = date(date_value, 'DATETIME_FORMAT')\n- short_ts = date(date_value, 'SHORT_DATETIME_FORMAT')\n-\n- return mark_safe(f'<span title=\"{long_ts}\">{short_ts}</span>')\n-\n-\[email protected]_tag\n-def annotated_now():\n- \"\"\"\n- Returns the current date piped through the annotated_date filter.\n- \"\"\"\n- tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None\n- return annotated_date(datetime.datetime.now(tz=tzinfo))\n-\n-\n @register.filter()\n def divide(x, y):\n \"\"\"\n", "issue": "Remove the `annotated_date()` and `annotated_now()` template filters\n### Proposed Changes\n\nRemove the following template filters from `utilities.templatetags.helpers`:\r\n\r\n- `annotated_date()`\r\n- `annotated_now()`\n\n### Justification\n\nWith the shift to using ISO 8601-formatted dates & times everywhere in #15735, these filters are no longer needed.\n\n### Impact\n\nAll uses of these filters must be updated. These can generally be replaced with the new `isodatetime()` template filter.\n", "before_files": [{"content": "import datetime\nimport json\nfrom typing import Dict, Any\nfrom urllib.parse import quote\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import date\nfrom django.urls import NoReverseMatch, reverse\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\n\nfrom core.models import ObjectType\nfrom utilities.forms import get_selected_values, TableConfigForm\nfrom utilities.views import get_viewname\n\n__all__ = (\n 'annotated_date',\n 'annotated_now',\n 'applied_filters',\n 'as_range',\n 'divide',\n 'get_item',\n 'get_key',\n 'humanize_megabytes',\n 'humanize_speed',\n 'icon_from_status',\n 'kg_to_pounds',\n 'meters_to_feet',\n 'percentage',\n 'querystring',\n 'startswith',\n 'status_from_tag',\n 'table_config_form',\n 'utilization_graph',\n 'validated_viewname',\n 'viewname',\n)\n\nregister = template.Library()\n\n\n#\n# Filters\n#\n\n\[email protected]()\ndef viewname(model, action):\n \"\"\"\n Return the view name for the given model and action. Does not perform any validation.\n \"\"\"\n return get_viewname(model, action)\n\n\[email protected]()\ndef validated_viewname(model, action):\n \"\"\"\n Return the view name for the given model and action if valid, or None if invalid.\n \"\"\"\n viewname = get_viewname(model, action)\n\n # Validate the view name\n try:\n reverse(viewname)\n return viewname\n except NoReverseMatch:\n return None\n\n\[email protected]()\ndef humanize_speed(speed):\n \"\"\"\n Humanize speeds given in Kbps. 
Examples:\n\n 1544 => \"1.544 Mbps\"\n 100000 => \"100 Mbps\"\n 10000000 => \"10 Gbps\"\n \"\"\"\n if not speed:\n return ''\n if speed >= 1000000000 and speed % 1000000000 == 0:\n return '{} Tbps'.format(int(speed / 1000000000))\n elif speed >= 1000000 and speed % 1000000 == 0:\n return '{} Gbps'.format(int(speed / 1000000))\n elif speed >= 1000 and speed % 1000 == 0:\n return '{} Mbps'.format(int(speed / 1000))\n elif speed >= 1000:\n return '{} Mbps'.format(float(speed) / 1000)\n else:\n return '{} Kbps'.format(speed)\n\n\[email protected]()\ndef humanize_megabytes(mb):\n \"\"\"\n Express a number of megabytes in the most suitable unit (e.g. gigabytes or terabytes).\n \"\"\"\n if not mb:\n return ''\n if not mb % 1048576: # 1024^2\n return f'{int(mb / 1048576)} TB'\n if not mb % 1024:\n return f'{int(mb / 1024)} GB'\n return f'{mb} MB'\n\n\[email protected](expects_localtime=True)\ndef annotated_date(date_value):\n \"\"\"\n Returns date as HTML span with short date format as the content and the\n (long) date format as the title.\n \"\"\"\n if not date_value:\n return ''\n\n if type(date_value) is datetime.date:\n long_ts = date(date_value, 'DATE_FORMAT')\n short_ts = date(date_value, 'SHORT_DATE_FORMAT')\n else:\n long_ts = date(date_value, 'DATETIME_FORMAT')\n short_ts = date(date_value, 'SHORT_DATETIME_FORMAT')\n\n return mark_safe(f'<span title=\"{long_ts}\">{short_ts}</span>')\n\n\[email protected]_tag\ndef annotated_now():\n \"\"\"\n Returns the current date piped through the annotated_date filter.\n \"\"\"\n tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None\n return annotated_date(datetime.datetime.now(tz=tzinfo))\n\n\[email protected]()\ndef divide(x, y):\n \"\"\"\n Return x/y (rounded).\n \"\"\"\n if x is None or y is None:\n return None\n return round(x / y)\n\n\[email protected]()\ndef percentage(x, y):\n \"\"\"\n Return x/y as a percentage.\n \"\"\"\n if x is None or y is None:\n return None\n\n return round(x / y * 100, 1)\n\n\[email protected]()\ndef as_range(n):\n \"\"\"\n Return a range of n items.\n \"\"\"\n try:\n int(n)\n except TypeError:\n return list()\n return range(n)\n\n\[email protected]()\ndef meters_to_feet(n):\n \"\"\"\n Convert a length from meters to feet.\n \"\"\"\n return float(n) * 3.28084\n\n\[email protected]()\ndef kg_to_pounds(n):\n \"\"\"\n Convert a weight from kilograms to pounds.\n \"\"\"\n return float(n) * 2.204623\n\n\[email protected](\"startswith\")\ndef startswith(text: str, starts: str) -> bool:\n \"\"\"\n Template implementation of `str.startswith()`.\n \"\"\"\n if isinstance(text, str):\n return text.startswith(starts)\n return False\n\n\[email protected]\ndef get_key(value: Dict, arg: str) -> Any:\n \"\"\"\n Template implementation of `dict.get()`, for accessing dict values\n by key when the key is not able to be used in a template. 
For\n example, `{\"ui.colormode\": \"dark\"}`.\n \"\"\"\n return value.get(arg, None)\n\n\[email protected]\ndef get_item(value: object, attr: str) -> Any:\n \"\"\"\n Template implementation of `__getitem__`, for accessing the `__getitem__` method\n of a class from a template.\n \"\"\"\n return value[attr]\n\n\[email protected]\ndef status_from_tag(tag: str = \"info\") -> str:\n \"\"\"\n Determine Bootstrap theme status/level from Django's Message.level_tag.\n \"\"\"\n status_map = {\n 'warning': 'warning',\n 'success': 'success',\n 'error': 'danger',\n 'danger': 'danger',\n 'debug': 'info',\n 'info': 'info',\n }\n return status_map.get(tag.lower(), 'info')\n\n\[email protected]\ndef icon_from_status(status: str = \"info\") -> str:\n \"\"\"\n Determine icon class name from Bootstrap theme status/level.\n \"\"\"\n icon_map = {\n 'warning': 'alert',\n 'success': 'check-circle',\n 'danger': 'alert',\n 'info': 'information',\n }\n return icon_map.get(status.lower(), 'information')\n\n\n#\n# Tags\n#\n\[email protected]_tag()\ndef querystring(request, **kwargs):\n \"\"\"\n Append or update the page number in a querystring.\n \"\"\"\n querydict = request.GET.copy()\n for k, v in kwargs.items():\n if v is not None:\n querydict[k] = str(v)\n elif k in querydict:\n querydict.pop(k)\n querystring = querydict.urlencode(safe='/')\n if querystring:\n return '?' + querystring\n else:\n return ''\n\n\[email protected]_tag('helpers/utilization_graph.html')\ndef utilization_graph(utilization, warning_threshold=75, danger_threshold=90):\n \"\"\"\n Display a horizontal bar graph indicating a percentage of utilization.\n \"\"\"\n if utilization == 100:\n bar_class = 'bg-secondary'\n elif danger_threshold and utilization >= danger_threshold:\n bar_class = 'bg-danger'\n elif warning_threshold and utilization >= warning_threshold:\n bar_class = 'bg-warning'\n elif warning_threshold or danger_threshold:\n bar_class = 'bg-success'\n else:\n bar_class = 'bg-gray'\n return {\n 'utilization': utilization,\n 'bar_class': bar_class,\n }\n\n\[email protected]_tag('helpers/table_config_form.html')\ndef table_config_form(table, table_name=None):\n return {\n 'table_name': table_name or table.__class__.__name__,\n 'form': TableConfigForm(table=table),\n }\n\n\[email protected]_tag('helpers/applied_filters.html', takes_context=True)\ndef applied_filters(context, model, form, query_params):\n \"\"\"\n Display the active filters for a given filter form.\n \"\"\"\n user = context['request'].user\n form.is_valid() # Ensure cleaned_data has been set\n\n applied_filters = []\n for filter_name in form.changed_data:\n if filter_name not in form.cleaned_data:\n continue\n\n querydict = query_params.copy()\n if filter_name not in querydict:\n continue\n\n bound_field = form.fields[filter_name].get_bound_field(form, filter_name)\n querydict.pop(filter_name)\n display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])\n\n applied_filters.append({\n 'name': filter_name,\n 'value': form.cleaned_data[filter_name],\n 'link_url': f'?{querydict.urlencode()}',\n 'link_text': f'{bound_field.label}: {display_value}',\n })\n\n save_link = None\n if user.has_perm('extras.add_savedfilter') and 'filter_id' not in context['request'].GET:\n object_type = ObjectType.objects.get_for_model(model).pk\n parameters = json.dumps(dict(context['request'].GET.lists()))\n url = reverse('extras:savedfilter_add')\n save_link = f\"{url}?object_types={object_type}¶meters={quote(parameters)}\"\n\n return {\n 'applied_filters': 
applied_filters,\n 'save_link': save_link,\n }\n", "path": "netbox/utilities/templatetags/helpers.py"}], "after_files": [{"content": "import datetime\nimport json\nfrom typing import Dict, Any\nfrom urllib.parse import quote\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import date\nfrom django.urls import NoReverseMatch, reverse\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\n\nfrom core.models import ObjectType\nfrom utilities.forms import get_selected_values, TableConfigForm\nfrom utilities.views import get_viewname\n\n__all__ = (\n 'applied_filters',\n 'as_range',\n 'divide',\n 'get_item',\n 'get_key',\n 'humanize_megabytes',\n 'humanize_speed',\n 'icon_from_status',\n 'kg_to_pounds',\n 'meters_to_feet',\n 'percentage',\n 'querystring',\n 'startswith',\n 'status_from_tag',\n 'table_config_form',\n 'utilization_graph',\n 'validated_viewname',\n 'viewname',\n)\n\nregister = template.Library()\n\n\n#\n# Filters\n#\n\n\[email protected]()\ndef viewname(model, action):\n \"\"\"\n Return the view name for the given model and action. Does not perform any validation.\n \"\"\"\n return get_viewname(model, action)\n\n\[email protected]()\ndef validated_viewname(model, action):\n \"\"\"\n Return the view name for the given model and action if valid, or None if invalid.\n \"\"\"\n viewname = get_viewname(model, action)\n\n # Validate the view name\n try:\n reverse(viewname)\n return viewname\n except NoReverseMatch:\n return None\n\n\[email protected]()\ndef humanize_speed(speed):\n \"\"\"\n Humanize speeds given in Kbps. Examples:\n\n 1544 => \"1.544 Mbps\"\n 100000 => \"100 Mbps\"\n 10000000 => \"10 Gbps\"\n \"\"\"\n if not speed:\n return ''\n if speed >= 1000000000 and speed % 1000000000 == 0:\n return '{} Tbps'.format(int(speed / 1000000000))\n elif speed >= 1000000 and speed % 1000000 == 0:\n return '{} Gbps'.format(int(speed / 1000000))\n elif speed >= 1000 and speed % 1000 == 0:\n return '{} Mbps'.format(int(speed / 1000))\n elif speed >= 1000:\n return '{} Mbps'.format(float(speed) / 1000)\n else:\n return '{} Kbps'.format(speed)\n\n\[email protected]()\ndef humanize_megabytes(mb):\n \"\"\"\n Express a number of megabytes in the most suitable unit (e.g. 
gigabytes or terabytes).\n \"\"\"\n if not mb:\n return ''\n if not mb % 1048576: # 1024^2\n return f'{int(mb / 1048576)} TB'\n if not mb % 1024:\n return f'{int(mb / 1024)} GB'\n return f'{mb} MB'\n\n\[email protected]()\ndef divide(x, y):\n \"\"\"\n Return x/y (rounded).\n \"\"\"\n if x is None or y is None:\n return None\n return round(x / y)\n\n\[email protected]()\ndef percentage(x, y):\n \"\"\"\n Return x/y as a percentage.\n \"\"\"\n if x is None or y is None:\n return None\n\n return round(x / y * 100, 1)\n\n\[email protected]()\ndef as_range(n):\n \"\"\"\n Return a range of n items.\n \"\"\"\n try:\n int(n)\n except TypeError:\n return list()\n return range(n)\n\n\[email protected]()\ndef meters_to_feet(n):\n \"\"\"\n Convert a length from meters to feet.\n \"\"\"\n return float(n) * 3.28084\n\n\[email protected]()\ndef kg_to_pounds(n):\n \"\"\"\n Convert a weight from kilograms to pounds.\n \"\"\"\n return float(n) * 2.204623\n\n\[email protected](\"startswith\")\ndef startswith(text: str, starts: str) -> bool:\n \"\"\"\n Template implementation of `str.startswith()`.\n \"\"\"\n if isinstance(text, str):\n return text.startswith(starts)\n return False\n\n\[email protected]\ndef get_key(value: Dict, arg: str) -> Any:\n \"\"\"\n Template implementation of `dict.get()`, for accessing dict values\n by key when the key is not able to be used in a template. For\n example, `{\"ui.colormode\": \"dark\"}`.\n \"\"\"\n return value.get(arg, None)\n\n\[email protected]\ndef get_item(value: object, attr: str) -> Any:\n \"\"\"\n Template implementation of `__getitem__`, for accessing the `__getitem__` method\n of a class from a template.\n \"\"\"\n return value[attr]\n\n\[email protected]\ndef status_from_tag(tag: str = \"info\") -> str:\n \"\"\"\n Determine Bootstrap theme status/level from Django's Message.level_tag.\n \"\"\"\n status_map = {\n 'warning': 'warning',\n 'success': 'success',\n 'error': 'danger',\n 'danger': 'danger',\n 'debug': 'info',\n 'info': 'info',\n }\n return status_map.get(tag.lower(), 'info')\n\n\[email protected]\ndef icon_from_status(status: str = \"info\") -> str:\n \"\"\"\n Determine icon class name from Bootstrap theme status/level.\n \"\"\"\n icon_map = {\n 'warning': 'alert',\n 'success': 'check-circle',\n 'danger': 'alert',\n 'info': 'information',\n }\n return icon_map.get(status.lower(), 'information')\n\n\n#\n# Tags\n#\n\[email protected]_tag()\ndef querystring(request, **kwargs):\n \"\"\"\n Append or update the page number in a querystring.\n \"\"\"\n querydict = request.GET.copy()\n for k, v in kwargs.items():\n if v is not None:\n querydict[k] = str(v)\n elif k in querydict:\n querydict.pop(k)\n querystring = querydict.urlencode(safe='/')\n if querystring:\n return '?' 
+ querystring\n else:\n return ''\n\n\[email protected]_tag('helpers/utilization_graph.html')\ndef utilization_graph(utilization, warning_threshold=75, danger_threshold=90):\n \"\"\"\n Display a horizontal bar graph indicating a percentage of utilization.\n \"\"\"\n if utilization == 100:\n bar_class = 'bg-secondary'\n elif danger_threshold and utilization >= danger_threshold:\n bar_class = 'bg-danger'\n elif warning_threshold and utilization >= warning_threshold:\n bar_class = 'bg-warning'\n elif warning_threshold or danger_threshold:\n bar_class = 'bg-success'\n else:\n bar_class = 'bg-gray'\n return {\n 'utilization': utilization,\n 'bar_class': bar_class,\n }\n\n\[email protected]_tag('helpers/table_config_form.html')\ndef table_config_form(table, table_name=None):\n return {\n 'table_name': table_name or table.__class__.__name__,\n 'form': TableConfigForm(table=table),\n }\n\n\[email protected]_tag('helpers/applied_filters.html', takes_context=True)\ndef applied_filters(context, model, form, query_params):\n \"\"\"\n Display the active filters for a given filter form.\n \"\"\"\n user = context['request'].user\n form.is_valid() # Ensure cleaned_data has been set\n\n applied_filters = []\n for filter_name in form.changed_data:\n if filter_name not in form.cleaned_data:\n continue\n\n querydict = query_params.copy()\n if filter_name not in querydict:\n continue\n\n bound_field = form.fields[filter_name].get_bound_field(form, filter_name)\n querydict.pop(filter_name)\n display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])\n\n applied_filters.append({\n 'name': filter_name,\n 'value': form.cleaned_data[filter_name],\n 'link_url': f'?{querydict.urlencode()}',\n 'link_text': f'{bound_field.label}: {display_value}',\n })\n\n save_link = None\n if user.has_perm('extras.add_savedfilter') and 'filter_id' not in context['request'].GET:\n object_type = ObjectType.objects.get_for_model(model).pk\n parameters = json.dumps(dict(context['request'].GET.lists()))\n url = reverse('extras:savedfilter_add')\n save_link = f\"{url}?object_types={object_type}¶meters={quote(parameters)}\"\n\n return {\n 'applied_filters': applied_filters,\n 'save_link': save_link,\n }\n", "path": "netbox/utilities/templatetags/helpers.py"}]}
| 3,442 | 366 |
gh_patches_debug_23722
|
rasdani/github-patches
|
git_diff
|
intel__dffml-585
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docs: source: New File source tutorial
The question of how to make a new data source came up [recently](https://gitter.im/dffml/community?at=5e0e4bfbfd580457e7b3e26a). We should have multiple tutorials for sources.
We're going to use https://github.com/intel/dffml/issues/551 as the example for this tutorial, so that issue needs to be closed first.
You'll want to make a
`docs/tutorials/sources/index.rst`
Then take this existing tutorial and move it under
`docs/tutorials/sources/complex.rst`
Your new one will go under
`sources/file.rst`
The idea here is just to explain how you can write a source which reads data from some new filetype, with our example being the `.ini`.
Try to follow the new model tutorial format, where we do `literalinclude` and `:lines:` to talk about individual sections of the files we're writing.
--- END ISSUE ---
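As background for the `.ini` part of the issue, here is a minimal standalone sketch of how `configparser` exposes each section of an `.ini` file as a plain dict; the sample content and variable names are illustrative assumptions, not part of any DFFML API.
```python
from configparser import ConfigParser

# Sample .ini content; purely illustrative.
SAMPLE = """
[first_section]
feature_a = 1
feature_b = 0.5

[second_section]
feature_a = 2
"""

parser = ConfigParser()
parser.read_string(SAMPLE)

records = {}
for section in parser.sections():
    # Each section name becomes a record key; its key/value pairs
    # are collected into a plain dict (values stay as strings).
    records[section] = dict(parser.items(section))

print(records)
# {'first_section': {'feature_a': '1', 'feature_b': '0.5'},
#  'second_section': {'feature_a': '2'}}
```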
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dffml/source/ini.py`
Content:
```
1 from configparser import ConfigParser
2
3 from ..base import config
4 from ..record import Record
5 from .file import FileSource
6 from .memory import MemorySource
7 from ..util.data import parser_helper
8 from ..util.entrypoint import entrypoint
9
10
11 @config
12 class INISourceConfig:
13 filename: str
14 readwrite: bool = False
15 allowempty: bool = False
16
17
18 @entrypoint("ini")
19 class INISource(FileSource, MemorySource):
20 """
21 Source to read files in .ini format.
22 """
23
24 CONFIG = INISourceConfig
25
26 async def load_fd(self, ifile):
27 # Creating an instance of configparser
28 parser = ConfigParser()
29 # Read from a file object
30 parser.read_file(ifile)
31 # Get all the sections present in the file
32 sections = parser.sections()
33
34 self.mem = {}
35
36 # Go over each section
37 for section in sections:
38 # Get data under each section as a dict
39 temp_dict = {}
40 for k, v in parser.items(section):
41 temp_dict[k] = parser_helper(v)
42 # Each section used as a record
43 self.mem[str(section)] = Record(
44 str(section), data={"features": temp_dict},
45 )
46
47 self.logger.debug("%r loaded %d sections", self, len(self.mem))
48
49 async def dump_fd(self, fd):
50 # Create an instance of configparser
51 parser = ConfigParser()
52
53 # Go over each section and record in mem
54 for section, record in self.mem.items():
55 # Get each section data as a dict
56 section_data = record.features()
57 if section not in parser.keys():
58 # If section does not exist add new section
59 parser.add_section(section)
60 # Set section data
61 parser[section] = section_data
62
63 # Write to the fileobject
64 parser.write(fd)
65
66 self.logger.debug("%r saved %d sections", self, len(self.mem))
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dffml/source/ini.py b/dffml/source/ini.py
--- a/dffml/source/ini.py
+++ b/dffml/source/ini.py
@@ -23,11 +23,11 @@
CONFIG = INISourceConfig
- async def load_fd(self, ifile):
+ async def load_fd(self, fileobj):
# Creating an instance of configparser
parser = ConfigParser()
# Read from a file object
- parser.read_file(ifile)
+ parser.read_file(fileobj)
# Get all the sections present in the file
sections = parser.sections()
@@ -46,7 +46,7 @@
self.logger.debug("%r loaded %d sections", self, len(self.mem))
- async def dump_fd(self, fd):
+ async def dump_fd(self, fileobj):
# Create an instance of configparser
parser = ConfigParser()
@@ -61,6 +61,6 @@
parser[section] = section_data
# Write to the fileobject
- parser.write(fd)
+ parser.write(fileobj)
self.logger.debug("%r saved %d sections", self, len(self.mem))
|
{"golden_diff": "diff --git a/dffml/source/ini.py b/dffml/source/ini.py\n--- a/dffml/source/ini.py\n+++ b/dffml/source/ini.py\n@@ -23,11 +23,11 @@\n \n CONFIG = INISourceConfig\n \n- async def load_fd(self, ifile):\n+ async def load_fd(self, fileobj):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n- parser.read_file(ifile)\n+ parser.read_file(fileobj)\n # Get all the sections present in the file\n sections = parser.sections()\n \n@@ -46,7 +46,7 @@\n \n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n \n- async def dump_fd(self, fd):\n+ async def dump_fd(self, fileobj):\n # Create an instance of configparser\n parser = ConfigParser()\n \n@@ -61,6 +61,6 @@\n parser[section] = section_data\n \n # Write to the fileobject\n- parser.write(fd)\n+ parser.write(fileobj)\n \n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "issue": "docs: source: New File source tutorial\nThe question of how to make a new data source came up [recently](https://gitter.im/dffml/community?at=5e0e4bfbfd580457e7b3e26a). We should have multiple tutorials for sources.\r\n\r\nWe're going to use https://github.com/intel/dffml/issues/551 as the example for this tutorial, so that issue needs to be closed first.\r\n\r\nYou'll want to make a\r\n\r\n`docs/tutorials/sources/index.rst`\r\n\r\nThen take this exist tutorial and move it under\r\n\r\n`docs/tutoirials/sources/complex.rst`\r\n\r\nYou're new one will go under \r\n\r\n`sources/file.rst`\r\n\r\nTe idea here is just to explain how you can write a source which reads data from some new filetype, with our example being the `.ini`\r\n\r\nTry to follow the new model tutorial format, where we do `literalinclude` and `:lines:` to talk about individual sections of the files we're writing.\n", "before_files": [{"content": "from configparser import ConfigParser\n\nfrom ..base import config\nfrom ..record import Record\nfrom .file import FileSource\nfrom .memory import MemorySource\nfrom ..util.data import parser_helper\nfrom ..util.entrypoint import entrypoint\n\n\n@config\nclass INISourceConfig:\n filename: str\n readwrite: bool = False\n allowempty: bool = False\n\n\n@entrypoint(\"ini\")\nclass INISource(FileSource, MemorySource):\n \"\"\"\n Source to read files in .ini format.\n \"\"\"\n\n CONFIG = INISourceConfig\n\n async def load_fd(self, ifile):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n parser.read_file(ifile)\n # Get all the sections present in the file\n sections = parser.sections()\n\n self.mem = {}\n\n # Go over each section\n for section in sections:\n # Get data under each section as a dict\n temp_dict = {}\n for k, v in parser.items(section):\n temp_dict[k] = parser_helper(v)\n # Each section used as a record\n self.mem[str(section)] = Record(\n str(section), data={\"features\": temp_dict},\n )\n\n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n\n async def dump_fd(self, fd):\n # Create an instance of configparser\n parser = ConfigParser()\n\n # Go over each section and record in mem\n for section, record in self.mem.items():\n # Get each section data as a dict\n section_data = record.features()\n if section not in parser.keys():\n # If section does not exist add new section\n parser.add_section(section)\n # Set section data\n parser[section] = section_data\n\n # Write to the fileobject\n parser.write(fd)\n\n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "path": "dffml/source/ini.py"}], 
"after_files": [{"content": "from configparser import ConfigParser\n\nfrom ..base import config\nfrom ..record import Record\nfrom .file import FileSource\nfrom .memory import MemorySource\nfrom ..util.data import parser_helper\nfrom ..util.entrypoint import entrypoint\n\n\n@config\nclass INISourceConfig:\n filename: str\n readwrite: bool = False\n allowempty: bool = False\n\n\n@entrypoint(\"ini\")\nclass INISource(FileSource, MemorySource):\n \"\"\"\n Source to read files in .ini format.\n \"\"\"\n\n CONFIG = INISourceConfig\n\n async def load_fd(self, fileobj):\n # Creating an instance of configparser\n parser = ConfigParser()\n # Read from a file object\n parser.read_file(fileobj)\n # Get all the sections present in the file\n sections = parser.sections()\n\n self.mem = {}\n\n # Go over each section\n for section in sections:\n # Get data under each section as a dict\n temp_dict = {}\n for k, v in parser.items(section):\n temp_dict[k] = parser_helper(v)\n # Each section used as a record\n self.mem[str(section)] = Record(\n str(section), data={\"features\": temp_dict},\n )\n\n self.logger.debug(\"%r loaded %d sections\", self, len(self.mem))\n\n async def dump_fd(self, fileobj):\n # Create an instance of configparser\n parser = ConfigParser()\n\n # Go over each section and record in mem\n for section, record in self.mem.items():\n # Get each section data as a dict\n section_data = record.features()\n if section not in parser.keys():\n # If section does not exist add new section\n parser.add_section(section)\n # Set section data\n parser[section] = section_data\n\n # Write to the fileobject\n parser.write(fileobj)\n\n self.logger.debug(\"%r saved %d sections\", self, len(self.mem))\n", "path": "dffml/source/ini.py"}]}
| 1,025 | 267 |
gh_patches_debug_28113
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-1835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Container tile does not show number of projects
--- END ISSUE ---
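For orientation only, a generic sketch of the Django REST Framework pattern for exposing a computed count through a `SerializerMethodField`; the class and field names below are assumptions for illustration rather than the project's actual serializer.
```python
from rest_framework import serializers


class TileSerializer(serializers.Serializer):
    # Read-only field whose value comes from the get_<field_name>()
    # method below rather than from a model attribute.
    published_projects_count = serializers.SerializerMethodField()

    def get_published_projects_count(self, instance):
        # Assumes the instance exposes a related queryset of published
        # projects; fall back to 0 when the relation is missing.
        projects = getattr(instance, 'published_projects', None)
        return projects.count() if projects is not None else 0
```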
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/plans/serializers.py`
Content:
```
1 from django.utils.translation import ugettext as _
2 from easy_thumbnails.files import get_thumbnailer
3 from rest_framework import serializers
4
5 from adhocracy4.projects.models import Project
6 from meinberlin.apps.projects import get_project_type
7
8 from .models import Plan
9
10
11 class CommonFields:
12
13 def get_district(self, instance):
14 city_wide = _('City wide')
15 district_name = str(city_wide)
16 if instance.administrative_district:
17 district_name = instance.administrative_district.name
18 return district_name
19
20 def get_point(self, instance):
21 point = instance.point
22 if not point:
23 point = ''
24 return point
25
26
27 class ProjectSerializer(serializers.ModelSerializer, CommonFields):
28 type = serializers.SerializerMethodField()
29 subtype = serializers.SerializerMethodField()
30 title = serializers.SerializerMethodField()
31 url = serializers.SerializerMethodField()
32 organisation = serializers.SerializerMethodField()
33 point = serializers.SerializerMethodField()
34 point_label = serializers.SerializerMethodField()
35 cost = serializers.SerializerMethodField()
36 district = serializers.SerializerMethodField()
37 status = serializers.SerializerMethodField()
38 participation = serializers.SerializerMethodField()
39 participation_active = serializers.SerializerMethodField()
40 participation_string = serializers.SerializerMethodField()
41 participation_display = serializers.SerializerMethodField()
42 future_phase = serializers.SerializerMethodField()
43 active_phase = serializers.SerializerMethodField()
44 past_phase = serializers.SerializerMethodField()
45 tile_image = serializers.SerializerMethodField()
46 plan_url = serializers.SerializerMethodField()
47 plan_title = serializers.SerializerMethodField()
48
49 class Meta:
50 model = Project
51 fields = ['type', 'subtype', 'title', 'url',
52 'organisation', 'tile_image',
53 'tile_image_copyright',
54 'point', 'point_label', 'cost',
55 'district', 'topic',
56 'status',
57 'participation_string',
58 'participation_active',
59 'participation', 'participation_display', 'description',
60 'future_phase', 'active_phase',
61 'past_phase', 'plan_url', 'plan_title']
62
63 def _get_participation_status_project(self, instance):
64 if instance.phases.active_phases():
65 return _('running'), True
66 elif instance.phases.future_phases():
67 try:
68 return (_('starts at {}').format
69 (instance.phases.future_phases().first().
70 start_date.date()),
71 True)
72 except AttributeError:
73 return (_('starts in the future'),
74 True)
75 else:
76 return _('done'), False
77
78 def get_type(self, instance):
79 return 'project'
80
81 def get_subtype(self, instance):
82 subtype = get_project_type(instance)
83 if subtype in ('external', 'bplan'):
84 return 'external'
85 return subtype
86
87 def get_title(self, instance):
88 return instance.name
89
90 def get_url(self, instance):
91 if get_project_type(instance) in ('external', 'bplan'):
92 return instance.externalproject.url
93 return instance.get_absolute_url()
94
95 def get_organisation(self, instance):
96 return instance.organisation.name
97
98 def get_tile_image(self, instance):
99 image_url = ''
100 if instance.tile_image:
101 image = get_thumbnailer(instance.tile_image)['project_tile']
102 image_url = image.url
103 return image_url
104
105 def get_point_label(self, instance):
106 return ''
107
108 def get_cost(self, instance):
109 return ''
110
111 def get_status(self, instance):
112 if instance.phases.active_phases() or instance.phases.future_phases():
113 return 2
114 return 3
115
116 def get_participation(self, instance):
117 return 1
118
119 def get_participation_display(self, instance):
120 return _('Yes')
121
122 def get_future_phase(self, instance):
123 if (instance.future_phases and
124 instance.future_phases.first().start_date):
125 return str(
126 instance.future_phases.first().start_date.date())
127 return False
128
129 def get_active_phase(self, instance):
130 if instance.active_phase:
131 progress = instance.active_phase_progress
132 time_left = instance.time_left
133 return [progress, time_left]
134 return False
135
136 def get_past_phase(self, instance):
137 if instance.phases.past_phases():
138 return True
139 return False
140
141 def get_participation_string(self, instance):
142 participation_string, participation_active = \
143 self._get_participation_status_project(instance)
144 return str(participation_string)
145
146 def get_participation_active(self, instance):
147 participation_string, participation_active = \
148 self._get_participation_status_project(instance)
149 return participation_active
150
151 def get_plan_url(self, instance):
152 if instance.plans.exists():
153 return instance.plans.first().get_absolute_url()
154 return None
155
156 def get_plan_title(self, instance):
157 if instance.plans.exists():
158 return instance.plans.first().title
159 return None
160
161
162 class PlanSerializer(serializers.ModelSerializer, CommonFields):
163 type = serializers.SerializerMethodField()
164 subtype = serializers.SerializerMethodField()
165 url = serializers.SerializerMethodField()
166 district = serializers.SerializerMethodField()
167 point = serializers.SerializerMethodField()
168 participation_active = serializers.SerializerMethodField()
169 participation_string = serializers.SerializerMethodField()
170 published_projects_count = serializers.SerializerMethodField()
171
172 class Meta:
173 model = Plan
174 fields = ['type', 'subtype', 'title', 'url',
175 'organisation', 'point',
176 'point_label', 'cost',
177 'district', 'topic', 'status',
178 'participation',
179 'participation_string',
180 'participation_active',
181 'published_projects_count']
182
183 def get_subtype(self, instance):
184 return 'plan'
185
186 def _get_participation_string(self, projects):
187 future_phase = None
188 for project in projects:
189 phases = project.phases
190 if phases.active_phases():
191 return _('running')
192 if phases.future_phases() and \
193 phases.future_phases().first().start_date:
194 date = phases.future_phases().first().start_date
195 if not future_phase:
196 future_phase = date
197 else:
198 if date < future_phase:
199 future_phase = date
200
201 if future_phase:
202 return _('starts at {}').format(future_phase.date())
203
204 def _get_participation_status_plan(self, item):
205 projects = item.projects.all() \
206 .filter(is_draft=False,
207 is_archived=False,
208 is_public=True)
209 if not projects:
210 return item.get_participation_display(), False
211 else:
212 participation_string = self._get_participation_string(projects)
213 if participation_string:
214 return participation_string, True
215 else:
216 return item.get_participation_display(), False
217
218 def get_type(self, instance):
219 return 'plan'
220
221 def get_url(self, instance):
222 return instance.get_absolute_url()
223
224 def get_published_projects_count(self, instance):
225 return instance.published_projects.count()
226
227 def get_participation_string(self, instance):
228 participation_string, participation_active = \
229 self._get_participation_status_plan(instance)
230 return str(participation_string)
231
232 def get_participation_active(self, instance):
233 participation_string, participation_active = \
234 self._get_participation_status_plan(instance)
235 return participation_active
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/plans/serializers.py b/meinberlin/apps/plans/serializers.py
--- a/meinberlin/apps/plans/serializers.py
+++ b/meinberlin/apps/plans/serializers.py
@@ -45,6 +45,7 @@
tile_image = serializers.SerializerMethodField()
plan_url = serializers.SerializerMethodField()
plan_title = serializers.SerializerMethodField()
+ published_projects_count = serializers.SerializerMethodField()
class Meta:
model = Project
@@ -58,7 +59,8 @@
'participation_active',
'participation', 'participation_display', 'description',
'future_phase', 'active_phase',
- 'past_phase', 'plan_url', 'plan_title']
+ 'past_phase', 'plan_url', 'plan_title',
+ 'published_projects_count']
def _get_participation_status_project(self, instance):
if instance.phases.active_phases():
@@ -158,6 +160,10 @@
return instance.plans.first().title
return None
+ def get_published_projects_count(self, instance):
+ if hasattr(instance, 'projectcontainer') and instance.projectcontainer:
+ return instance.projectcontainer.active_project_count
+
class PlanSerializer(serializers.ModelSerializer, CommonFields):
type = serializers.SerializerMethodField()
|
{"golden_diff": "diff --git a/meinberlin/apps/plans/serializers.py b/meinberlin/apps/plans/serializers.py\n--- a/meinberlin/apps/plans/serializers.py\n+++ b/meinberlin/apps/plans/serializers.py\n@@ -45,6 +45,7 @@\n tile_image = serializers.SerializerMethodField()\n plan_url = serializers.SerializerMethodField()\n plan_title = serializers.SerializerMethodField()\n+ published_projects_count = serializers.SerializerMethodField()\n \n class Meta:\n model = Project\n@@ -58,7 +59,8 @@\n 'participation_active',\n 'participation', 'participation_display', 'description',\n 'future_phase', 'active_phase',\n- 'past_phase', 'plan_url', 'plan_title']\n+ 'past_phase', 'plan_url', 'plan_title',\n+ 'published_projects_count']\n \n def _get_participation_status_project(self, instance):\n if instance.phases.active_phases():\n@@ -158,6 +160,10 @@\n return instance.plans.first().title\n return None\n \n+ def get_published_projects_count(self, instance):\n+ if hasattr(instance, 'projectcontainer') and instance.projectcontainer:\n+ return instance.projectcontainer.active_project_count\n+\n \n class PlanSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n", "issue": "Container tile does not show number of projects \n\n", "before_files": [{"content": "from django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\nfrom .models import Plan\n\n\nclass CommonFields:\n\n def get_district(self, instance):\n city_wide = _('City wide')\n district_name = str(city_wide)\n if instance.administrative_district:\n district_name = instance.administrative_district.name\n return district_name\n\n def get_point(self, instance):\n point = instance.point\n if not point:\n point = ''\n return point\n\n\nclass ProjectSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n title = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n organisation = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n point_label = serializers.SerializerMethodField()\n cost = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n status = serializers.SerializerMethodField()\n participation = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n participation_display = serializers.SerializerMethodField()\n future_phase = serializers.SerializerMethodField()\n active_phase = serializers.SerializerMethodField()\n past_phase = serializers.SerializerMethodField()\n tile_image = serializers.SerializerMethodField()\n plan_url = serializers.SerializerMethodField()\n plan_title = serializers.SerializerMethodField()\n\n class Meta:\n model = Project\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'tile_image',\n 'tile_image_copyright',\n 'point', 'point_label', 'cost',\n 'district', 'topic',\n 'status',\n 'participation_string',\n 'participation_active',\n 'participation', 'participation_display', 'description',\n 'future_phase', 'active_phase',\n 'past_phase', 'plan_url', 'plan_title']\n\n def _get_participation_status_project(self, instance):\n if instance.phases.active_phases():\n return _('running'), True\n elif 
instance.phases.future_phases():\n try:\n return (_('starts at {}').format\n (instance.phases.future_phases().first().\n start_date.date()),\n True)\n except AttributeError:\n return (_('starts in the future'),\n True)\n else:\n return _('done'), False\n\n def get_type(self, instance):\n return 'project'\n\n def get_subtype(self, instance):\n subtype = get_project_type(instance)\n if subtype in ('external', 'bplan'):\n return 'external'\n return subtype\n\n def get_title(self, instance):\n return instance.name\n\n def get_url(self, instance):\n if get_project_type(instance) in ('external', 'bplan'):\n return instance.externalproject.url\n return instance.get_absolute_url()\n\n def get_organisation(self, instance):\n return instance.organisation.name\n\n def get_tile_image(self, instance):\n image_url = ''\n if instance.tile_image:\n image = get_thumbnailer(instance.tile_image)['project_tile']\n image_url = image.url\n return image_url\n\n def get_point_label(self, instance):\n return ''\n\n def get_cost(self, instance):\n return ''\n\n def get_status(self, instance):\n if instance.phases.active_phases() or instance.phases.future_phases():\n return 2\n return 3\n\n def get_participation(self, instance):\n return 1\n\n def get_participation_display(self, instance):\n return _('Yes')\n\n def get_future_phase(self, instance):\n if (instance.future_phases and\n instance.future_phases.first().start_date):\n return str(\n instance.future_phases.first().start_date.date())\n return False\n\n def get_active_phase(self, instance):\n if instance.active_phase:\n progress = instance.active_phase_progress\n time_left = instance.time_left\n return [progress, time_left]\n return False\n\n def get_past_phase(self, instance):\n if instance.phases.past_phases():\n return True\n return False\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return participation_active\n\n def get_plan_url(self, instance):\n if instance.plans.exists():\n return instance.plans.first().get_absolute_url()\n return None\n\n def get_plan_title(self, instance):\n if instance.plans.exists():\n return instance.plans.first().title\n return None\n\n\nclass PlanSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Plan\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'point',\n 'point_label', 'cost',\n 'district', 'topic', 'status',\n 'participation',\n 'participation_string',\n 'participation_active',\n 'published_projects_count']\n\n def get_subtype(self, instance):\n return 'plan'\n\n def _get_participation_string(self, projects):\n future_phase = None\n for project in projects:\n phases = project.phases\n if phases.active_phases():\n return _('running')\n if phases.future_phases() and \\\n phases.future_phases().first().start_date:\n date = phases.future_phases().first().start_date\n if not 
future_phase:\n future_phase = date\n else:\n if date < future_phase:\n future_phase = date\n\n if future_phase:\n return _('starts at {}').format(future_phase.date())\n\n def _get_participation_status_plan(self, item):\n projects = item.projects.all() \\\n .filter(is_draft=False,\n is_archived=False,\n is_public=True)\n if not projects:\n return item.get_participation_display(), False\n else:\n participation_string = self._get_participation_string(projects)\n if participation_string:\n return participation_string, True\n else:\n return item.get_participation_display(), False\n\n def get_type(self, instance):\n return 'plan'\n\n def get_url(self, instance):\n return instance.get_absolute_url()\n\n def get_published_projects_count(self, instance):\n return instance.published_projects.count()\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return participation_active\n", "path": "meinberlin/apps/plans/serializers.py"}], "after_files": [{"content": "from django.utils.translation import ugettext as _\nfrom easy_thumbnails.files import get_thumbnailer\nfrom rest_framework import serializers\n\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\nfrom .models import Plan\n\n\nclass CommonFields:\n\n def get_district(self, instance):\n city_wide = _('City wide')\n district_name = str(city_wide)\n if instance.administrative_district:\n district_name = instance.administrative_district.name\n return district_name\n\n def get_point(self, instance):\n point = instance.point\n if not point:\n point = ''\n return point\n\n\nclass ProjectSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n title = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n organisation = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n point_label = serializers.SerializerMethodField()\n cost = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n status = serializers.SerializerMethodField()\n participation = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n participation_display = serializers.SerializerMethodField()\n future_phase = serializers.SerializerMethodField()\n active_phase = serializers.SerializerMethodField()\n past_phase = serializers.SerializerMethodField()\n tile_image = serializers.SerializerMethodField()\n plan_url = serializers.SerializerMethodField()\n plan_title = serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Project\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'tile_image',\n 'tile_image_copyright',\n 'point', 'point_label', 'cost',\n 'district', 'topic',\n 'status',\n 'participation_string',\n 'participation_active',\n 'participation', 'participation_display', 'description',\n 'future_phase', 'active_phase',\n 'past_phase', 'plan_url', 'plan_title',\n 'published_projects_count']\n\n def _get_participation_status_project(self, instance):\n if instance.phases.active_phases():\n 
return _('running'), True\n elif instance.phases.future_phases():\n try:\n return (_('starts at {}').format\n (instance.phases.future_phases().first().\n start_date.date()),\n True)\n except AttributeError:\n return (_('starts in the future'),\n True)\n else:\n return _('done'), False\n\n def get_type(self, instance):\n return 'project'\n\n def get_subtype(self, instance):\n subtype = get_project_type(instance)\n if subtype in ('external', 'bplan'):\n return 'external'\n return subtype\n\n def get_title(self, instance):\n return instance.name\n\n def get_url(self, instance):\n if get_project_type(instance) in ('external', 'bplan'):\n return instance.externalproject.url\n return instance.get_absolute_url()\n\n def get_organisation(self, instance):\n return instance.organisation.name\n\n def get_tile_image(self, instance):\n image_url = ''\n if instance.tile_image:\n image = get_thumbnailer(instance.tile_image)['project_tile']\n image_url = image.url\n return image_url\n\n def get_point_label(self, instance):\n return ''\n\n def get_cost(self, instance):\n return ''\n\n def get_status(self, instance):\n if instance.phases.active_phases() or instance.phases.future_phases():\n return 2\n return 3\n\n def get_participation(self, instance):\n return 1\n\n def get_participation_display(self, instance):\n return _('Yes')\n\n def get_future_phase(self, instance):\n if (instance.future_phases and\n instance.future_phases.first().start_date):\n return str(\n instance.future_phases.first().start_date.date())\n return False\n\n def get_active_phase(self, instance):\n if instance.active_phase:\n progress = instance.active_phase_progress\n time_left = instance.time_left\n return [progress, time_left]\n return False\n\n def get_past_phase(self, instance):\n if instance.phases.past_phases():\n return True\n return False\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_project(instance)\n return participation_active\n\n def get_plan_url(self, instance):\n if instance.plans.exists():\n return instance.plans.first().get_absolute_url()\n return None\n\n def get_plan_title(self, instance):\n if instance.plans.exists():\n return instance.plans.first().title\n return None\n\n def get_published_projects_count(self, instance):\n if hasattr(instance, 'projectcontainer') and instance.projectcontainer:\n return instance.projectcontainer.active_project_count\n\n\nclass PlanSerializer(serializers.ModelSerializer, CommonFields):\n type = serializers.SerializerMethodField()\n subtype = serializers.SerializerMethodField()\n url = serializers.SerializerMethodField()\n district = serializers.SerializerMethodField()\n point = serializers.SerializerMethodField()\n participation_active = serializers.SerializerMethodField()\n participation_string = serializers.SerializerMethodField()\n published_projects_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Plan\n fields = ['type', 'subtype', 'title', 'url',\n 'organisation', 'point',\n 'point_label', 'cost',\n 'district', 'topic', 'status',\n 'participation',\n 'participation_string',\n 'participation_active',\n 'published_projects_count']\n\n def get_subtype(self, instance):\n return 'plan'\n\n def _get_participation_string(self, projects):\n future_phase = None\n for project in projects:\n 
phases = project.phases\n if phases.active_phases():\n return _('running')\n if phases.future_phases() and \\\n phases.future_phases().first().start_date:\n date = phases.future_phases().first().start_date\n if not future_phase:\n future_phase = date\n else:\n if date < future_phase:\n future_phase = date\n\n if future_phase:\n return _('starts at {}').format(future_phase.date())\n\n def _get_participation_status_plan(self, item):\n projects = item.projects.all() \\\n .filter(is_draft=False,\n is_archived=False,\n is_public=True)\n if not projects:\n return item.get_participation_display(), False\n else:\n participation_string = self._get_participation_string(projects)\n if participation_string:\n return participation_string, True\n else:\n return item.get_participation_display(), False\n\n def get_type(self, instance):\n return 'plan'\n\n def get_url(self, instance):\n return instance.get_absolute_url()\n\n def get_published_projects_count(self, instance):\n return instance.published_projects.count()\n\n def get_participation_string(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return str(participation_string)\n\n def get_participation_active(self, instance):\n participation_string, participation_active = \\\n self._get_participation_status_plan(instance)\n return participation_active\n", "path": "meinberlin/apps/plans/serializers.py"}]}
| 2,411 | 296 |
gh_patches_debug_13070
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-116
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong links to images from entry page
If I open an entry page, for example
http://changelog.linfiniti.com/qgis/2.8/entry/more-responsive-browser/ and then click on the image I get server error 404 because the link is pointing to
/qgis/2.8/entry/<entry name>/images/entries/<hash>.<ext>
instead of
/media/images/entries/<hash>.<ext>
--- END ISSUE ---
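To illustrate the symptom, a small sketch of how a relative image path resolves against the page URL compared with an absolute `/media/` path; the host and file names are placeholders.
```python
from urllib.parse import urljoin

# Placeholder page URL standing in for an entry detail page.
page = "http://changelog.example.com/qgis/2.8/entry/some-entry/"

# A relative src resolves against the page path -> unexpected URL (404).
print(urljoin(page, "images/entries/abc123.png"))
# http://changelog.example.com/qgis/2.8/entry/some-entry/images/entries/abc123.png

# An absolute src built from MEDIA_URL resolves against the site root.
print(urljoin(page, "/media/images/entries/abc123.png"))
# http://changelog.example.com/media/images/entries/abc123.png
```
In Django templates this is typically avoided by prefixing uploaded-file paths with `MEDIA_URL` (or using the image field's `.url`), which with the old-style `TEMPLATE_CONTEXT_PROCESSORS` setting requires the `django.core.context_processors.media` context processor to be enabled.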
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/core/settings/base.py`
Content:
```
1 # coding=utf-8
2 """
3 core.settings.base
4 """
5 # Django settings for projecta project.
6
7 from .utils import absolute_path
8
9 ADMINS = (
10 ('Tim Sutton', '[email protected]'),
11 )
12 SERVER_EMAIL = '[email protected]'
13 EMAIL_HOST = 'localhost'
14 DEFAULT_FROM_EMAIL = '[email protected]'
15
16 MANAGERS = ADMINS
17
18 # Local time zone for this installation. Choices can be found here:
19 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
20 # although not all choices may be available on all operating systems.
21 # In a Windows environment this must be set to your system time zone.
22 TIME_ZONE = 'America/Chicago'
23
24 # Language code for this installation. All choices can be found here:
25 # http://www.i18nguy.com/unicode/language-identifiers.html
26 LANGUAGE_CODE = 'en-us'
27
28 SITE_ID = 1
29
30 # If you set this to False, Django will make some optimizations so as not
31 # to load the internationalization machinery.
32 USE_I18N = True
33
34 # If you set this to False, Django will not format dates, numbers and
35 # calendars according to the current locale.
36 USE_L10N = True
37
38 # If you set this to False, Django will not use timezone-aware datetimes.
39 USE_TZ = True
40
41 # Absolute filesystem path to the directory that will hold user-uploaded files.
42 # Example: "/var/www/example.com/media/"
43 MEDIA_ROOT = absolute_path('media')
44
45 # URL that handles the media served from MEDIA_ROOT. Make sure to use a
46 # trailing slash.
47 # Examples: "http://example.com/media/", "http://media.example.com/"
48 # MEDIA_URL = '/media/'
49 # setting full MEDIA_URL to be able to use it for the feeds
50 MEDIA_URL = '/media/'
51
52 # Absolute path to the directory static files should be collected to.
53 # Don't put anything in this directory yourself; store your static files
54 # in apps' "static/" subdirectories and in STATICFILES_DIRS.
55 # Example: "/var/www/example.com/static/"
56 STATIC_ROOT = absolute_path('static')
57
58 # URL prefix for static files.
59 # Example: "http://example.com/static/", "http://static.example.com/"
60 STATIC_URL = '/static/'
61
62 # Additional locations of static files
63 STATICFILES_DIRS = (
64 # Put strings here, like "/home/html/static" or "C:/www/django/static".
65 # Always use forward slashes, even on Windows.
66 # Don't forget to use absolute paths, not relative paths.
67 absolute_path('core', 'base_static'),
68 )
69
70 # List of finder classes that know how to find static files in
71 # various locations.
72 STATICFILES_FINDERS = (
73 'django.contrib.staticfiles.finders.FileSystemFinder',
74 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
75 # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
76 )
77
78 # import SECRET_KEY into current namespace
79 # noinspection PyUnresolvedReferences
80 from .secret import SECRET_KEY # noqa
81
82 # List of callables that know how to import templates from various sources.
83 TEMPLATE_LOADERS = (
84 'django.template.loaders.filesystem.Loader',
85 'django.template.loaders.app_directories.Loader',
86 # 'django.template.loaders.eggs.Loader',
87 )
88
89 TEMPLATE_CONTEXT_PROCESSORS = (
90 'django.contrib.auth.context_processors.auth',
91 'django.core.context_processors.request',
92 'core.context_processors.add_intercom_app_id'
93 )
94
95 MIDDLEWARE_CLASSES = (
96 'django.middleware.common.CommonMiddleware',
97 'django.contrib.sessions.middleware.SessionMiddleware',
98 'django.middleware.csrf.CsrfViewMiddleware',
99 'django.contrib.auth.middleware.AuthenticationMiddleware',
100 'django.contrib.messages.middleware.MessageMiddleware',
101 # Uncomment the next line for simple clickjacking protection:
102 # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
103 'core.custom_middleware.NavContextMiddleware'
104 )
105
106 ROOT_URLCONF = 'core.urls'
107
108 # Python dotted path to the WSGI application used by Django's runserver.
109 WSGI_APPLICATION = 'core.wsgi.application'
110
111 TEMPLATE_DIRS = (
112 # project level templates
113 absolute_path('core', 'base_templates'),
114 absolute_path('vota', 'templates'),
115 absolute_path('changes', 'templates'),
116
117 )
118
119 INSTALLED_APPS = (
120 'django.contrib.auth',
121 'django.contrib.contenttypes',
122 'django.contrib.sessions',
123 'django.contrib.sites',
124 'django.contrib.messages',
125 'django.contrib.staticfiles',
126 'django.contrib.admin',
127 'django.contrib.syndication',
128 )
129
130 # A sample logging configuration. The only tangible logging
131 # performed by this configuration is to send an email to
132 # the site admins on every HTTP 500 error when DEBUG=False.
133 # See http://docs.djangoproject.com/en/dev/topics/logging for
134 # more details on how to customize your logging configuration.
135 LOGGING = {
136 'version': 1,
137 'disable_existing_loggers': False,
138 'filters': {
139 'require_debug_false': {
140 '()': 'django.utils.log.RequireDebugFalse'
141 }
142 },
143 'handlers': {
144 'mail_admins': {
145 'level': 'ERROR',
146 'filters': ['require_debug_false'],
147 'class': 'django.utils.log.AdminEmailHandler'
148 }
149 },
150 'loggers': {
151 'django.request': {
152 'handlers': ['mail_admins'],
153 'level': 'ERROR',
154 'propagate': True,
155 },
156 }
157 }
158
```
Path: `django_project/vota/models/ballot.py`
Content:
```
1 """
2 This model is to create "ballots" i.e. questions/proposals/changes which a
3 Committee can vote on.
4
5 After voting is complete, a ballot should be marked as either Denied or Passed.
6
7 If no quorum is reached, no_quorum should be True
8
9 A ballot has one Committee.
10 """
11 from django.core.urlresolvers import reverse
12 from django.utils.text import slugify
13 import logging
14 from core.settings.contrib import STOP_WORDS
15
16 logger = logging.getLogger(__name__)
17 from django.db import models
18 from audited_models.models import AuditedModel
19 from django.utils.translation import ugettext_lazy as _
20 from django.utils import timezone
21 from vota.models.vote import Vote
22 import datetime
23 from django.contrib.auth.models import User
24
25
26 class ApprovedCategoryManager(models.Manager):
27 """Custom category manager that shows only approved ballots."""
28
29 def get_query_set(self):
30 """Query set generator"""
31 return super(
32 ApprovedCategoryManager, self).get_query_set().filter(
33 approved=True)
34
35
36 class DeniedCategoryManager(models.Manager):
37 """Custom version manager that shows only denied ballots."""
38
39 def get_query_set(self):
40 """Query set generator"""
41 return super(
42 DeniedCategoryManager, self).get_query_set().filter(
43 denied=True)
44
45
46 class OpenBallotManager(models.Manager):
47 """Custom version manager that shows only open ballots."""
48
49 def get_query_set(self):
50 """Query set generator"""
51 return super(
52 OpenBallotManager, self).get_query_set().filter(
53 open_from__lt=timezone.now()).filter(closes__gt=timezone.now())
54
55
56 class ClosedBallotManager(models.Manager):
57 """Custom version manager that shows only closed ballots."""
58
59 def get_query_set(self):
60 """Query set generator"""
61 return super(
62 ClosedBallotManager, self).get_query_set().filter(
63 closes__gt=timezone.now())
64
65
66 class Ballot(AuditedModel):
67 """A category model e.g. gui, backend, web site etc."""
68 name = models.CharField(
69 help_text=_('Name of this ballot.'),
70 max_length=255,
71 null=False,
72 blank=False,
73 unique=False
74 ) # there is a unique together rule in meta class below
75
76 summary = models.CharField(
77 help_text=_('A brief overview of the ballot.'),
78 max_length=250,
79 blank=False,
80 null=False
81 )
82
83 description = models.TextField(
84 help_text=_('A full description of the proposal if a summary is not '
85 'enough!'),
86 max_length=3000,
87 null=True,
88 blank=True,
89 )
90
91 approved = models.BooleanField(
92 help_text=_(
93 'Whether this ballot has been approved.'),
94 default=False
95 )
96
97 denied = models.BooleanField(
98 help_text=_(
99 'Whether this ballot has been denied.'),
100 default=False
101 )
102
103 no_quorum = models.BooleanField(
104 help_text=_('Whether the ballot was denied because no quorum was '
105 'reached'),
106 default=False
107 )
108
109 open_from = models.DateTimeField(
110 help_text=_('Date the ballot opens'),
111 default=timezone.now()
112 )
113
114 closes = models.DateTimeField(
115 help_text=_('Date the ballot closes'),
116 default=timezone.now() + datetime.timedelta(days=7)
117 )
118
119 private = models.BooleanField(
120 help_text=_('Should members be prevented from viewing results before '
121 'voting?'),
122 default=False
123 )
124
125 proposer = models.ForeignKey(User)
126 # noinspection PyUnresolvedReferences
127 committee = models.ForeignKey('Committee')
128 slug = models.SlugField()
129 objects = models.Manager()
130 approved_objects = ApprovedCategoryManager()
131 denied_objects = DeniedCategoryManager()
132 open_objects = OpenBallotManager()
133 closed_objects = ClosedBallotManager()
134
135 # noinspection PyClassicStyleClass
136 class Meta:
137 """Meta options for the category class."""
138 unique_together = (
139 ('name', 'committee'),
140 ('committee', 'slug')
141 )
142 app_label = 'vota'
143
144 def save(self, *args, **kwargs):
145 if not self.pk:
146 words = self.name.split()
147 filtered_words = [t for t in words if t.lower() not in STOP_WORDS]
148 new_list = ' '.join(filtered_words)
149 self.slug = slugify(new_list)[:50]
150 super(Ballot, self).save(*args, **kwargs)
151
152 def __unicode__(self):
153 return u'%s : %s' % (self.committee.name, self.name)
154
155 def get_absolute_url(self):
156 return reverse('ballot-detail', kwargs={
157 'project_slug': self.committee.project.slug,
158 'committee_slug': self.committee.slug,
159 'slug': self.slug
160 })
161
162 def get_user_voted(self, user=None):
163 voted = False
164 if Vote.objects.filter(ballot=self).filter(user=user).exists():
165 voted = True
166 return voted
167
168 def get_positive_vote_count(self):
169 votes = Vote.objects.filter(ballot=self).filter(choice='y').count()
170 return votes
171
172 def get_negative_vote_count(self):
173 votes = Vote.objects.filter(ballot=self).filter(choice='n').count()
174 return votes
175
176 def get_abstainer_count(self):
177 votes = Vote.objects.filter(ballot=self).filter(choice='-').count()
178 return votes
179
180 def get_current_tally(self):
181 positive = self.get_positive_vote_count()
182 negative = self.get_negative_vote_count()
183 tally = 0
184 tally += positive
185 tally -= negative
186 return tally
187
188 def get_total_vote_count(self):
189 vote_count = Vote.objects.filter(ballot=self).count()
190 return vote_count
191
192 def has_quorum(self):
193 vote_count = self.get_total_vote_count()
194 committee_user_count = self.committee.users.all().count()
195 if committee_user_count != 0:
196 quorum_percent = self.committee.quorum_setting
197 percentage = 100 * float(vote_count) / float(committee_user_count)
198 if percentage > quorum_percent:
199 return True
200 else:
201 return False
202
203 def is_open(self):
204 open_date = self.open_from
205 close_date = self.closes
206 if open_date < timezone.now() < close_date:
207 return True
208 return False
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django_project/core/settings/base.py b/django_project/core/settings/base.py
--- a/django_project/core/settings/base.py
+++ b/django_project/core/settings/base.py
@@ -89,6 +89,7 @@
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.request',
+ 'django.core.context_processors.media',
'core.context_processors.add_intercom_app_id'
)
diff --git a/django_project/vota/models/ballot.py b/django_project/vota/models/ballot.py
--- a/django_project/vota/models/ballot.py
+++ b/django_project/vota/models/ballot.py
@@ -108,7 +108,7 @@
open_from = models.DateTimeField(
help_text=_('Date the ballot opens'),
- default=timezone.now()
+ default=timezone.now
)
closes = models.DateTimeField(
|
{"golden_diff": "diff --git a/django_project/core/settings/base.py b/django_project/core/settings/base.py\n--- a/django_project/core/settings/base.py\n+++ b/django_project/core/settings/base.py\n@@ -89,6 +89,7 @@\n TEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n+ 'django.core.context_processors.media',\n 'core.context_processors.add_intercom_app_id'\n )\n \ndiff --git a/django_project/vota/models/ballot.py b/django_project/vota/models/ballot.py\n--- a/django_project/vota/models/ballot.py\n+++ b/django_project/vota/models/ballot.py\n@@ -108,7 +108,7 @@\n \n open_from = models.DateTimeField(\n help_text=_('Date the ballot opens'),\n- default=timezone.now()\n+ default=timezone.now\n )\n \n closes = models.DateTimeField(\n", "issue": "Wrong links to images from entry page\nIf I open an entry page, for example \n http://changelog.linfiniti.com/qgis/2.8/entry/more-responsive-browser/ and then click on the image I get server error 404 because the link is pointing to \n /qgis/2.8/entry/<entry name>/images/entries/<hash>.<ext>\ninstead of \n /media/images/entries/<hash>.<ext>\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\ncore.settings.base\n\"\"\"\n# Django settings for projecta project.\n\nfrom .utils import absolute_path\n\nADMINS = (\n ('Tim Sutton', '[email protected]'),\n)\nSERVER_EMAIL = '[email protected]'\nEMAIL_HOST = 'localhost'\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nMANAGERS = ADMINS\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = absolute_path('media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\n# MEDIA_URL = '/media/'\n# setting full MEDIA_URL to be able to use it for the feeds\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = absolute_path('static')\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n absolute_path('core', 'base_static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# import SECRET_KEY into current namespace\n# noinspection PyUnresolvedReferences\nfrom .secret import SECRET_KEY # noqa\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'core.context_processors.add_intercom_app_id'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.custom_middleware.NavContextMiddleware'\n)\n\nROOT_URLCONF = 'core.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'core.wsgi.application'\n\nTEMPLATE_DIRS = (\n # project level templates\n absolute_path('core', 'base_templates'),\n absolute_path('vota', 'templates'),\n absolute_path('changes', 'templates'),\n\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.syndication',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n", "path": "django_project/core/settings/base.py"}, {"content": "\"\"\"\nThis model is to create \"ballots\" i.e. questions/proposals/changes which a\nCommittee can vote on.\n\nAfter voting is complete, a ballot should be marked as either Denied or Passed.\n\nIf no quorum is reached, no_quorum should be True\n\nA ballot has one Committee.\n\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nimport logging\nfrom core.settings.contrib import STOP_WORDS\n\nlogger = logging.getLogger(__name__)\nfrom django.db import models\nfrom audited_models.models import AuditedModel\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\nfrom vota.models.vote import Vote\nimport datetime\nfrom django.contrib.auth.models import User\n\n\nclass ApprovedCategoryManager(models.Manager):\n \"\"\"Custom category manager that shows only approved ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedCategoryManager, self).get_query_set().filter(\n approved=True)\n\n\nclass DeniedCategoryManager(models.Manager):\n \"\"\"Custom version manager that shows only denied ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n DeniedCategoryManager, self).get_query_set().filter(\n denied=True)\n\n\nclass OpenBallotManager(models.Manager):\n \"\"\"Custom version manager that shows only open ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n OpenBallotManager, self).get_query_set().filter(\n open_from__lt=timezone.now()).filter(closes__gt=timezone.now())\n\n\nclass ClosedBallotManager(models.Manager):\n \"\"\"Custom version manager that shows only closed ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ClosedBallotManager, self).get_query_set().filter(\n closes__gt=timezone.now())\n\n\nclass Ballot(AuditedModel):\n \"\"\"A category model e.g. 
gui, backend, web site etc.\"\"\"\n name = models.CharField(\n help_text=_('Name of this ballot.'),\n max_length=255,\n null=False,\n blank=False,\n unique=False\n ) # there is a unique together rule in meta class below\n\n summary = models.CharField(\n help_text=_('A brief overview of the ballot.'),\n max_length=250,\n blank=False,\n null=False\n )\n\n description = models.TextField(\n help_text=_('A full description of the proposal if a summary is not '\n 'enough!'),\n max_length=3000,\n null=True,\n blank=True,\n )\n\n approved = models.BooleanField(\n help_text=_(\n 'Whether this ballot has been approved.'),\n default=False\n )\n\n denied = models.BooleanField(\n help_text=_(\n 'Whether this ballot has been denied.'),\n default=False\n )\n\n no_quorum = models.BooleanField(\n help_text=_('Whether the ballot was denied because no quorum was '\n 'reached'),\n default=False\n )\n\n open_from = models.DateTimeField(\n help_text=_('Date the ballot opens'),\n default=timezone.now()\n )\n\n closes = models.DateTimeField(\n help_text=_('Date the ballot closes'),\n default=timezone.now() + datetime.timedelta(days=7)\n )\n\n private = models.BooleanField(\n help_text=_('Should members be prevented from viewing results before '\n 'voting?'),\n default=False\n )\n\n proposer = models.ForeignKey(User)\n # noinspection PyUnresolvedReferences\n committee = models.ForeignKey('Committee')\n slug = models.SlugField()\n objects = models.Manager()\n approved_objects = ApprovedCategoryManager()\n denied_objects = DeniedCategoryManager()\n open_objects = OpenBallotManager()\n closed_objects = ClosedBallotManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the category class.\"\"\"\n unique_together = (\n ('name', 'committee'),\n ('committee', 'slug')\n )\n app_label = 'vota'\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = slugify(new_list)[:50]\n super(Ballot, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s : %s' % (self.committee.name, self.name)\n\n def get_absolute_url(self):\n return reverse('ballot-detail', kwargs={\n 'project_slug': self.committee.project.slug,\n 'committee_slug': self.committee.slug,\n 'slug': self.slug\n })\n\n def get_user_voted(self, user=None):\n voted = False\n if Vote.objects.filter(ballot=self).filter(user=user).exists():\n voted = True\n return voted\n\n def get_positive_vote_count(self):\n votes = Vote.objects.filter(ballot=self).filter(choice='y').count()\n return votes\n\n def get_negative_vote_count(self):\n votes = Vote.objects.filter(ballot=self).filter(choice='n').count()\n return votes\n\n def get_abstainer_count(self):\n votes = Vote.objects.filter(ballot=self).filter(choice='-').count()\n return votes\n\n def get_current_tally(self):\n positive = self.get_positive_vote_count()\n negative = self.get_negative_vote_count()\n tally = 0\n tally += positive\n tally -= negative\n return tally\n\n def get_total_vote_count(self):\n vote_count = Vote.objects.filter(ballot=self).count()\n return vote_count\n\n def has_quorum(self):\n vote_count = self.get_total_vote_count()\n committee_user_count = self.committee.users.all().count()\n if committee_user_count != 0:\n quorum_percent = self.committee.quorum_setting\n percentage = 100 * float(vote_count) / float(committee_user_count)\n if percentage > quorum_percent:\n return True\n else:\n return False\n\n def 
is_open(self):\n open_date = self.open_from\n close_date = self.closes\n if open_date < timezone.now() < close_date:\n return True\n return False\n", "path": "django_project/vota/models/ballot.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\ncore.settings.base\n\"\"\"\n# Django settings for projecta project.\n\nfrom .utils import absolute_path\n\nADMINS = (\n ('Tim Sutton', '[email protected]'),\n)\nSERVER_EMAIL = '[email protected]'\nEMAIL_HOST = 'localhost'\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nMANAGERS = ADMINS\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = absolute_path('media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\n# MEDIA_URL = '/media/'\n# setting full MEDIA_URL to be able to use it for the feeds\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = absolute_path('static')\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n absolute_path('core', 'base_static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# import SECRET_KEY into current namespace\n# noinspection PyUnresolvedReferences\nfrom .secret import SECRET_KEY # noqa\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n 'core.context_processors.add_intercom_app_id'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 
'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.custom_middleware.NavContextMiddleware'\n)\n\nROOT_URLCONF = 'core.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'core.wsgi.application'\n\nTEMPLATE_DIRS = (\n # project level templates\n absolute_path('core', 'base_templates'),\n absolute_path('vota', 'templates'),\n absolute_path('changes', 'templates'),\n\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.syndication',\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n", "path": "django_project/core/settings/base.py"}, {"content": "\"\"\"\nThis model is to create \"ballots\" i.e. 
questions/proposals/changes which a\nCommittee can vote on.\n\nAfter voting is complete, a ballot should be marked as either Denied or Passed.\n\nIf no quorum is reached, no_quorum should be True\n\nA ballot has one Committee.\n\"\"\"\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nimport logging\nfrom core.settings.contrib import STOP_WORDS\n\nlogger = logging.getLogger(__name__)\nfrom django.db import models\nfrom audited_models.models import AuditedModel\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\nfrom vota.models.vote import Vote\nimport datetime\nfrom django.contrib.auth.models import User\n\n\nclass ApprovedCategoryManager(models.Manager):\n \"\"\"Custom category manager that shows only approved ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedCategoryManager, self).get_query_set().filter(\n approved=True)\n\n\nclass DeniedCategoryManager(models.Manager):\n \"\"\"Custom version manager that shows only denied ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n DeniedCategoryManager, self).get_query_set().filter(\n denied=True)\n\n\nclass OpenBallotManager(models.Manager):\n \"\"\"Custom version manager that shows only open ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n OpenBallotManager, self).get_query_set().filter(\n open_from__lt=timezone.now()).filter(closes__gt=timezone.now())\n\n\nclass ClosedBallotManager(models.Manager):\n \"\"\"Custom version manager that shows only closed ballots.\"\"\"\n\n def get_query_set(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ClosedBallotManager, self).get_query_set().filter(\n closes__gt=timezone.now())\n\n\nclass Ballot(AuditedModel):\n \"\"\"A category model e.g. 
gui, backend, web site etc.\"\"\"\n name = models.CharField(\n help_text=_('Name of this ballot.'),\n max_length=255,\n null=False,\n blank=False,\n unique=False\n ) # there is a unique together rule in meta class below\n\n summary = models.CharField(\n help_text=_('A brief overview of the ballot.'),\n max_length=250,\n blank=False,\n null=False\n )\n\n description = models.TextField(\n help_text=_('A full description of the proposal if a summary is not '\n 'enough!'),\n max_length=3000,\n null=True,\n blank=True,\n )\n\n approved = models.BooleanField(\n help_text=_(\n 'Whether this ballot has been approved.'),\n default=False\n )\n\n denied = models.BooleanField(\n help_text=_(\n 'Whether this ballot has been denied.'),\n default=False\n )\n\n no_quorum = models.BooleanField(\n help_text=_('Whether the ballot was denied because no quorum was '\n 'reached'),\n default=False\n )\n\n open_from = models.DateTimeField(\n help_text=_('Date the ballot opens'),\n default=timezone.now\n )\n\n closes = models.DateTimeField(\n help_text=_('Date the ballot closes'),\n default=timezone.now() + datetime.timedelta(days=7)\n )\n\n private = models.BooleanField(\n help_text=_('Should members be prevented from viewing results before '\n 'voting?'),\n default=False\n )\n\n proposer = models.ForeignKey(User)\n # noinspection PyUnresolvedReferences\n committee = models.ForeignKey('Committee')\n slug = models.SlugField()\n objects = models.Manager()\n approved_objects = ApprovedCategoryManager()\n denied_objects = DeniedCategoryManager()\n open_objects = OpenBallotManager()\n closed_objects = ClosedBallotManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the category class.\"\"\"\n unique_together = (\n ('name', 'committee'),\n ('committee', 'slug')\n )\n app_label = 'vota'\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = slugify(new_list)[:50]\n super(Ballot, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u'%s : %s' % (self.committee.name, self.name)\n\n def get_absolute_url(self):\n return reverse('ballot-detail', kwargs={\n 'project_slug': self.committee.project.slug,\n 'committee_slug': self.committee.slug,\n 'slug': self.slug\n })\n\n def get_user_voted(self, user=None):\n voted = False\n if Vote.objects.filter(ballot=self).filter(user=user).exists():\n voted = True\n return voted\n\n def get_positive_vote_count(self):\n votes = Vote.objects.filter(ballot=self).filter(choice='y').count()\n return votes\n\n def get_negative_vote_count(self):\n votes = Vote.objects.filter(ballot=self).filter(choice='n').count()\n return votes\n\n def get_abstainer_count(self):\n votes = Vote.objects.filter(ballot=self).filter(choice='-').count()\n return votes\n\n def get_current_tally(self):\n positive = self.get_positive_vote_count()\n negative = self.get_negative_vote_count()\n tally = 0\n tally += positive\n tally -= negative\n return tally\n\n def get_total_vote_count(self):\n vote_count = Vote.objects.filter(ballot=self).count()\n return vote_count\n\n def has_quorum(self):\n vote_count = self.get_total_vote_count()\n committee_user_count = self.committee.users.all().count()\n if committee_user_count != 0:\n quorum_percent = self.committee.quorum_setting\n percentage = 100 * float(vote_count) / float(committee_user_count)\n if percentage > quorum_percent:\n return True\n else:\n return False\n\n def 
is_open(self):\n open_date = self.open_from\n close_date = self.closes\n if open_date < timezone.now() < close_date:\n return True\n return False\n", "path": "django_project/vota/models/ballot.py"}]}
| 3,783 | 203 |
| gh_patches_debug_35267 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-6952 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`hook-pkg_resources.py` is including hidden imports that are not available
https://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L20
and
https://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L30
Refer to https://github.com/pypa/setuptools/issues/1963 for the past need for including `py2_warn` and [the subsequent removal of py2_warn from setuptools](https://github.com/pypa/setuptools/pull/2238).
The `pkg_resources.py2_warn` submodule was introduced in v45.0.0 and removed in v49.1.1. Therefore, an check for those versions would look like this:
```python
if is_module_satisfies("setuptools >= 45.0.0, < 49.1.1"):
hiddenimports.append('pkg_resources.py2_warn')
```
As for `pkg_resources.markers`, I haven't found any references to any `pkg_resources.markers` packages through Google searches. I'm not convinced that it ever existed at any point. Perhaps it was being confused for (or just a typo of) `packaging.markers`, which does exist. Since `hook-pkg_resources.py` already collects the submodules of `packaging`, there doesn't need to be an extra hidden import added for `packaging.markers`. Therefore, I think that the `pkg_resources.markers` hidden import line can be removed completely.
Another module that it is trying to include is `pkg_resources._vendor.pyparsing.diagrams`. This module can only be used when the `railroad` module is available. Therefore, a check should be added to make sure that the `railroad` module is available, and filter out the diagrams module when it isn't.
For example:
```python
from PyInstaller.utils.hooks import (
collect_submodules,
is_module_satisfies,
can_import_module,
is_module_or_submodule
)
hiddenimports = []
# pkg_resources keeps vendored modules in its _vendor subpackage and does sys.meta_path based import magic to expose
# them as pkg_resources.extern.*
if not can_import_module('railroad'):
# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so
# filter it out when `railroad` is not available.
hiddenimports += collect_submodules(
'pkg_resources._vendor',
lambda name: is_module_or_submodule(name, 'pkg_resources._vendor.pyparsing.diagrams'))
else:
hiddenimports += collect_submodules('pkg_resources._vendor')
```
PyInstaller: v5.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-pkg_resources.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2022, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies
13
14 # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose
15 # them as pkg_resources.extern.*
16 hiddenimports = collect_submodules('pkg_resources._vendor')
17
18 # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if
19 # we would replace this by a fake module.
20 hiddenimports.append('pkg_resources.py2_warn')
21
22 excludedimports = ['__main__']
23
24 # Some more hidden imports. See:
25 # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be
26 # its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if
27 # present.
28 hiddenimports += collect_submodules('packaging')
29
30 hiddenimports += ['pkg_resources.markers']
31
32 # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco
33 # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook
34 # utilities.
35 #
36 # In setuptools 60.7.0, the vendored jaraco.text package included "Lorem Ipsum.txt" data file, which also has to be
37 # collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's
38 # redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.
39 if is_module_satisfies("setuptools == 60.7.0"):
40 raise SystemExit(
41 "ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. "
42 "Downgrade to an earlier version or upgrade to a later version."
43 )
44 # In setuptools 60.7.1, the "Lorem Ipsum.txt" data file was dropped from the vendored jaraco.text package, so we can
45 # accommodate it with couple of hidden imports.
46 elif is_module_satisfies("setuptools >= 60.7.1"):
47 hiddenimports += [
48 'pkg_resources._vendor.jaraco.functools',
49 'pkg_resources._vendor.jaraco.context',
50 'pkg_resources._vendor.jaraco.text',
51 ]
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py
--- a/PyInstaller/hooks/hook-pkg_resources.py
+++ b/PyInstaller/hooks/hook-pkg_resources.py
@@ -9,26 +9,32 @@
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
-from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies
+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module
# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose
# them as pkg_resources.extern.*
-hiddenimports = collect_submodules('pkg_resources._vendor')
+
+# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so
+# filter it out when `railroad` is not available.
+if can_import_module('railroad'):
+ hiddenimports = collect_submodules('pkg_resources._vendor')
+else:
+ hiddenimports = collect_submodules(
+ 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name
+ )
# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if
# we would replace this by a fake module.
-hiddenimports.append('pkg_resources.py2_warn')
+if is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'):
+ hiddenimports.append('pkg_resources.py2_warn')
excludedimports = ['__main__']
# Some more hidden imports. See:
# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be
-# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if
-# present.
+# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.
hiddenimports += collect_submodules('packaging')
-hiddenimports += ['pkg_resources.markers']
-
# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco
# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook
# utilities.
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -9,26 +9,32 @@\n # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n #-----------------------------------------------------------------------------\n \n-from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n+from PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module\n \n # pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n # them as pkg_resources.extern.*\n-hiddenimports = collect_submodules('pkg_resources._vendor')\n+\n+# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n+# filter it out when `railroad` is not available.\n+if can_import_module('railroad'):\n+ hiddenimports = collect_submodules('pkg_resources._vendor')\n+else:\n+ hiddenimports = collect_submodules(\n+ 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n+ )\n \n # pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n # we would replace this by a fake module.\n-hiddenimports.append('pkg_resources.py2_warn')\n+if is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'):\n+ hiddenimports.append('pkg_resources.py2_warn')\n \n excludedimports = ['__main__']\n \n # Some more hidden imports. See:\n # https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n-# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n-# present.\n+# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\n hiddenimports += collect_submodules('packaging')\n \n-hiddenimports += ['pkg_resources.markers']\n-\n # As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n # namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n # utilities.\n", "issue": "`hook-pkg_resources.py` is including hidden imports that are not available\nhttps://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L20\r\n\r\nand\r\n\r\nhttps://github.com/pyinstaller/pyinstaller/blob/3b5d233d02472ad9d589a798d19ad3d3df645223/PyInstaller/hooks/hook-pkg_resources.py#L30\r\n\r\nRefer to https://github.com/pypa/setuptools/issues/1963 for the past need for including `py2_warn` and [the subsequent removal of py2_warn from setuptools](https://github.com/pypa/setuptools/pull/2238).\r\n\r\nThe `pkg_resources.py2_warn` submodule was introduced in v45.0.0 and removed in v49.1.1. Therefore, an check for those versions would look like this:\r\n```python\r\nif is_module_satisfies(\"setuptools >= 45.0.0, < 49.1.1\"):\r\n hiddenimports.append('pkg_resources.py2_warn')\r\n```\r\n\r\nAs for `pkg_resources.markers`, I haven't found any references to any `pkg_resources.markers` packages through Google searches. I'm not convinced that it ever existed at any point. Perhaps it was being confused for (or just a typo of) `packaging.markers`, which does exist. 
Since `hook-pkg_resources.py` already collects the submodules of `packaging`, there doesn't need to be an extra hidden import added for `packaging.markers`. Therefore, I think that the `pkg_resources.markers` hidden import line can be removed completely.\r\n\r\nAnother module that it is trying to include is `pkg_resources._vendor.pyparsing.diagrams`. This module can only be used when the `railroad` module is available. Therefore, a check should be added to make sure that the `railroad` module is available, and filter out the diagrams module when it isn't.\r\n\r\nFor example:\r\n```python\r\nfrom PyInstaller.utils.hooks import (\r\n collect_submodules,\r\n is_module_satisfies,\r\n can_import_module,\r\n is_module_or_submodule\r\n)\r\n\r\nhiddenimports = []\r\n\r\n# pkg_resources keeps vendored modules in its _vendor subpackage and does sys.meta_path based import magic to expose\r\n# them as pkg_resources.extern.*\r\nif not can_import_module('railroad'):\r\n # The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\r\n # filter it out when `railroad` is not available.\r\n hiddenimports += collect_submodules(\r\n 'pkg_resources._vendor',\r\n lambda name: is_module_or_submodule(name, 'pkg_resources._vendor.pyparsing.diagrams'))\r\nelse:\r\n hiddenimports += collect_submodules('pkg_resources._vendor')\r\n```\r\n\r\nPyInstaller: v5.1\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, is_module_satisfies\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nhiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both. Assume the worst and include both if\n# present.\nhiddenimports += collect_submodules('packaging')\n\nhiddenimports += ['pkg_resources.markers']\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. 
However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif is_module_satisfies(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. \"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif is_module_satisfies(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.utils.hooks import collect_submodules, is_module_satisfies, can_import_module\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does sys.meta_path based import magic to expose\n# them as pkg_resources.extern.*\n\n# The `railroad` package is an optional requirement for `pyparsing`. `pyparsing.diagrams` depends on `railroad`, so\n# filter it out when `railroad` is not available.\nif can_import_module('railroad'):\n hiddenimports = collect_submodules('pkg_resources._vendor')\nelse:\n hiddenimports = collect_submodules(\n 'pkg_resources._vendor', filter=lambda name: 'pkg_resources._vendor.pyparsing.diagram' not in name\n )\n\n# pkg_resources v45.0 dropped support for Python 2 and added this module printing a warning. We could save some bytes if\n# we would replace this by a fake module.\nif is_module_satisfies('setuptools >= 45.0.0, < 49.1.1'):\n hiddenimports.append('pkg_resources.py2_warn')\n\nexcludedimports = ['__main__']\n\n# Some more hidden imports. See:\n# https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/15#issuecomment-663699288 `packaging` can either be\n# its own package, or embedded in `pkg_resources._vendor.packaging`, or both.\nhiddenimports += collect_submodules('packaging')\n\n# As of v60.7, setuptools vendored jaraco and has pkg_resources use it. Currently, the pkg_resources._vendor.jaraco\n# namespace package cannot be automatically scanned due to limited support for pure namespace packages in our hook\n# utilities.\n#\n# In setuptools 60.7.0, the vendored jaraco.text package included \"Lorem Ipsum.txt\" data file, which also has to be\n# collected. However, the presence of the data file (and the resulting directory hierarchy) confuses the importer's\n# redirection logic; instead of trying to work-around that, tell user to upgrade or downgrade their setuptools.\nif is_module_satisfies(\"setuptools == 60.7.0\"):\n raise SystemExit(\n \"ERROR: Setuptools 60.7.0 is incompatible with PyInstaller. 
\"\n \"Downgrade to an earlier version or upgrade to a later version.\"\n )\n# In setuptools 60.7.1, the \"Lorem Ipsum.txt\" data file was dropped from the vendored jaraco.text package, so we can\n# accommodate it with couple of hidden imports.\nelif is_module_satisfies(\"setuptools >= 60.7.1\"):\n hiddenimports += [\n 'pkg_resources._vendor.jaraco.functools',\n 'pkg_resources._vendor.jaraco.context',\n 'pkg_resources._vendor.jaraco.text',\n ]\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]}
| 1,624 | 551 |
| gh_patches_debug_4257 | rasdani/github-patches | git_diff | keras-team__autokeras-1312 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tuner use previous layer's output to adapt the next
<!-- STEP 1: Give the pull request a meaningful title. -->
### Which issue(s) does this Pull Request fix?
<!-- STEP 2: Replace the "000" with the issue ID this pull request resolves. -->
resolves #1312
### Details of the Pull Request
<!-- STEP 3: Add details/comments on the pull request. -->
<!-- STEP 4: If the pull request is in progress, click the down green arrow to select "Create Draft Pull Request", and click the button. If the pull request is ready to be reviewed, click "Create Pull Request" button directly. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/engine/tuner.py`
Content:
```
1 # Copyright 2020 The AutoKeras Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import copy
16 import os
17
18 import kerastuner
19 import tensorflow as tf
20 from kerastuner.engine import hypermodel as hm_module
21 from tensorflow.keras import callbacks as tf_callbacks
22 from tensorflow.keras.layers.experimental import preprocessing
23 from tensorflow.python.util import nest
24
25 from autokeras.utils import utils
26
27
28 class AutoTuner(kerastuner.engine.tuner.Tuner):
29 """A Tuner class based on KerasTuner for AutoKeras.
30
31 Different from KerasTuner's Tuner class. AutoTuner's not only tunes the
32 Hypermodel which can be directly built into a Keras model, but also the
33 preprocessors. Therefore, a HyperGraph stores the overall search space containing
34 both the Preprocessors and Hypermodel. For every trial, the HyperGraph build the
35 PreprocessGraph and KerasGraph with the provided HyperParameters.
36
37 The AutoTuner uses EarlyStopping for acceleration during the search and fully
38 train the model with full epochs and with both training and validation data.
39 The fully trained model is the best model to be used by AutoModel.
40
41 # Arguments
42 preprocessors: An instance or list of `Preprocessor` objects corresponding to
43 each AutoModel input, to preprocess a `tf.data.Dataset` before passing it
44 to the model. Defaults to None (no external preprocessing).
45 **kwargs: The args supported by KerasTuner.
46 """
47
48 def __init__(self, oracle, hypermodel, preprocessors=None, **kwargs):
49 # Initialize before super() for reload to work.
50 self._finished = False
51 super().__init__(oracle, hypermodel, **kwargs)
52 self.preprocessors = nest.flatten(preprocessors)
53 # Save or load the HyperModel.
54 self.hypermodel.hypermodel.save(os.path.join(self.project_dir, "graph"))
55
56 # Override the function to prevent building the model during initialization.
57 def _populate_initial_space(self):
58 pass
59
60 def get_best_model(self):
61 model = self._build_best_model()
62 with hm_module.maybe_distribute(self.distribution_strategy):
63 model.load_weights(self.best_model_path)
64 return model
65
66 def _on_train_begin(self, model, hp, x, *args, **kwargs):
67 """Adapt the preprocessing layers and tune the fit arguments."""
68 self.adapt(model, x)
69
70 @staticmethod
71 def adapt(model, dataset):
72 """Adapt the preprocessing layers in the model."""
73 # Currently, only support using the original dataset to adapt all the
74 # preprocessing layers before the first non-preprocessing layer.
75 # TODO: Use PreprocessingStage for preprocessing layers adapt.
76 # TODO: Use Keras Tuner for preprocessing layers adapt.
77 x = dataset.map(lambda x, y: x)
78
79 def get_output_layer(tensor):
80 tensor = nest.flatten(tensor)[0]
81 for layer in model.layers:
82 if isinstance(layer, tf.keras.layers.InputLayer):
83 continue
84 input_node = nest.flatten(layer.input)[0]
85 if input_node is tensor:
86 return layer
87 return None
88
89 for index, input_node in enumerate(nest.flatten(model.input)):
90 temp_x = x.map(lambda *args: nest.flatten(args)[index])
91 layer = get_output_layer(input_node)
92 while isinstance(layer, preprocessing.PreprocessingLayer):
93 layer.adapt(temp_x)
94 layer = get_output_layer(layer.output)
95 return model
96
97 def search(
98 self, epochs=None, callbacks=None, fit_on_val_data=False, **fit_kwargs
99 ):
100 """Search for the best HyperParameters.
101
102 If there is not early-stopping in the callbacks, the early-stopping callback
103 is injected to accelerate the search process. At the end of the search, the
104 best model will be fully trained with the specified number of epochs.
105
106 # Arguments
107 callbacks: A list of callback functions. Defaults to None.
108 fit_on_val_data: Boolean. Use the training set and validation set for the
109 final fit of the best model.
110 """
111 if self._finished:
112 return
113
114 if callbacks is None:
115 callbacks = []
116
117 # Insert early-stopping for adaptive number of epochs.
118 epochs_provided = True
119 if epochs is None:
120 epochs_provided = False
121 epochs = 1000
122 if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
123 callbacks.append(tf_callbacks.EarlyStopping(patience=10))
124
125 # Insert early-stopping for acceleration.
126 early_stopping_inserted = False
127 new_callbacks = self._deepcopy_callbacks(callbacks)
128 if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):
129 early_stopping_inserted = True
130 new_callbacks.append(tf_callbacks.EarlyStopping(patience=10))
131
132 # Populate initial search space.
133 hp = self.oracle.get_space()
134 self.hypermodel.build(hp)
135 self.oracle.update_space(hp)
136
137 super().search(epochs=epochs, callbacks=new_callbacks, **fit_kwargs)
138
139 # Train the best model use validation data.
140 # Train the best model with enought number of epochs.
141 if fit_on_val_data or early_stopping_inserted:
142 copied_fit_kwargs = copy.copy(fit_kwargs)
143
144 # Remove early-stopping since no validation data.
145 # Remove early-stopping since it is inserted.
146 copied_fit_kwargs["callbacks"] = self._remove_early_stopping(callbacks)
147
148 # Decide the number of epochs.
149 copied_fit_kwargs["epochs"] = epochs
150 if not epochs_provided:
151 copied_fit_kwargs["epochs"] = self._get_best_trial_epochs()
152
153 # Concatenate training and validation data.
154 if fit_on_val_data:
155 copied_fit_kwargs["x"] = copied_fit_kwargs["x"].concatenate(
156 fit_kwargs["validation_data"]
157 )
158 copied_fit_kwargs.pop("validation_data")
159
160 model = self.final_fit(**copied_fit_kwargs)
161 else:
162 model = self.get_best_models()[0]
163
164 model.save_weights(self.best_model_path)
165 self._finished = True
166
167 def get_state(self):
168 state = super().get_state()
169 state.update({"finished": self._finished})
170 return state
171
172 def set_state(self, state):
173 super().set_state(state)
174 self._finished = state.get("finished")
175
176 @staticmethod
177 def _remove_early_stopping(callbacks):
178 return [
179 copy.deepcopy(callbacks)
180 for callback in callbacks
181 if not isinstance(callback, tf_callbacks.EarlyStopping)
182 ]
183
184 def _get_best_trial_epochs(self):
185 best_trial = self.oracle.get_best_trials(1)[0]
186 return self.oracle.get_trial(best_trial.trial_id).best_step
187
188 def _build_best_model(self):
189 best_trial = self.oracle.get_best_trials(1)[0]
190 best_hp = best_trial.hyperparameters
191 return self.hypermodel.build(best_hp)
192
193 def final_fit(self, x=None, **fit_kwargs):
194 model = self._build_best_model()
195 self.adapt(model, x)
196 model.fit(x, **fit_kwargs)
197 return model
198
199 @property
200 def best_model_path(self):
201 return os.path.join(self.project_dir, "best_model")
202
203 @property
204 def objective(self):
205 return self.oracle.objective
206
207 @property
208 def max_trials(self):
209 return self.oracle.max_trials
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/autokeras/engine/tuner.py b/autokeras/engine/tuner.py
--- a/autokeras/engine/tuner.py
+++ b/autokeras/engine/tuner.py
@@ -91,6 +91,7 @@
layer = get_output_layer(input_node)
while isinstance(layer, preprocessing.PreprocessingLayer):
layer.adapt(temp_x)
+ temp_x = temp_x.map(layer)
layer = get_output_layer(layer.output)
return model
|
{"golden_diff": "diff --git a/autokeras/engine/tuner.py b/autokeras/engine/tuner.py\n--- a/autokeras/engine/tuner.py\n+++ b/autokeras/engine/tuner.py\n@@ -91,6 +91,7 @@\n layer = get_output_layer(input_node)\n while isinstance(layer, preprocessing.PreprocessingLayer):\n layer.adapt(temp_x)\n+ temp_x = temp_x.map(layer)\n layer = get_output_layer(layer.output)\n return model\n", "issue": "tuner use previous layer's output to adapt the next\n<!-- STEP 1: Give the pull request a meaningful title. -->\r\n### Which issue(s) does this Pull Request fix?\r\n<!-- STEP 2: Replace the \"000\" with the issue ID this pull request resolves. -->\r\nresolves #1312 \r\n\r\n### Details of the Pull Request\r\n<!-- STEP 3: Add details/comments on the pull request. -->\r\n\r\n<!-- STEP 4: If the pull request is in progress, click the down green arrow to select \"Create Draft Pull Request\", and click the button. If the pull request is ready to be reviewed, click \"Create Pull Request\" button directly. -->\r\n\n", "before_files": [{"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport os\n\nimport kerastuner\nimport tensorflow as tf\nfrom kerastuner.engine import hypermodel as hm_module\nfrom tensorflow.keras import callbacks as tf_callbacks\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.python.util import nest\n\nfrom autokeras.utils import utils\n\n\nclass AutoTuner(kerastuner.engine.tuner.Tuner):\n \"\"\"A Tuner class based on KerasTuner for AutoKeras.\n\n Different from KerasTuner's Tuner class. AutoTuner's not only tunes the\n Hypermodel which can be directly built into a Keras model, but also the\n preprocessors. Therefore, a HyperGraph stores the overall search space containing\n both the Preprocessors and Hypermodel. For every trial, the HyperGraph build the\n PreprocessGraph and KerasGraph with the provided HyperParameters.\n\n The AutoTuner uses EarlyStopping for acceleration during the search and fully\n train the model with full epochs and with both training and validation data.\n The fully trained model is the best model to be used by AutoModel.\n\n # Arguments\n preprocessors: An instance or list of `Preprocessor` objects corresponding to\n each AutoModel input, to preprocess a `tf.data.Dataset` before passing it\n to the model. 
Defaults to None (no external preprocessing).\n **kwargs: The args supported by KerasTuner.\n \"\"\"\n\n def __init__(self, oracle, hypermodel, preprocessors=None, **kwargs):\n # Initialize before super() for reload to work.\n self._finished = False\n super().__init__(oracle, hypermodel, **kwargs)\n self.preprocessors = nest.flatten(preprocessors)\n # Save or load the HyperModel.\n self.hypermodel.hypermodel.save(os.path.join(self.project_dir, \"graph\"))\n\n # Override the function to prevent building the model during initialization.\n def _populate_initial_space(self):\n pass\n\n def get_best_model(self):\n model = self._build_best_model()\n with hm_module.maybe_distribute(self.distribution_strategy):\n model.load_weights(self.best_model_path)\n return model\n\n def _on_train_begin(self, model, hp, x, *args, **kwargs):\n \"\"\"Adapt the preprocessing layers and tune the fit arguments.\"\"\"\n self.adapt(model, x)\n\n @staticmethod\n def adapt(model, dataset):\n \"\"\"Adapt the preprocessing layers in the model.\"\"\"\n # Currently, only support using the original dataset to adapt all the\n # preprocessing layers before the first non-preprocessing layer.\n # TODO: Use PreprocessingStage for preprocessing layers adapt.\n # TODO: Use Keras Tuner for preprocessing layers adapt.\n x = dataset.map(lambda x, y: x)\n\n def get_output_layer(tensor):\n tensor = nest.flatten(tensor)[0]\n for layer in model.layers:\n if isinstance(layer, tf.keras.layers.InputLayer):\n continue\n input_node = nest.flatten(layer.input)[0]\n if input_node is tensor:\n return layer\n return None\n\n for index, input_node in enumerate(nest.flatten(model.input)):\n temp_x = x.map(lambda *args: nest.flatten(args)[index])\n layer = get_output_layer(input_node)\n while isinstance(layer, preprocessing.PreprocessingLayer):\n layer.adapt(temp_x)\n layer = get_output_layer(layer.output)\n return model\n\n def search(\n self, epochs=None, callbacks=None, fit_on_val_data=False, **fit_kwargs\n ):\n \"\"\"Search for the best HyperParameters.\n\n If there is not early-stopping in the callbacks, the early-stopping callback\n is injected to accelerate the search process. At the end of the search, the\n best model will be fully trained with the specified number of epochs.\n\n # Arguments\n callbacks: A list of callback functions. Defaults to None.\n fit_on_val_data: Boolean. 
Use the training set and validation set for the\n final fit of the best model.\n \"\"\"\n if self._finished:\n return\n\n if callbacks is None:\n callbacks = []\n\n # Insert early-stopping for adaptive number of epochs.\n epochs_provided = True\n if epochs is None:\n epochs_provided = False\n epochs = 1000\n if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):\n callbacks.append(tf_callbacks.EarlyStopping(patience=10))\n\n # Insert early-stopping for acceleration.\n early_stopping_inserted = False\n new_callbacks = self._deepcopy_callbacks(callbacks)\n if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):\n early_stopping_inserted = True\n new_callbacks.append(tf_callbacks.EarlyStopping(patience=10))\n\n # Populate initial search space.\n hp = self.oracle.get_space()\n self.hypermodel.build(hp)\n self.oracle.update_space(hp)\n\n super().search(epochs=epochs, callbacks=new_callbacks, **fit_kwargs)\n\n # Train the best model use validation data.\n # Train the best model with enought number of epochs.\n if fit_on_val_data or early_stopping_inserted:\n copied_fit_kwargs = copy.copy(fit_kwargs)\n\n # Remove early-stopping since no validation data.\n # Remove early-stopping since it is inserted.\n copied_fit_kwargs[\"callbacks\"] = self._remove_early_stopping(callbacks)\n\n # Decide the number of epochs.\n copied_fit_kwargs[\"epochs\"] = epochs\n if not epochs_provided:\n copied_fit_kwargs[\"epochs\"] = self._get_best_trial_epochs()\n\n # Concatenate training and validation data.\n if fit_on_val_data:\n copied_fit_kwargs[\"x\"] = copied_fit_kwargs[\"x\"].concatenate(\n fit_kwargs[\"validation_data\"]\n )\n copied_fit_kwargs.pop(\"validation_data\")\n\n model = self.final_fit(**copied_fit_kwargs)\n else:\n model = self.get_best_models()[0]\n\n model.save_weights(self.best_model_path)\n self._finished = True\n\n def get_state(self):\n state = super().get_state()\n state.update({\"finished\": self._finished})\n return state\n\n def set_state(self, state):\n super().set_state(state)\n self._finished = state.get(\"finished\")\n\n @staticmethod\n def _remove_early_stopping(callbacks):\n return [\n copy.deepcopy(callbacks)\n for callback in callbacks\n if not isinstance(callback, tf_callbacks.EarlyStopping)\n ]\n\n def _get_best_trial_epochs(self):\n best_trial = self.oracle.get_best_trials(1)[0]\n return self.oracle.get_trial(best_trial.trial_id).best_step\n\n def _build_best_model(self):\n best_trial = self.oracle.get_best_trials(1)[0]\n best_hp = best_trial.hyperparameters\n return self.hypermodel.build(best_hp)\n\n def final_fit(self, x=None, **fit_kwargs):\n model = self._build_best_model()\n self.adapt(model, x)\n model.fit(x, **fit_kwargs)\n return model\n\n @property\n def best_model_path(self):\n return os.path.join(self.project_dir, \"best_model\")\n\n @property\n def objective(self):\n return self.oracle.objective\n\n @property\n def max_trials(self):\n return self.oracle.max_trials\n", "path": "autokeras/engine/tuner.py"}], "after_files": [{"content": "# Copyright 2020 The AutoKeras Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for 
the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport os\n\nimport kerastuner\nimport tensorflow as tf\nfrom kerastuner.engine import hypermodel as hm_module\nfrom tensorflow.keras import callbacks as tf_callbacks\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom tensorflow.python.util import nest\n\nfrom autokeras.utils import utils\n\n\nclass AutoTuner(kerastuner.engine.tuner.Tuner):\n \"\"\"A Tuner class based on KerasTuner for AutoKeras.\n\n Different from KerasTuner's Tuner class. AutoTuner's not only tunes the\n Hypermodel which can be directly built into a Keras model, but also the\n preprocessors. Therefore, a HyperGraph stores the overall search space containing\n both the Preprocessors and Hypermodel. For every trial, the HyperGraph build the\n PreprocessGraph and KerasGraph with the provided HyperParameters.\n\n The AutoTuner uses EarlyStopping for acceleration during the search and fully\n train the model with full epochs and with both training and validation data.\n The fully trained model is the best model to be used by AutoModel.\n\n # Arguments\n preprocessors: An instance or list of `Preprocessor` objects corresponding to\n each AutoModel input, to preprocess a `tf.data.Dataset` before passing it\n to the model. Defaults to None (no external preprocessing).\n **kwargs: The args supported by KerasTuner.\n \"\"\"\n\n def __init__(self, oracle, hypermodel, preprocessors=None, **kwargs):\n # Initialize before super() for reload to work.\n self._finished = False\n super().__init__(oracle, hypermodel, **kwargs)\n self.preprocessors = nest.flatten(preprocessors)\n # Save or load the HyperModel.\n self.hypermodel.hypermodel.save(os.path.join(self.project_dir, \"graph\"))\n\n # Override the function to prevent building the model during initialization.\n def _populate_initial_space(self):\n pass\n\n def get_best_model(self):\n model = self._build_best_model()\n with hm_module.maybe_distribute(self.distribution_strategy):\n model.load_weights(self.best_model_path)\n return model\n\n def _on_train_begin(self, model, hp, x, *args, **kwargs):\n \"\"\"Adapt the preprocessing layers and tune the fit arguments.\"\"\"\n self.adapt(model, x)\n\n @staticmethod\n def adapt(model, dataset):\n \"\"\"Adapt the preprocessing layers in the model.\"\"\"\n # Currently, only support using the original dataset to adapt all the\n # preprocessing layers before the first non-preprocessing layer.\n # TODO: Use PreprocessingStage for preprocessing layers adapt.\n # TODO: Use Keras Tuner for preprocessing layers adapt.\n x = dataset.map(lambda x, y: x)\n\n def get_output_layer(tensor):\n tensor = nest.flatten(tensor)[0]\n for layer in model.layers:\n if isinstance(layer, tf.keras.layers.InputLayer):\n continue\n input_node = nest.flatten(layer.input)[0]\n if input_node is tensor:\n return layer\n return None\n\n for index, input_node in enumerate(nest.flatten(model.input)):\n temp_x = x.map(lambda *args: nest.flatten(args)[index])\n layer = get_output_layer(input_node)\n while isinstance(layer, preprocessing.PreprocessingLayer):\n layer.adapt(temp_x)\n temp_x = temp_x.map(layer)\n layer = get_output_layer(layer.output)\n return model\n\n def search(\n self, epochs=None, callbacks=None, fit_on_val_data=False, **fit_kwargs\n ):\n \"\"\"Search for the best HyperParameters.\n\n If there is not early-stopping in the callbacks, the early-stopping callback\n is injected to accelerate the search process. 
At the end of the search, the\n best model will be fully trained with the specified number of epochs.\n\n # Arguments\n callbacks: A list of callback functions. Defaults to None.\n fit_on_val_data: Boolean. Use the training set and validation set for the\n final fit of the best model.\n \"\"\"\n if self._finished:\n return\n\n if callbacks is None:\n callbacks = []\n\n # Insert early-stopping for adaptive number of epochs.\n epochs_provided = True\n if epochs is None:\n epochs_provided = False\n epochs = 1000\n if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):\n callbacks.append(tf_callbacks.EarlyStopping(patience=10))\n\n # Insert early-stopping for acceleration.\n early_stopping_inserted = False\n new_callbacks = self._deepcopy_callbacks(callbacks)\n if not utils.contain_instance(callbacks, tf_callbacks.EarlyStopping):\n early_stopping_inserted = True\n new_callbacks.append(tf_callbacks.EarlyStopping(patience=10))\n\n # Populate initial search space.\n hp = self.oracle.get_space()\n self.hypermodel.build(hp)\n self.oracle.update_space(hp)\n\n super().search(epochs=epochs, callbacks=new_callbacks, **fit_kwargs)\n\n # Train the best model use validation data.\n # Train the best model with enought number of epochs.\n if fit_on_val_data or early_stopping_inserted:\n copied_fit_kwargs = copy.copy(fit_kwargs)\n\n # Remove early-stopping since no validation data.\n # Remove early-stopping since it is inserted.\n copied_fit_kwargs[\"callbacks\"] = self._remove_early_stopping(callbacks)\n\n # Decide the number of epochs.\n copied_fit_kwargs[\"epochs\"] = epochs\n if not epochs_provided:\n copied_fit_kwargs[\"epochs\"] = self._get_best_trial_epochs()\n\n # Concatenate training and validation data.\n if fit_on_val_data:\n copied_fit_kwargs[\"x\"] = copied_fit_kwargs[\"x\"].concatenate(\n fit_kwargs[\"validation_data\"]\n )\n copied_fit_kwargs.pop(\"validation_data\")\n\n model = self.final_fit(**copied_fit_kwargs)\n else:\n model = self.get_best_models()[0]\n\n model.save_weights(self.best_model_path)\n self._finished = True\n\n def get_state(self):\n state = super().get_state()\n state.update({\"finished\": self._finished})\n return state\n\n def set_state(self, state):\n super().set_state(state)\n self._finished = state.get(\"finished\")\n\n @staticmethod\n def _remove_early_stopping(callbacks):\n return [\n copy.deepcopy(callbacks)\n for callback in callbacks\n if not isinstance(callback, tf_callbacks.EarlyStopping)\n ]\n\n def _get_best_trial_epochs(self):\n best_trial = self.oracle.get_best_trials(1)[0]\n return self.oracle.get_trial(best_trial.trial_id).best_step\n\n def _build_best_model(self):\n best_trial = self.oracle.get_best_trials(1)[0]\n best_hp = best_trial.hyperparameters\n return self.hypermodel.build(best_hp)\n\n def final_fit(self, x=None, **fit_kwargs):\n model = self._build_best_model()\n self.adapt(model, x)\n model.fit(x, **fit_kwargs)\n return model\n\n @property\n def best_model_path(self):\n return os.path.join(self.project_dir, \"best_model\")\n\n @property\n def objective(self):\n return self.oracle.objective\n\n @property\n def max_trials(self):\n return self.oracle.max_trials\n", "path": "autokeras/engine/tuner.py"}]}
| 2,642 | 105 |
gh_patches_debug_2753
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-1172
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Star imports from `mlserver.codecs` not working
For example:
```python
from mlserver.codecs import *
```
Throws an error:
```python
Traceback (most recent call last):
File "/home/janis/.conda/envs/py310/lib/python3.10/site-packages/IPython/core/interactiveshell.py", line 3460, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-2-b8cc62508f29>", line 1, in <module>
from mlserver.codecs import *
AttributeError: module 'mlserver.codecs' has no attribute 'StringRequestCodec'
```
This is likely because `__all__` is out-of-date with the actual imports. I haven't tested other sub-packages, but it might be worth looking at these.
P.S. I'm not a big fan of `__all__` and star imports in particular; the main issue is that the existence of `__all__` gives rise to two public APIs which may diverge (as it has in this case).
--- END ISSUE ---
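To make the failure mode concrete, here is a minimal, self-contained reproduction of the `__all__`/import mismatch; the module name `demo_codecs` and its attributes are purely illustrative and not part of the MLServer codebase:
```python
import sys
import types

# Throwaway module whose __all__ advertises a name it never defines,
# mirroring the mismatch described above (names are illustrative, not MLServer's).
demo = types.ModuleType("demo_codecs")
demo.StringCodec = object()                           # actually defined
demo.__all__ = ["StringCodec", "StringRequestCodec"]  # second name is never set
sys.modules["demo_codecs"] = demo

try:
    from demo_codecs import *   # star import walks __all__ and getattr()s each name
except AttributeError as err:
    print(err)  # module 'demo_codecs' has no attribute 'StringRequestCodec'

from demo_codecs import StringCodec  # a plain import still works, so the two "public APIs" diverge
```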
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/codecs/__init__.py`
Content:
```
1 from .numpy import NumpyCodec, NumpyRequestCodec
2 from .pandas import PandasCodec
3 from .string import StringCodec
4 from .base64 import Base64Codec
5 from .datetime import DatetimeCodec
6 from .errors import CodecError
7 from .decorator import decode_args
8 from .base import (
9 InputCodec,
10 RequestCodec,
11 register_input_codec,
12 register_request_codec,
13 InputCodecLike,
14 RequestCodecLike,
15 )
16 from .utils import (
17 DecodedParameterName,
18 has_decoded,
19 get_decoded,
20 get_decoded_or_raw,
21 encode_inference_response,
22 encode_response_output,
23 decode_request_input,
24 decode_inference_request,
25 )
26
27 __all__ = [
28 "CodecError",
29 "NumpyCodec",
30 "NumpyRequestCodec",
31 "StringCodec",
32 "StringRequestCodec",
33 "Base64Codec",
34 "DatetimeCodec",
35 "PandasCodec",
36 "InputCodec",
37 "InputCodecLike",
38 "RequestCodec",
39 "RequestCodecLike",
40 "DecodedParameterName",
41 "register_input_codec",
42 "register_request_codec",
43 "has_decoded",
44 "get_decoded",
45 "get_decoded_or_raw",
46 "encode_inference_response",
47 "encode_response_output",
48 "decode_request_input",
49 "decode_inference_request",
50 "decode_args",
51 ]
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlserver/codecs/__init__.py b/mlserver/codecs/__init__.py
--- a/mlserver/codecs/__init__.py
+++ b/mlserver/codecs/__init__.py
@@ -1,6 +1,6 @@
from .numpy import NumpyCodec, NumpyRequestCodec
from .pandas import PandasCodec
-from .string import StringCodec
+from .string import StringCodec, StringRequestCodec
from .base64 import Base64Codec
from .datetime import DatetimeCodec
from .errors import CodecError
|
{"golden_diff": "diff --git a/mlserver/codecs/__init__.py b/mlserver/codecs/__init__.py\n--- a/mlserver/codecs/__init__.py\n+++ b/mlserver/codecs/__init__.py\n@@ -1,6 +1,6 @@\n from .numpy import NumpyCodec, NumpyRequestCodec\n from .pandas import PandasCodec\n-from .string import StringCodec\n+from .string import StringCodec, StringRequestCodec\n from .base64 import Base64Codec\n from .datetime import DatetimeCodec\n from .errors import CodecError\n", "issue": "Star imports from `mlserver.codecs` not working\nFor example:\r\n\r\n```python\r\nfrom mlserver.codecs import *\r\n```\r\nThrows an error:\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/janis/.conda/envs/py310/lib/python3.10/site-packages/IPython/core/interactiveshell.py\", line 3460, in run_code\r\n exec(code_obj, self.user_global_ns, self.user_ns)\r\n File \"<ipython-input-2-b8cc62508f29>\", line 1, in <module>\r\n from mlserver.codecs import *\r\nAttributeError: module 'mlserver.codecs' has no attribute 'StringRequestCodec'\r\n```\r\n\r\nThis is likely because `__all__` is out-of-date with the actual imports. I haven't tested other sub-packages, but it might be worth looking at these.\r\n\r\nP.S. I'm not a big fan of `__all__` and star imports in particular, the main issue is that the existence of `__all__` gives rise to two public APIs which may diverge (as it has in this case).\n", "before_files": [{"content": "from .numpy import NumpyCodec, NumpyRequestCodec\nfrom .pandas import PandasCodec\nfrom .string import StringCodec\nfrom .base64 import Base64Codec\nfrom .datetime import DatetimeCodec\nfrom .errors import CodecError\nfrom .decorator import decode_args\nfrom .base import (\n InputCodec,\n RequestCodec,\n register_input_codec,\n register_request_codec,\n InputCodecLike,\n RequestCodecLike,\n)\nfrom .utils import (\n DecodedParameterName,\n has_decoded,\n get_decoded,\n get_decoded_or_raw,\n encode_inference_response,\n encode_response_output,\n decode_request_input,\n decode_inference_request,\n)\n\n__all__ = [\n \"CodecError\",\n \"NumpyCodec\",\n \"NumpyRequestCodec\",\n \"StringCodec\",\n \"StringRequestCodec\",\n \"Base64Codec\",\n \"DatetimeCodec\",\n \"PandasCodec\",\n \"InputCodec\",\n \"InputCodecLike\",\n \"RequestCodec\",\n \"RequestCodecLike\",\n \"DecodedParameterName\",\n \"register_input_codec\",\n \"register_request_codec\",\n \"has_decoded\",\n \"get_decoded\",\n \"get_decoded_or_raw\",\n \"encode_inference_response\",\n \"encode_response_output\",\n \"decode_request_input\",\n \"decode_inference_request\",\n \"decode_args\",\n]\n", "path": "mlserver/codecs/__init__.py"}], "after_files": [{"content": "from .numpy import NumpyCodec, NumpyRequestCodec\nfrom .pandas import PandasCodec\nfrom .string import StringCodec, StringRequestCodec\nfrom .base64 import Base64Codec\nfrom .datetime import DatetimeCodec\nfrom .errors import CodecError\nfrom .decorator import decode_args\nfrom .base import (\n InputCodec,\n RequestCodec,\n register_input_codec,\n register_request_codec,\n InputCodecLike,\n RequestCodecLike,\n)\nfrom .utils import (\n DecodedParameterName,\n has_decoded,\n get_decoded,\n get_decoded_or_raw,\n encode_inference_response,\n encode_response_output,\n decode_request_input,\n decode_inference_request,\n)\n\n__all__ = [\n \"CodecError\",\n \"NumpyCodec\",\n \"NumpyRequestCodec\",\n \"StringCodec\",\n \"StringRequestCodec\",\n \"Base64Codec\",\n \"DatetimeCodec\",\n \"PandasCodec\",\n \"InputCodec\",\n \"InputCodecLike\",\n \"RequestCodec\",\n \"RequestCodecLike\",\n 
\"DecodedParameterName\",\n \"register_input_codec\",\n \"register_request_codec\",\n \"has_decoded\",\n \"get_decoded\",\n \"get_decoded_or_raw\",\n \"encode_inference_response\",\n \"encode_response_output\",\n \"decode_request_input\",\n \"decode_inference_request\",\n \"decode_args\",\n]\n", "path": "mlserver/codecs/__init__.py"}]}
| 895 | 121 |
gh_patches_debug_19124
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-664
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
errors in balanced_parentheses.py
https://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L13-L17
`Line 16` returns the wrong result because of the `not`.
`Line 15` can cause an error for inputs like **))**; before calling _pop_, it should be checked whether the stack is empty.
https://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L3
There is no module named **.Stack**. It should be
_from **s**tack import Stack_
--- END ISSUE ---
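For reference, a corrected version of the check would look roughly like the sketch below; it uses a plain Python list in place of the repository's `Stack` class, so it is illustrative rather than a drop-in patch:
```python
def balanced_parentheses(parentheses: str) -> bool:
    """Check whether a string of parentheses is balanced (illustrative sketch)."""
    stack = []
    for parenthesis in parentheses:
        if parenthesis == "(":
            stack.append(parenthesis)
        elif parenthesis == ")":
            if not stack:      # nothing to pop: inputs like "))" are unbalanced
                return False
            stack.pop()
    return not stack           # balanced only if every "(" was matched


assert balanced_parentheses("((()))")
assert not balanced_parentheses("((())")
assert not balanced_parentheses("))")
```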
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `data_structures/stacks/balanced_parentheses.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import absolute_import
3 from .Stack import Stack
4
5 __author__ = 'Omkar Pathak'
6
7
8 def balanced_parentheses(parentheses):
9 """ Use a stack to check if a string of parentheses is balanced."""
10 stack = Stack(len(parentheses))
11 for parenthesis in parentheses:
12 if parenthesis == '(':
13 stack.push(parenthesis)
14 elif parenthesis == ')':
15 stack.pop()
16 return not stack.is_empty()
17
18
19 if __name__ == '__main__':
20 examples = ['((()))', '((())']
21 print('Balanced parentheses demonstration:\n')
22 for example in examples:
23 print(example + ': ' + str(balanced_parentheses(example)))
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py
--- a/data_structures/stacks/balanced_parentheses.py
+++ b/data_structures/stacks/balanced_parentheses.py
@@ -1,6 +1,6 @@
from __future__ import print_function
from __future__ import absolute_import
-from .Stack import Stack
+from stack import Stack
__author__ = 'Omkar Pathak'
@@ -12,12 +12,14 @@
if parenthesis == '(':
stack.push(parenthesis)
elif parenthesis == ')':
+ if stack.is_empty():
+ return False
stack.pop()
- return not stack.is_empty()
+ return stack.is_empty()
if __name__ == '__main__':
- examples = ['((()))', '((())']
+ examples = ['((()))', '((())', '(()))']
print('Balanced parentheses demonstration:\n')
for example in examples:
print(example + ': ' + str(balanced_parentheses(example)))
|
{"golden_diff": "diff --git a/data_structures/stacks/balanced_parentheses.py b/data_structures/stacks/balanced_parentheses.py\n--- a/data_structures/stacks/balanced_parentheses.py\n+++ b/data_structures/stacks/balanced_parentheses.py\n@@ -1,6 +1,6 @@\n from __future__ import print_function\n from __future__ import absolute_import\n-from .Stack import Stack\n+from stack import Stack\n \n __author__ = 'Omkar Pathak'\n \n@@ -12,12 +12,14 @@\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n+ if stack.is_empty():\n+ return False\n stack.pop()\n- return not stack.is_empty()\n+ return stack.is_empty()\n \n \n if __name__ == '__main__':\n- examples = ['((()))', '((())']\n+ examples = ['((()))', '((())', '(()))']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "issue": "errors in balanced_parentheses.py\nhttps://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L13-L17\r\n\r\n`Line 16` returns wrong result due to `not`\r\n\r\n`Line 15` can cause error for inputs like **))**. before calling _pop_ it should be checked if stack is empty.\r\n\r\nhttps://github.com/TheAlgorithms/Python/blob/6a95bf9ad8090a1280abf76752a52ab9ee2420b7/data_structures/stacks/balanced_parentheses.py#L3\r\n\r\nthere is no module named **.Stack**. it should be\r\n _from **s**tack import Stack_\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom .Stack import Stack\n\n__author__ = 'Omkar Pathak'\n\n\ndef balanced_parentheses(parentheses):\n \"\"\" Use a stack to check if a string of parentheses is balanced.\"\"\"\n stack = Stack(len(parentheses))\n for parenthesis in parentheses:\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n stack.pop()\n return not stack.is_empty()\n\n\nif __name__ == '__main__':\n examples = ['((()))', '((())']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "path": "data_structures/stacks/balanced_parentheses.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom stack import Stack\n\n__author__ = 'Omkar Pathak'\n\n\ndef balanced_parentheses(parentheses):\n \"\"\" Use a stack to check if a string of parentheses is balanced.\"\"\"\n stack = Stack(len(parentheses))\n for parenthesis in parentheses:\n if parenthesis == '(':\n stack.push(parenthesis)\n elif parenthesis == ')':\n if stack.is_empty():\n return False\n stack.pop()\n return stack.is_empty()\n\n\nif __name__ == '__main__':\n examples = ['((()))', '((())', '(()))']\n print('Balanced parentheses demonstration:\\n')\n for example in examples:\n print(example + ': ' + str(balanced_parentheses(example)))\n", "path": "data_structures/stacks/balanced_parentheses.py"}]}
| 667 | 244 |
gh_patches_debug_30638
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-2935
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing from OpenLibrary breaks markdown quotes
**Describe the bug**
When importing a book from OpenLibrary, if the description includes a quote marked up in markdown (with ">"), the quote is not formatted; Bookwyrm doesn't recognize the markup.
**To Reproduce**
1. Find a book with a description on OpenLibrary that uses a quote (example to try: OL27172905W)
2. Import the book to a Bookwyrm instance
3. View the description
**Expected behavior**
The quote should appear correctly formatted, just as markdown links, italics, etc. do.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Instance**
bookwyrm.social
**Additional context**
Add any other context about the problem here.
---
**Desktop (please complete the following information):**
- OS: Windows
**Smartphone (please complete the following information):**
- OS: Android
--- END ISSUE ---
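For context, the general shape of a fix is to run the OpenLibrary description through a markdown renderer and then sanitize the resulting HTML while keeping `blockquote`. The sketch below assumes the `markdown` and `bleach` packages that BookWyrm already depends on, and the tag list mirrors `bookwyrm/utils/sanitizer.py`; the sample description is made up:
```python
from markdown import markdown
import bleach

description = "Opening line.\n\n> A quoted passage from the book."

html = markdown(description)
# roughly: '<p>Opening line.</p>\n<blockquote>\n<p>A quoted passage from the book.</p>\n</blockquote>'

safe_html = bleach.clean(
    html,
    tags=["p", "blockquote", "br", "b", "i", "strong", "em", "pre",
          "a", "span", "ul", "ol", "li"],
    attributes=["href", "rel", "src", "alt"],
    strip=True,
)
print(safe_html)  # blockquote markup survives sanitization and renders as a quote
```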
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/connectors/openlibrary.py`
Content:
```
1 """ openlibrary data connector """
2 import re
3 from typing import Any, Optional, Union, Iterator, Iterable
4
5 from bookwyrm import models
6 from bookwyrm.book_search import SearchResult
7 from .abstract_connector import AbstractConnector, Mapping, JsonDict
8 from .abstract_connector import get_data, infer_physical_format, unique_physical_format
9 from .connector_manager import ConnectorException, create_edition_task
10 from .openlibrary_languages import languages
11
12
13 class Connector(AbstractConnector):
14 """instantiate a connector for OL"""
15
16 generated_remote_link_field = "openlibrary_link"
17
18 def __init__(self, identifier: str):
19 super().__init__(identifier)
20
21 get_first = lambda a, *args: a[0]
22 get_remote_id = lambda a, *args: self.base_url + a
23 self.book_mappings = [
24 Mapping("title"),
25 Mapping("id", remote_field="key", formatter=get_remote_id),
26 Mapping("cover", remote_field="covers", formatter=self.get_cover_url),
27 Mapping("sortTitle", remote_field="sort_title"),
28 Mapping("subtitle"),
29 Mapping("description", formatter=get_description),
30 Mapping("languages", formatter=get_languages),
31 Mapping("series", formatter=get_first),
32 Mapping("seriesNumber", remote_field="series_number"),
33 Mapping("subjects"),
34 Mapping("subjectPlaces", remote_field="subject_places"),
35 Mapping("isbn13", remote_field="isbn_13", formatter=get_first),
36 Mapping("isbn10", remote_field="isbn_10", formatter=get_first),
37 Mapping("lccn", formatter=get_first),
38 Mapping("oclcNumber", remote_field="oclc_numbers", formatter=get_first),
39 Mapping(
40 "openlibraryKey", remote_field="key", formatter=get_openlibrary_key
41 ),
42 Mapping("goodreadsKey", remote_field="goodreads_key"),
43 Mapping("asin"),
44 Mapping(
45 "firstPublishedDate",
46 remote_field="first_publish_date",
47 ),
48 Mapping("publishedDate", remote_field="publish_date"),
49 Mapping("pages", remote_field="number_of_pages"),
50 Mapping(
51 "physicalFormat",
52 remote_field="physical_format",
53 formatter=infer_physical_format,
54 ),
55 Mapping(
56 "physicalFormatDetail",
57 remote_field="physical_format",
58 formatter=unique_physical_format,
59 ),
60 Mapping("publishers"),
61 ]
62
63 self.author_mappings = [
64 Mapping("id", remote_field="key", formatter=get_remote_id),
65 Mapping("name"),
66 Mapping(
67 "openlibraryKey", remote_field="key", formatter=get_openlibrary_key
68 ),
69 Mapping("born", remote_field="birth_date"),
70 Mapping("died", remote_field="death_date"),
71 Mapping("bio", formatter=get_description),
72 Mapping(
73 "isni",
74 remote_field="remote_ids",
75 formatter=lambda b: get_dict_field(b, "isni"),
76 ),
77 Mapping(
78 "asin",
79 remote_field="remote_ids",
80 formatter=lambda b: get_dict_field(b, "amazon"),
81 ),
82 Mapping(
83 "viaf",
84 remote_field="remote_ids",
85 formatter=lambda b: get_dict_field(b, "viaf"),
86 ),
87 Mapping(
88 "wikidata",
89 remote_field="remote_ids",
90 formatter=lambda b: get_dict_field(b, "wikidata"),
91 ),
92 Mapping(
93 "wikipedia_link", remote_field="links", formatter=get_wikipedia_link
94 ),
95 Mapping("inventaire_id", remote_field="links", formatter=get_inventaire_id),
96 ]
97
98 def get_book_data(self, remote_id: str) -> JsonDict:
99 data = get_data(remote_id)
100 if data.get("type", {}).get("key") == "/type/redirect":
101 remote_id = self.base_url + data.get("location", "")
102 return get_data(remote_id)
103 return data
104
105 def get_remote_id_from_data(self, data: JsonDict) -> str:
106 """format a url from an openlibrary id field"""
107 try:
108 key = data["key"]
109 except KeyError:
110 raise ConnectorException("Invalid book data")
111 return f"{self.books_url}{key}"
112
113 def is_work_data(self, data: JsonDict) -> bool:
114 return bool(re.match(r"^[\/\w]+OL\d+W$", data["key"]))
115
116 def get_edition_from_work_data(self, data: JsonDict) -> JsonDict:
117 try:
118 key = data["key"]
119 except KeyError:
120 raise ConnectorException("Invalid book data")
121 url = f"{self.books_url}{key}/editions"
122 data = self.get_book_data(url)
123 edition = pick_default_edition(data["entries"])
124 if not edition:
125 raise ConnectorException("No editions for work")
126 return edition
127
128 def get_work_from_edition_data(self, data: JsonDict) -> JsonDict:
129 try:
130 key = data["works"][0]["key"]
131 except (IndexError, KeyError):
132 raise ConnectorException("No work found for edition")
133 url = f"{self.books_url}{key}"
134 return self.get_book_data(url)
135
136 def get_authors_from_data(self, data: JsonDict) -> Iterator[models.Author]:
137 """parse author json and load or create authors"""
138 for author_blob in data.get("authors", []):
139 author_blob = author_blob.get("author", author_blob)
140 # this id is "/authors/OL1234567A"
141 author_id = author_blob["key"]
142 url = f"{self.base_url}{author_id}"
143 author = self.get_or_create_author(url)
144 if not author:
145 continue
146 yield author
147
148 def get_cover_url(self, cover_blob: list[str], size: str = "L") -> Optional[str]:
149 """ask openlibrary for the cover"""
150 if not cover_blob:
151 return None
152 cover_id = cover_blob[0]
153 image_name = f"{cover_id}-{size}.jpg"
154 return f"{self.covers_url}/b/id/{image_name}"
155
156 def parse_search_data(
157 self, data: JsonDict, min_confidence: float
158 ) -> Iterator[SearchResult]:
159 for idx, search_result in enumerate(data.get("docs", [])):
160 # build the remote id from the openlibrary key
161 key = self.books_url + search_result["key"]
162 author = search_result.get("author_name") or ["Unknown"]
163 cover_blob = search_result.get("cover_i")
164 cover = self.get_cover_url([cover_blob], size="M") if cover_blob else None
165
166 # OL doesn't provide confidence, but it does sort by an internal ranking, so
167 # this confidence value is relative to the list position
168 confidence = 1 / (idx + 1)
169
170 yield SearchResult(
171 title=search_result.get("title"),
172 key=key,
173 author=", ".join(author),
174 connector=self,
175 year=search_result.get("first_publish_year"),
176 cover=cover,
177 confidence=confidence,
178 )
179
180 def parse_isbn_search_data(self, data: JsonDict) -> Iterator[SearchResult]:
181 for search_result in list(data.values()):
182 # build the remote id from the openlibrary key
183 key = self.books_url + search_result["key"]
184 authors = search_result.get("authors") or [{"name": "Unknown"}]
185 author_names = [author.get("name") for author in authors]
186 yield SearchResult(
187 title=search_result.get("title"),
188 key=key,
189 author=", ".join(author_names),
190 connector=self,
191 year=search_result.get("publish_date"),
192 )
193
194 def load_edition_data(self, olkey: str) -> JsonDict:
195 """query openlibrary for editions of a work"""
196 url = f"{self.books_url}/works/{olkey}/editions"
197 return self.get_book_data(url)
198
199 def expand_book_data(self, book: models.Book) -> None:
200 work = book
201 # go from the edition to the work, if necessary
202 if isinstance(book, models.Edition):
203 work = book.parent_work
204
205 # we can mass download edition data from OL to avoid repeatedly querying
206 try:
207 edition_options = self.load_edition_data(work.openlibrary_key)
208 except ConnectorException:
209 # who knows, man
210 return
211
212 for edition_data in edition_options.get("entries", []):
213 # does this edition have ANY interesting data?
214 if ignore_edition(edition_data):
215 continue
216 create_edition_task.delay(self.connector.id, work.id, edition_data)
217
218
219 def ignore_edition(edition_data: JsonDict) -> bool:
220 """don't load a million editions that have no metadata"""
221 # an isbn, we love to see it
222 if edition_data.get("isbn_13") or edition_data.get("isbn_10"):
223 return False
224 # grudgingly, oclc can stay
225 if edition_data.get("oclc_numbers"):
226 return False
227 # if it has a cover it can stay
228 if edition_data.get("covers"):
229 return False
230 # keep non-english editions
231 if edition_data.get("languages") and "languages/eng" not in str(
232 edition_data.get("languages")
233 ):
234 return False
235 return True
236
237
238 def get_description(description_blob: Union[JsonDict, str]) -> Optional[str]:
239 """descriptions can be a string or a dict"""
240 if isinstance(description_blob, dict):
241 return description_blob.get("value")
242 return description_blob
243
244
245 def get_openlibrary_key(key: str) -> str:
246 """convert /books/OL27320736M into OL27320736M"""
247 return key.split("/")[-1]
248
249
250 def get_languages(language_blob: Iterable[JsonDict]) -> list[Optional[str]]:
251 """/language/eng -> English"""
252 langs = []
253 for lang in language_blob:
254 langs.append(languages.get(lang.get("key", ""), None))
255 return langs
256
257
258 def get_dict_field(blob: Optional[JsonDict], field_name: str) -> Optional[Any]:
259 """extract the isni from the remote id data for the author"""
260 if not blob or not isinstance(blob, dict):
261 return None
262 return blob.get(field_name)
263
264
265 def get_wikipedia_link(links: list[Any]) -> Optional[str]:
266 """extract wikipedia links"""
267 if not isinstance(links, list):
268 return None
269
270 for link in links:
271 if not isinstance(link, dict):
272 continue
273 if link.get("title") == "wikipedia":
274 return link.get("url")
275 return None
276
277
278 def get_inventaire_id(links: list[Any]) -> Optional[str]:
279 """extract and format inventaire ids"""
280 if not isinstance(links, list):
281 return None
282
283 for link in links:
284 if not isinstance(link, dict):
285 continue
286 if link.get("title") == "inventaire.io":
287 iv_link = link.get("url")
288 if not isinstance(iv_link, str):
289 return None
290 return iv_link.split("/")[-1]
291 return None
292
293
294 def pick_default_edition(options: list[JsonDict]) -> Optional[JsonDict]:
295 """favor physical copies with covers in english"""
296 if not options:
297 return None
298 if len(options) == 1:
299 return options[0]
300
301 options = [e for e in options if e.get("covers")] or options
302 options = [
303 e for e in options if "/languages/eng" in str(e.get("languages"))
304 ] or options
305 formats = ["paperback", "hardcover", "mass market paperback"]
306 options = [
307 e for e in options if str(e.get("physical_format")).lower() in formats
308 ] or options
309 options = [e for e in options if e.get("isbn_13")] or options
310 options = [e for e in options if e.get("ocaid")] or options
311 return options[0]
312
```
Path: `bookwyrm/utils/sanitizer.py`
Content:
```
1 """Clean user-provided text"""
2 import bleach
3
4
5 def clean(input_text):
6 """Run through "bleach" """
7 return bleach.clean(
8 input_text,
9 tags=[
10 "p",
11 "blockquote",
12 "br",
13 "b",
14 "i",
15 "strong",
16 "em",
17 "pre",
18 "a",
19 "span",
20 "ul",
21 "ol",
22 "li",
23 ],
24 attributes=["href", "rel", "src", "alt", "data-mention"],
25 strip=True,
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/connectors/openlibrary.py b/bookwyrm/connectors/openlibrary.py
--- a/bookwyrm/connectors/openlibrary.py
+++ b/bookwyrm/connectors/openlibrary.py
@@ -2,8 +2,11 @@
import re
from typing import Any, Optional, Union, Iterator, Iterable
+from markdown import markdown
+
from bookwyrm import models
from bookwyrm.book_search import SearchResult
+from bookwyrm.utils.sanitizer import clean
from .abstract_connector import AbstractConnector, Mapping, JsonDict
from .abstract_connector import get_data, infer_physical_format, unique_physical_format
from .connector_manager import ConnectorException, create_edition_task
@@ -235,11 +238,22 @@
return True
-def get_description(description_blob: Union[JsonDict, str]) -> Optional[str]:
+def get_description(description_blob: Union[JsonDict, str]) -> str:
"""descriptions can be a string or a dict"""
if isinstance(description_blob, dict):
- return description_blob.get("value")
- return description_blob
+ description = markdown(description_blob.get("value", ""))
+ else:
+ description = markdown(description_blob)
+
+ if (
+ description.startswith("<p>")
+ and description.endswith("</p>")
+ and description.count("<p>") == 1
+ ):
+ # If there is just one <p> tag and it is around the text remove it
+ return description[len("<p>") : -len("</p>")].strip()
+
+ return clean(description)
def get_openlibrary_key(key: str) -> str:
diff --git a/bookwyrm/utils/sanitizer.py b/bookwyrm/utils/sanitizer.py
--- a/bookwyrm/utils/sanitizer.py
+++ b/bookwyrm/utils/sanitizer.py
@@ -2,7 +2,7 @@
import bleach
-def clean(input_text):
+def clean(input_text: str) -> str:
"""Run through "bleach" """
return bleach.clean(
input_text,
|
{"golden_diff": "diff --git a/bookwyrm/connectors/openlibrary.py b/bookwyrm/connectors/openlibrary.py\n--- a/bookwyrm/connectors/openlibrary.py\n+++ b/bookwyrm/connectors/openlibrary.py\n@@ -2,8 +2,11 @@\n import re\n from typing import Any, Optional, Union, Iterator, Iterable\n \n+from markdown import markdown\n+\n from bookwyrm import models\n from bookwyrm.book_search import SearchResult\n+from bookwyrm.utils.sanitizer import clean\n from .abstract_connector import AbstractConnector, Mapping, JsonDict\n from .abstract_connector import get_data, infer_physical_format, unique_physical_format\n from .connector_manager import ConnectorException, create_edition_task\n@@ -235,11 +238,22 @@\n return True\n \n \n-def get_description(description_blob: Union[JsonDict, str]) -> Optional[str]:\n+def get_description(description_blob: Union[JsonDict, str]) -> str:\n \"\"\"descriptions can be a string or a dict\"\"\"\n if isinstance(description_blob, dict):\n- return description_blob.get(\"value\")\n- return description_blob\n+ description = markdown(description_blob.get(\"value\", \"\"))\n+ else:\n+ description = markdown(description_blob)\n+\n+ if (\n+ description.startswith(\"<p>\")\n+ and description.endswith(\"</p>\")\n+ and description.count(\"<p>\") == 1\n+ ):\n+ # If there is just one <p> tag and it is around the text remove it\n+ return description[len(\"<p>\") : -len(\"</p>\")].strip()\n+\n+ return clean(description)\n \n \n def get_openlibrary_key(key: str) -> str:\ndiff --git a/bookwyrm/utils/sanitizer.py b/bookwyrm/utils/sanitizer.py\n--- a/bookwyrm/utils/sanitizer.py\n+++ b/bookwyrm/utils/sanitizer.py\n@@ -2,7 +2,7 @@\n import bleach\n \n \n-def clean(input_text):\n+def clean(input_text: str) -> str:\n \"\"\"Run through \"bleach\" \"\"\"\n return bleach.clean(\n input_text,\n", "issue": "Importing from OpenLibrary breaks markdown quotes\n**Describe the bug**\r\nWhen importing a book from OpenLibrary, if the description includes a quote as indicated in markdown (with \">\"), the quote is not formatted/Bookwyrm doesn't recognize the code.\r\n\r\n**To Reproduce**\r\n1. Find a book with a description on OpenLibrary that uses a quote (example to try: OL27172905W)\r\n2. Import the book to a Bookwyrm instance\r\n3. View the description\r\n\r\n**Expected behavior**\r\nThe quote should appear formatted correctly like markdown links, italics, etc. 
do.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows\r\n\r\n**Smartphone (please complete the following information):**\r\n - OS: Android\r\n\n", "before_files": [{"content": "\"\"\" openlibrary data connector \"\"\"\nimport re\nfrom typing import Any, Optional, Union, Iterator, Iterable\n\nfrom bookwyrm import models\nfrom bookwyrm.book_search import SearchResult\nfrom .abstract_connector import AbstractConnector, Mapping, JsonDict\nfrom .abstract_connector import get_data, infer_physical_format, unique_physical_format\nfrom .connector_manager import ConnectorException, create_edition_task\nfrom .openlibrary_languages import languages\n\n\nclass Connector(AbstractConnector):\n \"\"\"instantiate a connector for OL\"\"\"\n\n generated_remote_link_field = \"openlibrary_link\"\n\n def __init__(self, identifier: str):\n super().__init__(identifier)\n\n get_first = lambda a, *args: a[0]\n get_remote_id = lambda a, *args: self.base_url + a\n self.book_mappings = [\n Mapping(\"title\"),\n Mapping(\"id\", remote_field=\"key\", formatter=get_remote_id),\n Mapping(\"cover\", remote_field=\"covers\", formatter=self.get_cover_url),\n Mapping(\"sortTitle\", remote_field=\"sort_title\"),\n Mapping(\"subtitle\"),\n Mapping(\"description\", formatter=get_description),\n Mapping(\"languages\", formatter=get_languages),\n Mapping(\"series\", formatter=get_first),\n Mapping(\"seriesNumber\", remote_field=\"series_number\"),\n Mapping(\"subjects\"),\n Mapping(\"subjectPlaces\", remote_field=\"subject_places\"),\n Mapping(\"isbn13\", remote_field=\"isbn_13\", formatter=get_first),\n Mapping(\"isbn10\", remote_field=\"isbn_10\", formatter=get_first),\n Mapping(\"lccn\", formatter=get_first),\n Mapping(\"oclcNumber\", remote_field=\"oclc_numbers\", formatter=get_first),\n Mapping(\n \"openlibraryKey\", remote_field=\"key\", formatter=get_openlibrary_key\n ),\n Mapping(\"goodreadsKey\", remote_field=\"goodreads_key\"),\n Mapping(\"asin\"),\n Mapping(\n \"firstPublishedDate\",\n remote_field=\"first_publish_date\",\n ),\n Mapping(\"publishedDate\", remote_field=\"publish_date\"),\n Mapping(\"pages\", remote_field=\"number_of_pages\"),\n Mapping(\n \"physicalFormat\",\n remote_field=\"physical_format\",\n formatter=infer_physical_format,\n ),\n Mapping(\n \"physicalFormatDetail\",\n remote_field=\"physical_format\",\n formatter=unique_physical_format,\n ),\n Mapping(\"publishers\"),\n ]\n\n self.author_mappings = [\n Mapping(\"id\", remote_field=\"key\", formatter=get_remote_id),\n Mapping(\"name\"),\n Mapping(\n \"openlibraryKey\", remote_field=\"key\", formatter=get_openlibrary_key\n ),\n Mapping(\"born\", remote_field=\"birth_date\"),\n Mapping(\"died\", remote_field=\"death_date\"),\n Mapping(\"bio\", formatter=get_description),\n Mapping(\n \"isni\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"isni\"),\n ),\n Mapping(\n \"asin\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"amazon\"),\n ),\n Mapping(\n \"viaf\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"viaf\"),\n ),\n Mapping(\n \"wikidata\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"wikidata\"),\n ),\n Mapping(\n \"wikipedia_link\", remote_field=\"links\", formatter=get_wikipedia_link\n ),\n 
Mapping(\"inventaire_id\", remote_field=\"links\", formatter=get_inventaire_id),\n ]\n\n def get_book_data(self, remote_id: str) -> JsonDict:\n data = get_data(remote_id)\n if data.get(\"type\", {}).get(\"key\") == \"/type/redirect\":\n remote_id = self.base_url + data.get(\"location\", \"\")\n return get_data(remote_id)\n return data\n\n def get_remote_id_from_data(self, data: JsonDict) -> str:\n \"\"\"format a url from an openlibrary id field\"\"\"\n try:\n key = data[\"key\"]\n except KeyError:\n raise ConnectorException(\"Invalid book data\")\n return f\"{self.books_url}{key}\"\n\n def is_work_data(self, data: JsonDict) -> bool:\n return bool(re.match(r\"^[\\/\\w]+OL\\d+W$\", data[\"key\"]))\n\n def get_edition_from_work_data(self, data: JsonDict) -> JsonDict:\n try:\n key = data[\"key\"]\n except KeyError:\n raise ConnectorException(\"Invalid book data\")\n url = f\"{self.books_url}{key}/editions\"\n data = self.get_book_data(url)\n edition = pick_default_edition(data[\"entries\"])\n if not edition:\n raise ConnectorException(\"No editions for work\")\n return edition\n\n def get_work_from_edition_data(self, data: JsonDict) -> JsonDict:\n try:\n key = data[\"works\"][0][\"key\"]\n except (IndexError, KeyError):\n raise ConnectorException(\"No work found for edition\")\n url = f\"{self.books_url}{key}\"\n return self.get_book_data(url)\n\n def get_authors_from_data(self, data: JsonDict) -> Iterator[models.Author]:\n \"\"\"parse author json and load or create authors\"\"\"\n for author_blob in data.get(\"authors\", []):\n author_blob = author_blob.get(\"author\", author_blob)\n # this id is \"/authors/OL1234567A\"\n author_id = author_blob[\"key\"]\n url = f\"{self.base_url}{author_id}\"\n author = self.get_or_create_author(url)\n if not author:\n continue\n yield author\n\n def get_cover_url(self, cover_blob: list[str], size: str = \"L\") -> Optional[str]:\n \"\"\"ask openlibrary for the cover\"\"\"\n if not cover_blob:\n return None\n cover_id = cover_blob[0]\n image_name = f\"{cover_id}-{size}.jpg\"\n return f\"{self.covers_url}/b/id/{image_name}\"\n\n def parse_search_data(\n self, data: JsonDict, min_confidence: float\n ) -> Iterator[SearchResult]:\n for idx, search_result in enumerate(data.get(\"docs\", [])):\n # build the remote id from the openlibrary key\n key = self.books_url + search_result[\"key\"]\n author = search_result.get(\"author_name\") or [\"Unknown\"]\n cover_blob = search_result.get(\"cover_i\")\n cover = self.get_cover_url([cover_blob], size=\"M\") if cover_blob else None\n\n # OL doesn't provide confidence, but it does sort by an internal ranking, so\n # this confidence value is relative to the list position\n confidence = 1 / (idx + 1)\n\n yield SearchResult(\n title=search_result.get(\"title\"),\n key=key,\n author=\", \".join(author),\n connector=self,\n year=search_result.get(\"first_publish_year\"),\n cover=cover,\n confidence=confidence,\n )\n\n def parse_isbn_search_data(self, data: JsonDict) -> Iterator[SearchResult]:\n for search_result in list(data.values()):\n # build the remote id from the openlibrary key\n key = self.books_url + search_result[\"key\"]\n authors = search_result.get(\"authors\") or [{\"name\": \"Unknown\"}]\n author_names = [author.get(\"name\") for author in authors]\n yield SearchResult(\n title=search_result.get(\"title\"),\n key=key,\n author=\", \".join(author_names),\n connector=self,\n year=search_result.get(\"publish_date\"),\n )\n\n def load_edition_data(self, olkey: str) -> JsonDict:\n \"\"\"query openlibrary for editions of 
a work\"\"\"\n url = f\"{self.books_url}/works/{olkey}/editions\"\n return self.get_book_data(url)\n\n def expand_book_data(self, book: models.Book) -> None:\n work = book\n # go from the edition to the work, if necessary\n if isinstance(book, models.Edition):\n work = book.parent_work\n\n # we can mass download edition data from OL to avoid repeatedly querying\n try:\n edition_options = self.load_edition_data(work.openlibrary_key)\n except ConnectorException:\n # who knows, man\n return\n\n for edition_data in edition_options.get(\"entries\", []):\n # does this edition have ANY interesting data?\n if ignore_edition(edition_data):\n continue\n create_edition_task.delay(self.connector.id, work.id, edition_data)\n\n\ndef ignore_edition(edition_data: JsonDict) -> bool:\n \"\"\"don't load a million editions that have no metadata\"\"\"\n # an isbn, we love to see it\n if edition_data.get(\"isbn_13\") or edition_data.get(\"isbn_10\"):\n return False\n # grudgingly, oclc can stay\n if edition_data.get(\"oclc_numbers\"):\n return False\n # if it has a cover it can stay\n if edition_data.get(\"covers\"):\n return False\n # keep non-english editions\n if edition_data.get(\"languages\") and \"languages/eng\" not in str(\n edition_data.get(\"languages\")\n ):\n return False\n return True\n\n\ndef get_description(description_blob: Union[JsonDict, str]) -> Optional[str]:\n \"\"\"descriptions can be a string or a dict\"\"\"\n if isinstance(description_blob, dict):\n return description_blob.get(\"value\")\n return description_blob\n\n\ndef get_openlibrary_key(key: str) -> str:\n \"\"\"convert /books/OL27320736M into OL27320736M\"\"\"\n return key.split(\"/\")[-1]\n\n\ndef get_languages(language_blob: Iterable[JsonDict]) -> list[Optional[str]]:\n \"\"\"/language/eng -> English\"\"\"\n langs = []\n for lang in language_blob:\n langs.append(languages.get(lang.get(\"key\", \"\"), None))\n return langs\n\n\ndef get_dict_field(blob: Optional[JsonDict], field_name: str) -> Optional[Any]:\n \"\"\"extract the isni from the remote id data for the author\"\"\"\n if not blob or not isinstance(blob, dict):\n return None\n return blob.get(field_name)\n\n\ndef get_wikipedia_link(links: list[Any]) -> Optional[str]:\n \"\"\"extract wikipedia links\"\"\"\n if not isinstance(links, list):\n return None\n\n for link in links:\n if not isinstance(link, dict):\n continue\n if link.get(\"title\") == \"wikipedia\":\n return link.get(\"url\")\n return None\n\n\ndef get_inventaire_id(links: list[Any]) -> Optional[str]:\n \"\"\"extract and format inventaire ids\"\"\"\n if not isinstance(links, list):\n return None\n\n for link in links:\n if not isinstance(link, dict):\n continue\n if link.get(\"title\") == \"inventaire.io\":\n iv_link = link.get(\"url\")\n if not isinstance(iv_link, str):\n return None\n return iv_link.split(\"/\")[-1]\n return None\n\n\ndef pick_default_edition(options: list[JsonDict]) -> Optional[JsonDict]:\n \"\"\"favor physical copies with covers in english\"\"\"\n if not options:\n return None\n if len(options) == 1:\n return options[0]\n\n options = [e for e in options if e.get(\"covers\")] or options\n options = [\n e for e in options if \"/languages/eng\" in str(e.get(\"languages\"))\n ] or options\n formats = [\"paperback\", \"hardcover\", \"mass market paperback\"]\n options = [\n e for e in options if str(e.get(\"physical_format\")).lower() in formats\n ] or options\n options = [e for e in options if e.get(\"isbn_13\")] or options\n options = [e for e in options if e.get(\"ocaid\")] or options\n return 
options[0]\n", "path": "bookwyrm/connectors/openlibrary.py"}, {"content": "\"\"\"Clean user-provided text\"\"\"\nimport bleach\n\n\ndef clean(input_text):\n \"\"\"Run through \"bleach\" \"\"\"\n return bleach.clean(\n input_text,\n tags=[\n \"p\",\n \"blockquote\",\n \"br\",\n \"b\",\n \"i\",\n \"strong\",\n \"em\",\n \"pre\",\n \"a\",\n \"span\",\n \"ul\",\n \"ol\",\n \"li\",\n ],\n attributes=[\"href\", \"rel\", \"src\", \"alt\", \"data-mention\"],\n strip=True,\n )\n", "path": "bookwyrm/utils/sanitizer.py"}], "after_files": [{"content": "\"\"\" openlibrary data connector \"\"\"\nimport re\nfrom typing import Any, Optional, Union, Iterator, Iterable\n\nfrom markdown import markdown\n\nfrom bookwyrm import models\nfrom bookwyrm.book_search import SearchResult\nfrom bookwyrm.utils.sanitizer import clean\nfrom .abstract_connector import AbstractConnector, Mapping, JsonDict\nfrom .abstract_connector import get_data, infer_physical_format, unique_physical_format\nfrom .connector_manager import ConnectorException, create_edition_task\nfrom .openlibrary_languages import languages\n\n\nclass Connector(AbstractConnector):\n \"\"\"instantiate a connector for OL\"\"\"\n\n generated_remote_link_field = \"openlibrary_link\"\n\n def __init__(self, identifier: str):\n super().__init__(identifier)\n\n get_first = lambda a, *args: a[0]\n get_remote_id = lambda a, *args: self.base_url + a\n self.book_mappings = [\n Mapping(\"title\"),\n Mapping(\"id\", remote_field=\"key\", formatter=get_remote_id),\n Mapping(\"cover\", remote_field=\"covers\", formatter=self.get_cover_url),\n Mapping(\"sortTitle\", remote_field=\"sort_title\"),\n Mapping(\"subtitle\"),\n Mapping(\"description\", formatter=get_description),\n Mapping(\"languages\", formatter=get_languages),\n Mapping(\"series\", formatter=get_first),\n Mapping(\"seriesNumber\", remote_field=\"series_number\"),\n Mapping(\"subjects\"),\n Mapping(\"subjectPlaces\", remote_field=\"subject_places\"),\n Mapping(\"isbn13\", remote_field=\"isbn_13\", formatter=get_first),\n Mapping(\"isbn10\", remote_field=\"isbn_10\", formatter=get_first),\n Mapping(\"lccn\", formatter=get_first),\n Mapping(\"oclcNumber\", remote_field=\"oclc_numbers\", formatter=get_first),\n Mapping(\n \"openlibraryKey\", remote_field=\"key\", formatter=get_openlibrary_key\n ),\n Mapping(\"goodreadsKey\", remote_field=\"goodreads_key\"),\n Mapping(\"asin\"),\n Mapping(\n \"firstPublishedDate\",\n remote_field=\"first_publish_date\",\n ),\n Mapping(\"publishedDate\", remote_field=\"publish_date\"),\n Mapping(\"pages\", remote_field=\"number_of_pages\"),\n Mapping(\n \"physicalFormat\",\n remote_field=\"physical_format\",\n formatter=infer_physical_format,\n ),\n Mapping(\n \"physicalFormatDetail\",\n remote_field=\"physical_format\",\n formatter=unique_physical_format,\n ),\n Mapping(\"publishers\"),\n ]\n\n self.author_mappings = [\n Mapping(\"id\", remote_field=\"key\", formatter=get_remote_id),\n Mapping(\"name\"),\n Mapping(\n \"openlibraryKey\", remote_field=\"key\", formatter=get_openlibrary_key\n ),\n Mapping(\"born\", remote_field=\"birth_date\"),\n Mapping(\"died\", remote_field=\"death_date\"),\n Mapping(\"bio\", formatter=get_description),\n Mapping(\n \"isni\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"isni\"),\n ),\n Mapping(\n \"asin\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"amazon\"),\n ),\n Mapping(\n \"viaf\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"viaf\"),\n ),\n Mapping(\n 
\"wikidata\",\n remote_field=\"remote_ids\",\n formatter=lambda b: get_dict_field(b, \"wikidata\"),\n ),\n Mapping(\n \"wikipedia_link\", remote_field=\"links\", formatter=get_wikipedia_link\n ),\n Mapping(\"inventaire_id\", remote_field=\"links\", formatter=get_inventaire_id),\n ]\n\n def get_book_data(self, remote_id: str) -> JsonDict:\n data = get_data(remote_id)\n if data.get(\"type\", {}).get(\"key\") == \"/type/redirect\":\n remote_id = self.base_url + data.get(\"location\", \"\")\n return get_data(remote_id)\n return data\n\n def get_remote_id_from_data(self, data: JsonDict) -> str:\n \"\"\"format a url from an openlibrary id field\"\"\"\n try:\n key = data[\"key\"]\n except KeyError:\n raise ConnectorException(\"Invalid book data\")\n return f\"{self.books_url}{key}\"\n\n def is_work_data(self, data: JsonDict) -> bool:\n return bool(re.match(r\"^[\\/\\w]+OL\\d+W$\", data[\"key\"]))\n\n def get_edition_from_work_data(self, data: JsonDict) -> JsonDict:\n try:\n key = data[\"key\"]\n except KeyError:\n raise ConnectorException(\"Invalid book data\")\n url = f\"{self.books_url}{key}/editions\"\n data = self.get_book_data(url)\n edition = pick_default_edition(data[\"entries\"])\n if not edition:\n raise ConnectorException(\"No editions for work\")\n return edition\n\n def get_work_from_edition_data(self, data: JsonDict) -> JsonDict:\n try:\n key = data[\"works\"][0][\"key\"]\n except (IndexError, KeyError):\n raise ConnectorException(\"No work found for edition\")\n url = f\"{self.books_url}{key}\"\n return self.get_book_data(url)\n\n def get_authors_from_data(self, data: JsonDict) -> Iterator[models.Author]:\n \"\"\"parse author json and load or create authors\"\"\"\n for author_blob in data.get(\"authors\", []):\n author_blob = author_blob.get(\"author\", author_blob)\n # this id is \"/authors/OL1234567A\"\n author_id = author_blob[\"key\"]\n url = f\"{self.base_url}{author_id}\"\n author = self.get_or_create_author(url)\n if not author:\n continue\n yield author\n\n def get_cover_url(self, cover_blob: list[str], size: str = \"L\") -> Optional[str]:\n \"\"\"ask openlibrary for the cover\"\"\"\n if not cover_blob:\n return None\n cover_id = cover_blob[0]\n image_name = f\"{cover_id}-{size}.jpg\"\n return f\"{self.covers_url}/b/id/{image_name}\"\n\n def parse_search_data(\n self, data: JsonDict, min_confidence: float\n ) -> Iterator[SearchResult]:\n for idx, search_result in enumerate(data.get(\"docs\", [])):\n # build the remote id from the openlibrary key\n key = self.books_url + search_result[\"key\"]\n author = search_result.get(\"author_name\") or [\"Unknown\"]\n cover_blob = search_result.get(\"cover_i\")\n cover = self.get_cover_url([cover_blob], size=\"M\") if cover_blob else None\n\n # OL doesn't provide confidence, but it does sort by an internal ranking, so\n # this confidence value is relative to the list position\n confidence = 1 / (idx + 1)\n\n yield SearchResult(\n title=search_result.get(\"title\"),\n key=key,\n author=\", \".join(author),\n connector=self,\n year=search_result.get(\"first_publish_year\"),\n cover=cover,\n confidence=confidence,\n )\n\n def parse_isbn_search_data(self, data: JsonDict) -> Iterator[SearchResult]:\n for search_result in list(data.values()):\n # build the remote id from the openlibrary key\n key = self.books_url + search_result[\"key\"]\n authors = search_result.get(\"authors\") or [{\"name\": \"Unknown\"}]\n author_names = [author.get(\"name\") for author in authors]\n yield SearchResult(\n title=search_result.get(\"title\"),\n key=key,\n 
author=\", \".join(author_names),\n connector=self,\n year=search_result.get(\"publish_date\"),\n )\n\n def load_edition_data(self, olkey: str) -> JsonDict:\n \"\"\"query openlibrary for editions of a work\"\"\"\n url = f\"{self.books_url}/works/{olkey}/editions\"\n return self.get_book_data(url)\n\n def expand_book_data(self, book: models.Book) -> None:\n work = book\n # go from the edition to the work, if necessary\n if isinstance(book, models.Edition):\n work = book.parent_work\n\n # we can mass download edition data from OL to avoid repeatedly querying\n try:\n edition_options = self.load_edition_data(work.openlibrary_key)\n except ConnectorException:\n # who knows, man\n return\n\n for edition_data in edition_options.get(\"entries\", []):\n # does this edition have ANY interesting data?\n if ignore_edition(edition_data):\n continue\n create_edition_task.delay(self.connector.id, work.id, edition_data)\n\n\ndef ignore_edition(edition_data: JsonDict) -> bool:\n \"\"\"don't load a million editions that have no metadata\"\"\"\n # an isbn, we love to see it\n if edition_data.get(\"isbn_13\") or edition_data.get(\"isbn_10\"):\n return False\n # grudgingly, oclc can stay\n if edition_data.get(\"oclc_numbers\"):\n return False\n # if it has a cover it can stay\n if edition_data.get(\"covers\"):\n return False\n # keep non-english editions\n if edition_data.get(\"languages\") and \"languages/eng\" not in str(\n edition_data.get(\"languages\")\n ):\n return False\n return True\n\n\ndef get_description(description_blob: Union[JsonDict, str]) -> str:\n \"\"\"descriptions can be a string or a dict\"\"\"\n if isinstance(description_blob, dict):\n description = markdown(description_blob.get(\"value\", \"\"))\n else:\n description = markdown(description_blob)\n\n if (\n description.startswith(\"<p>\")\n and description.endswith(\"</p>\")\n and description.count(\"<p>\") == 1\n ):\n # If there is just one <p> tag and it is around the text remove it\n return description[len(\"<p>\") : -len(\"</p>\")].strip()\n\n return clean(description)\n\n\ndef get_openlibrary_key(key: str) -> str:\n \"\"\"convert /books/OL27320736M into OL27320736M\"\"\"\n return key.split(\"/\")[-1]\n\n\ndef get_languages(language_blob: Iterable[JsonDict]) -> list[Optional[str]]:\n \"\"\"/language/eng -> English\"\"\"\n langs = []\n for lang in language_blob:\n langs.append(languages.get(lang.get(\"key\", \"\"), None))\n return langs\n\n\ndef get_dict_field(blob: Optional[JsonDict], field_name: str) -> Optional[Any]:\n \"\"\"extract the isni from the remote id data for the author\"\"\"\n if not blob or not isinstance(blob, dict):\n return None\n return blob.get(field_name)\n\n\ndef get_wikipedia_link(links: list[Any]) -> Optional[str]:\n \"\"\"extract wikipedia links\"\"\"\n if not isinstance(links, list):\n return None\n\n for link in links:\n if not isinstance(link, dict):\n continue\n if link.get(\"title\") == \"wikipedia\":\n return link.get(\"url\")\n return None\n\n\ndef get_inventaire_id(links: list[Any]) -> Optional[str]:\n \"\"\"extract and format inventaire ids\"\"\"\n if not isinstance(links, list):\n return None\n\n for link in links:\n if not isinstance(link, dict):\n continue\n if link.get(\"title\") == \"inventaire.io\":\n iv_link = link.get(\"url\")\n if not isinstance(iv_link, str):\n return None\n return iv_link.split(\"/\")[-1]\n return None\n\n\ndef pick_default_edition(options: list[JsonDict]) -> Optional[JsonDict]:\n \"\"\"favor physical copies with covers in english\"\"\"\n if not options:\n return None\n if 
len(options) == 1:\n return options[0]\n\n options = [e for e in options if e.get(\"covers\")] or options\n options = [\n e for e in options if \"/languages/eng\" in str(e.get(\"languages\"))\n ] or options\n formats = [\"paperback\", \"hardcover\", \"mass market paperback\"]\n options = [\n e for e in options if str(e.get(\"physical_format\")).lower() in formats\n ] or options\n options = [e for e in options if e.get(\"isbn_13\")] or options\n options = [e for e in options if e.get(\"ocaid\")] or options\n return options[0]\n", "path": "bookwyrm/connectors/openlibrary.py"}, {"content": "\"\"\"Clean user-provided text\"\"\"\nimport bleach\n\n\ndef clean(input_text: str) -> str:\n \"\"\"Run through \"bleach\" \"\"\"\n return bleach.clean(\n input_text,\n tags=[\n \"p\",\n \"blockquote\",\n \"br\",\n \"b\",\n \"i\",\n \"strong\",\n \"em\",\n \"pre\",\n \"a\",\n \"span\",\n \"ul\",\n \"ol\",\n \"li\",\n ],\n attributes=[\"href\", \"rel\", \"src\", \"alt\", \"data-mention\"],\n strip=True,\n )\n", "path": "bookwyrm/utils/sanitizer.py"}]}
| 4,024 | 447 |
gh_patches_debug_26805
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kanal7 Plugin defective!
Hi there,
can you have a look at kanal7.py, please?
"error: No playable streams found on this URL"
Greetings
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/kanal7.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8
9
10 class Kanal7(Plugin):
11 url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle")
12 iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"')
13 stream_re = re.compile(r'src: "(http[^"]*?)"')
14
15 @classmethod
16 def can_handle_url(cls, url):
17 return cls.url_re.match(url) is not None
18
19 def find_iframe(self, url):
20 res = http.get(url)
21 # find iframe url
22 iframe = self.iframe_re.search(res.text)
23 iframe_url = iframe and iframe.group(1)
24 if iframe_url:
25 self.logger.debug("Found iframe: {}", iframe_url)
26 return iframe_url
27
28 def _get_streams(self):
29 iframe1 = self.find_iframe(self.url)
30 if iframe1:
31 iframe2 = self.find_iframe(iframe1)
32 if iframe2:
33 ires = http.get(iframe2)
34 stream_m = self.stream_re.search(ires.text)
35 stream_url = stream_m and stream_m.group(1)
36 if stream_url:
37 yield "live", HLSStream(self.session, stream_url)
38 else:
39 self.logger.error("Could not find second iframe, has the page layout changed?")
40 else:
41 self.logger.error("Could not find iframe, has the page layout changed?")
42
43
44 __plugin__ = Kanal7
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py
--- a/src/streamlink/plugins/kanal7.py
+++ b/src/streamlink/plugins/kanal7.py
@@ -3,6 +3,7 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
+from streamlink.plugin.api import useragents
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
@@ -10,7 +11,7 @@
class Kanal7(Plugin):
url_re = re.compile(r"https?://(?:www.)?kanal7.com/canli-izle")
iframe_re = re.compile(r'iframe .*?src="(http://[^"]*?)"')
- stream_re = re.compile(r'src: "(http[^"]*?)"')
+ stream_re = re.compile(r'src="(http[^"]*?)"')
@classmethod
def can_handle_url(cls, url):
@@ -34,7 +35,7 @@
stream_m = self.stream_re.search(ires.text)
stream_url = stream_m and stream_m.group(1)
if stream_url:
- yield "live", HLSStream(self.session, stream_url)
+ yield "live", HLSStream(self.session, stream_url, headers={"Referer": iframe2})
else:
self.logger.error("Could not find second iframe, has the page layout changed?")
else:
|
{"golden_diff": "diff --git a/src/streamlink/plugins/kanal7.py b/src/streamlink/plugins/kanal7.py\n--- a/src/streamlink/plugins/kanal7.py\n+++ b/src/streamlink/plugins/kanal7.py\n@@ -3,6 +3,7 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n+from streamlink.plugin.api import useragents\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n \n@@ -10,7 +11,7 @@\n class Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n- stream_re = re.compile(r'src: \"(http[^\"]*?)\"')\n+ stream_re = re.compile(r'src=\"(http[^\"]*?)\"')\n \n @classmethod\n def can_handle_url(cls, url):\n@@ -34,7 +35,7 @@\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n- yield \"live\", HLSStream(self.session, stream_url)\n+ yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n", "issue": "Kanal7 Plugin defective!\nHi there,\r\n\r\ncan you have a look on the kanal7.py please?\r\n\r\n\"error: No playable streams found on this URL\"\r\n\r\nGreetings\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'src: \"(http[^\"]*?)\"')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url)\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}], "after_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import useragents\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass Kanal7(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?kanal7.com/canli-izle\")\n iframe_re = re.compile(r'iframe .*?src=\"(http://[^\"]*?)\"')\n stream_re = re.compile(r'src=\"(http[^\"]*?)\"')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def find_iframe(self, url):\n res = http.get(url)\n # find iframe url\n iframe = self.iframe_re.search(res.text)\n iframe_url = iframe and iframe.group(1)\n if iframe_url:\n self.logger.debug(\"Found iframe: {}\", iframe_url)\n return iframe_url\n\n def _get_streams(self):\n iframe1 = 
self.find_iframe(self.url)\n if iframe1:\n iframe2 = self.find_iframe(iframe1)\n if iframe2:\n ires = http.get(iframe2)\n stream_m = self.stream_re.search(ires.text)\n stream_url = stream_m and stream_m.group(1)\n if stream_url:\n yield \"live\", HLSStream(self.session, stream_url, headers={\"Referer\": iframe2})\n else:\n self.logger.error(\"Could not find second iframe, has the page layout changed?\")\n else:\n self.logger.error(\"Could not find iframe, has the page layout changed?\")\n\n\n__plugin__ = Kanal7\n", "path": "src/streamlink/plugins/kanal7.py"}]}
| 732 | 317 |
gh_patches_debug_24443
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-2345
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: Tensor conversion requested dtype float32 for Tensor with dtype uint8 when using losses.SigmoidFocalCrossEntropy
**System information**
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 20.04
- TensorFlow version and how it was installed (source or binary): 2.4.0, pip
- TensorFlow-Addons version and how it was installed (source or binary): 0.11.2, pip
- Python version: 3.8
- Is GPU used? (yes/no): yes
```
ValueError: in user code:
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:805 train_function *
return step_function(self, iterator)
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow_addons/utils/keras_utils.py:61 call *
return self.fn(y_true, y_pred, **self._fn_kwargs)
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow_addons/losses/focal_loss.py:122 sigmoid_focal_crossentropy *
y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:201 wrapper **
return target(*args, **kwargs)
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1404 convert_to_tensor_v2_with_dispatch
return convert_to_tensor_v2(
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1410 convert_to_tensor_v2
return convert_to_tensor(
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/profiler/trace.py:163 wrapped
return func(*args, **kwargs)
/home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1507 convert_to_tensor
raise ValueError(
ValueError: Tensor conversion requested dtype float32 for Tensor with dtype uint8: <tf.Tensor 'y_true:0' shape=(None, None, None, 1) dtype=uint8>
```
When y_true had dtype of uint8 and y_pred had dtype float32, `tf.convert_to_tensor(y_true, y_pred.dtype)` in focal loss function failed.
Is it intended that y_true and y_pred have the same dtype when passed to the loss function? Do I need to convert y_true into float32 tensor in data processing step?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/losses/focal_loss.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implements Focal loss."""
16
17 import tensorflow as tf
18 import tensorflow.keras.backend as K
19 from typeguard import typechecked
20
21 from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
22 from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
23
24
25 @tf.keras.utils.register_keras_serializable(package="Addons")
26 class SigmoidFocalCrossEntropy(LossFunctionWrapper):
27 """Implements the focal loss function.
28
29 Focal loss was first introduced in the RetinaNet paper
30 (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for
31 classification when you have highly imbalanced classes. It down-weights
32 well-classified examples and focuses on hard examples. The loss value is
33 much high for a sample which is misclassified by the classifier as compared
34 to the loss value corresponding to a well-classified example. One of the
35 best use-cases of focal loss is its usage in object detection where the
36 imbalance between the background class and other classes is extremely high.
37
38 Usage:
39
40 >>> fl = tfa.losses.SigmoidFocalCrossEntropy()
41 >>> loss = fl(
42 ... y_true = [[1.0], [1.0], [0.0]],y_pred = [[0.97], [0.91], [0.03]])
43 >>> loss
44 <tf.Tensor: shape=(3,), dtype=float32, numpy=array([6.8532745e-06, 1.9097870e-04, 2.0559824e-05],
45 dtype=float32)>
46
47 Usage with `tf.keras` API:
48
49 >>> model = tf.keras.Model()
50 >>> model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())
51
52 Args:
53 alpha: balancing factor, default value is 0.25.
54 gamma: modulating factor, default value is 2.0.
55
56 Returns:
57 Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
58 shape as `y_true`; otherwise, it is scalar.
59
60 Raises:
61 ValueError: If the shape of `sample_weight` is invalid or value of
62 `gamma` is less than zero.
63 """
64
65 @typechecked
66 def __init__(
67 self,
68 from_logits: bool = False,
69 alpha: FloatTensorLike = 0.25,
70 gamma: FloatTensorLike = 2.0,
71 reduction: str = tf.keras.losses.Reduction.NONE,
72 name: str = "sigmoid_focal_crossentropy",
73 ):
74 super().__init__(
75 sigmoid_focal_crossentropy,
76 name=name,
77 reduction=reduction,
78 from_logits=from_logits,
79 alpha=alpha,
80 gamma=gamma,
81 )
82
83
84 @tf.keras.utils.register_keras_serializable(package="Addons")
85 @tf.function
86 def sigmoid_focal_crossentropy(
87 y_true: TensorLike,
88 y_pred: TensorLike,
89 alpha: FloatTensorLike = 0.25,
90 gamma: FloatTensorLike = 2.0,
91 from_logits: bool = False,
92 ) -> tf.Tensor:
93 """Implements the focal loss function.
94
95 Focal loss was first introduced in the RetinaNet paper
96 (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for
97 classification when you have highly imbalanced classes. It down-weights
98 well-classified examples and focuses on hard examples. The loss value is
99 much high for a sample which is misclassified by the classifier as compared
100 to the loss value corresponding to a well-classified example. One of the
101 best use-cases of focal loss is its usage in object detection where the
102 imbalance between the background class and other classes is extremely high.
103
104 Args:
105 y_true: true targets tensor.
106 y_pred: predictions tensor.
107 alpha: balancing factor.
108 gamma: modulating factor.
109
110 Returns:
111 Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the
112 same shape as `y_true`; otherwise, it is scalar.
113 """
114 if gamma and gamma < 0:
115 raise ValueError("Value of gamma should be greater than or equal to zero")
116
117 y_pred = tf.convert_to_tensor(y_pred)
118 y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
119
120 # Get the cross_entropy for each entry
121 ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
122
123 # If logits are provided then convert the predictions into probabilities
124 if from_logits:
125 pred_prob = tf.sigmoid(y_pred)
126 else:
127 pred_prob = y_pred
128
129 p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
130 alpha_factor = 1.0
131 modulating_factor = 1.0
132
133 if alpha:
134 alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
135 alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
136
137 if gamma:
138 gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
139 modulating_factor = tf.pow((1.0 - p_t), gamma)
140
141 # compute the final loss and return
142 return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py
--- a/tensorflow_addons/losses/focal_loss.py
+++ b/tensorflow_addons/losses/focal_loss.py
@@ -112,10 +112,10 @@
same shape as `y_true`; otherwise, it is scalar.
"""
if gamma and gamma < 0:
- raise ValueError("Value of gamma should be greater than or equal to zero")
+ raise ValueError("Value of gamma should be greater than or equal to zero.")
y_pred = tf.convert_to_tensor(y_pred)
- y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
+ y_true = tf.cast(y_true, dtype=y_pred.dtype)
# Get the cross_entropy for each entry
ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
@@ -131,11 +131,11 @@
modulating_factor = 1.0
if alpha:
- alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
+ alpha = tf.cast(alpha, dtype=y_true.dtype)
alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
if gamma:
- gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
+ gamma = tf.cast(gamma, dtype=y_true.dtype)
modulating_factor = tf.pow((1.0 - p_t), gamma)
# compute the final loss and return
|
{"golden_diff": "diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py\n--- a/tensorflow_addons/losses/focal_loss.py\n+++ b/tensorflow_addons/losses/focal_loss.py\n@@ -112,10 +112,10 @@\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n- raise ValueError(\"Value of gamma should be greater than or equal to zero\")\n+ raise ValueError(\"Value of gamma should be greater than or equal to zero.\")\n \n y_pred = tf.convert_to_tensor(y_pred)\n- y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\n+ y_true = tf.cast(y_true, dtype=y_pred.dtype)\n \n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n@@ -131,11 +131,11 @@\n modulating_factor = 1.0\n \n if alpha:\n- alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())\n+ alpha = tf.cast(alpha, dtype=y_true.dtype)\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n \n if gamma:\n- gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())\n+ gamma = tf.cast(gamma, dtype=y_true.dtype)\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n \n # compute the final loss and return\n", "issue": " ValueError: Tensor conversion requested dtype float32 for Tensor with dtype uint8 when using losses.SigmoidFocalCrossEntropy\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 20.04\r\n- TensorFlow version and how it was installed (source or binary): 2.4.0, pip\r\n- TensorFlow-Addons version and how it was installed (source or binary): 0.11.2, pip\r\n- Python version: 3.8\r\n- Is GPU used? (yes/no): yes\r\n\r\n```\r\nValueError: in user code:\r\n\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:805 train_function *\r\n return step_function(self, iterator)\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow_addons/utils/keras_utils.py:61 call *\r\n return self.fn(y_true, y_pred, **self._fn_kwargs)\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow_addons/losses/focal_loss.py:122 sigmoid_focal_crossentropy *\r\n y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:201 wrapper **\r\n return target(*args, **kwargs)\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1404 convert_to_tensor_v2_with_dispatch\r\n return convert_to_tensor_v2(\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1410 convert_to_tensor_v2\r\n return convert_to_tensor(\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/profiler/trace.py:163 wrapped\r\n return func(*args, **kwargs)\r\n /home/eck/software/anaconda3/envs/hk2/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:1507 convert_to_tensor\r\n raise ValueError(\r\n\r\n ValueError: Tensor conversion requested dtype float32 for Tensor with dtype uint8: <tf.Tensor 'y_true:0' shape=(None, None, None, 1) dtype=uint8>\r\n```\r\n\r\n\r\nWhen y_true had dtype of uint8 and y_pred had dtype float32, `tf.conver_to_tensor(y_true, y_pred.dtype)` in focal loss function failed.\r\n\r\nIs it intended that y_true and y_pred have the same dtype when passed to the loss function? 
Do I need to convert y_true into float32 tensor in data processing step?\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Focal loss.\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom typeguard import typechecked\n\nfrom tensorflow_addons.utils.keras_utils import LossFunctionWrapper\nfrom tensorflow_addons.utils.types import FloatTensorLike, TensorLike\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SigmoidFocalCrossEntropy(LossFunctionWrapper):\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Usage:\n\n >>> fl = tfa.losses.SigmoidFocalCrossEntropy()\n >>> loss = fl(\n ... y_true = [[1.0], [1.0], [0.0]],y_pred = [[0.97], [0.91], [0.03]])\n >>> loss\n <tf.Tensor: shape=(3,), dtype=float32, numpy=array([6.8532745e-06, 1.9097870e-04, 2.0559824e-05],\n dtype=float32)>\n\n Usage with `tf.keras` API:\n\n >>> model = tf.keras.Model()\n >>> model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\n\n Args:\n alpha: balancing factor, default value is 0.25.\n gamma: modulating factor, default value is 2.0.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `y_true`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid or value of\n `gamma` is less than zero.\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n from_logits: bool = False,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n reduction: str = tf.keras.losses.Reduction.NONE,\n name: str = \"sigmoid_focal_crossentropy\",\n ):\n super().__init__(\n sigmoid_focal_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n alpha=alpha,\n gamma=gamma,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\[email protected]\ndef sigmoid_focal_crossentropy(\n y_true: TensorLike,\n y_pred: TensorLike,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n from_logits: bool = False,\n) -> tf.Tensor:\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. 
It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Args:\n y_true: true targets tensor.\n y_pred: predictions tensor.\n alpha: balancing factor.\n gamma: modulating factor.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)\n", "path": "tensorflow_addons/losses/focal_loss.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Focal loss.\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom typeguard import typechecked\n\nfrom tensorflow_addons.utils.keras_utils import LossFunctionWrapper\nfrom tensorflow_addons.utils.types import FloatTensorLike, TensorLike\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SigmoidFocalCrossEntropy(LossFunctionWrapper):\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Usage:\n\n >>> fl = tfa.losses.SigmoidFocalCrossEntropy()\n >>> loss = fl(\n ... 
y_true = [[1.0], [1.0], [0.0]],y_pred = [[0.97], [0.91], [0.03]])\n >>> loss\n <tf.Tensor: shape=(3,), dtype=float32, numpy=array([6.8532745e-06, 1.9097870e-04, 2.0559824e-05],\n dtype=float32)>\n\n Usage with `tf.keras` API:\n\n >>> model = tf.keras.Model()\n >>> model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\n\n Args:\n alpha: balancing factor, default value is 0.25.\n gamma: modulating factor, default value is 2.0.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `y_true`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid or value of\n `gamma` is less than zero.\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n from_logits: bool = False,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n reduction: str = tf.keras.losses.Reduction.NONE,\n name: str = \"sigmoid_focal_crossentropy\",\n ):\n super().__init__(\n sigmoid_focal_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n alpha=alpha,\n gamma=gamma,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\[email protected]\ndef sigmoid_focal_crossentropy(\n y_true: TensorLike,\n y_pred: TensorLike,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n from_logits: bool = False,\n) -> tf.Tensor:\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Args:\n y_true: true targets tensor.\n y_pred: predictions tensor.\n alpha: balancing factor.\n gamma: modulating factor.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero.\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.cast(alpha, dtype=y_true.dtype)\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.cast(gamma, dtype=y_true.dtype)\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)\n", "path": "tensorflow_addons/losses/focal_loss.py"}]}
| 2,568 | 351 |
gh_patches_debug_23275
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-3304
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
draft and live action labels are unclear

I don't think it's clear what the 'draft' and 'live' buttons will do. Could we include the full labels 'Preview draft' and 'View live'?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailadmin/wagtail_hooks.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 from django import forms
4 from django.contrib.auth.models import Permission
5 from django.contrib.staticfiles.templatetags.staticfiles import static
6 from django.core.urlresolvers import reverse
7 from django.utils.translation import ugettext_lazy as _
8 from wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu
9 from wagtail.wagtailadmin.search import SearchArea
10 from wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton
11 from wagtail.wagtailcore import hooks
12 from wagtail.wagtailcore.permissions import collection_permission_policy
13
14
15 class ExplorerMenuItem(MenuItem):
16 @property
17 def media(self):
18 return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')])
19
20
21 @hooks.register('register_admin_menu_item')
22 def register_explorer_menu_item():
23 return ExplorerMenuItem(
24 _('Explorer'), reverse('wagtailadmin_explore_root'),
25 name='explorer',
26 classnames='icon icon-folder-open-inverse dl-trigger',
27 attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')},
28 order=100)
29
30
31 class SettingsMenuItem(SubmenuMenuItem):
32 template = 'wagtailadmin/shared/menu_settings_menu_item.html'
33
34
35 @hooks.register('register_admin_menu_item')
36 def register_settings_menu():
37 return SettingsMenuItem(
38 _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000)
39
40
41 @hooks.register('register_permissions')
42 def register_permissions():
43 return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')
44
45
46 @hooks.register('register_admin_search_area')
47 def register_pages_search_area():
48 return SearchArea(
49 _('Pages'), reverse('wagtailadmin_pages:search'),
50 name='pages',
51 classnames='icon icon-folder-open-inverse',
52 order=100)
53
54
55 class CollectionsMenuItem(MenuItem):
56 def is_shown(self, request):
57 return collection_permission_policy.user_has_any_permission(
58 request.user, ['add', 'change', 'delete']
59 )
60
61
62 @hooks.register('register_settings_menu_item')
63 def register_collections_menu_item():
64 return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700)
65
66
67 @hooks.register('register_page_listing_buttons')
68 def page_listing_buttons(page, page_perms, is_parent=False):
69 if page_perms.can_edit():
70 yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),
71 attrs={'title': _('Edit this page')}, priority=10)
72 if page.has_unpublished_changes:
73 yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),
74 attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)
75 if page.live and page.url:
76 yield PageListingButton(_('Live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30)
77 if page_perms.can_add_subpage():
78 if is_parent:
79 yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
80 attrs={'title': _("Add a child page to '{0}' ").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40)
81 else:
82 yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
83 attrs={'title': _("Add a child page to '{0}' ").format(page.get_admin_display_title())}, priority=40)
84
85 yield ButtonWithDropdownFromHook(
86 _('More'),
87 hook_name='register_page_listing_more_buttons',
88 page=page,
89 page_perms=page_perms,
90 is_parent=is_parent,
91 attrs={'target': '_blank', 'title': _('View more options')}, priority=50)
92
93
94 @hooks.register('register_page_listing_more_buttons')
95 def page_listing_more_buttons(page, page_perms, is_parent=False):
96 if page_perms.can_move():
97 yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]),
98 attrs={"title": _('Move this page')}, priority=10)
99 if not page.is_root():
100 yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]),
101 attrs={'title': _('Copy this page')}, priority=20)
102 if page_perms.can_delete():
103 yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]),
104 attrs={'title': _('Delete this page')}, priority=30)
105 if page_perms.can_unpublish():
106 yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]),
107 attrs={'title': _('Unpublish this page')}, priority=40)
108 if not page.is_root():
109 yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]),
110 attrs={'title': _("View this page's revision history")}, priority=50)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/wagtailadmin/wagtail_hooks.py b/wagtail/wagtailadmin/wagtail_hooks.py
--- a/wagtail/wagtailadmin/wagtail_hooks.py
+++ b/wagtail/wagtailadmin/wagtail_hooks.py
@@ -70,10 +70,10 @@
yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),
attrs={'title': _('Edit this page')}, priority=10)
if page.has_unpublished_changes:
- yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),
+ yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),
attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)
if page.live and page.url:
- yield PageListingButton(_('Live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30)
+ yield PageListingButton(_('View live'), page.url, attrs={'target': "_blank", 'title': _('View live')}, priority=30)
if page_perms.can_add_subpage():
if is_parent:
yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),
|
{"golden_diff": "diff --git a/wagtail/wagtailadmin/wagtail_hooks.py b/wagtail/wagtailadmin/wagtail_hooks.py\n--- a/wagtail/wagtailadmin/wagtail_hooks.py\n+++ b/wagtail/wagtailadmin/wagtail_hooks.py\n@@ -70,10 +70,10 @@\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n- yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n+ yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n- yield PageListingButton(_('Live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n+ yield PageListingButton(_('View live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n", "issue": "draft and live action labels are unclear \n\r\n\r\nI don't think it's clear what the 'draft' and 'live' buttons will do. Could we include the full labels 'Preview draft' and 'View live'? \n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu\nfrom wagtail.wagtailadmin.search import SearchArea\nfrom wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.permissions import collection_permission_policy\n\n\nclass ExplorerMenuItem(MenuItem):\n @property\n def media(self):\n return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')])\n\n\[email protected]('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Explorer'), reverse('wagtailadmin_explore_root'),\n name='explorer',\n classnames='icon icon-folder-open-inverse dl-trigger',\n attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')},\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\[email protected]('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000)\n\n\[email protected]('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\[email protected]('register_admin_search_area')\ndef register_pages_search_area():\n return SearchArea(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email protected]('register_settings_menu_item')\ndef register_collections_menu_item():\n return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', 
order=700)\n\n\[email protected]('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False):\n if page_perms.can_edit():\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n yield PageListingButton(_('Draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n yield PageListingButton(_('Live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40)\n else:\n yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, priority=40)\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n attrs={'target': '_blank', 'title': _('View more options')}, priority=50)\n\n\[email protected]('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False):\n if page_perms.can_move():\n yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _('Move this page')}, priority=10)\n if not page.is_root():\n yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]),\n attrs={'title': _('Copy this page')}, priority=20)\n if page_perms.can_delete():\n yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]),\n attrs={'title': _('Delete this page')}, priority=30)\n if page_perms.can_unpublish():\n yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]),\n attrs={'title': _('Unpublish this page')}, priority=40)\n if not page.is_root():\n yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]),\n attrs={'title': _(\"View this page's revision history\")}, priority=50)\n", "path": "wagtail/wagtailadmin/wagtail_hooks.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\nfrom django.core.urlresolvers import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom wagtail.wagtailadmin.menu import MenuItem, SubmenuMenuItem, settings_menu\nfrom wagtail.wagtailadmin.search import SearchArea\nfrom wagtail.wagtailadmin.widgets import Button, ButtonWithDropdownFromHook, PageListingButton\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.permissions import collection_permission_policy\n\n\nclass ExplorerMenuItem(MenuItem):\n @property\n def media(self):\n return forms.Media(js=[static('wagtailadmin/js/explorer-menu.js')])\n\n\[email protected]('register_admin_menu_item')\ndef register_explorer_menu_item():\n return ExplorerMenuItem(\n _('Explorer'), reverse('wagtailadmin_explore_root'),\n name='explorer',\n classnames='icon icon-folder-open-inverse dl-trigger',\n 
attrs={'data-explorer-menu-url': reverse('wagtailadmin_explorer_nav')},\n order=100)\n\n\nclass SettingsMenuItem(SubmenuMenuItem):\n template = 'wagtailadmin/shared/menu_settings_menu_item.html'\n\n\[email protected]('register_admin_menu_item')\ndef register_settings_menu():\n return SettingsMenuItem(\n _('Settings'), settings_menu, classnames='icon icon-cogs', order=10000)\n\n\[email protected]('register_permissions')\ndef register_permissions():\n return Permission.objects.filter(content_type__app_label='wagtailadmin', codename='access_admin')\n\n\[email protected]('register_admin_search_area')\ndef register_pages_search_area():\n return SearchArea(\n _('Pages'), reverse('wagtailadmin_pages:search'),\n name='pages',\n classnames='icon icon-folder-open-inverse',\n order=100)\n\n\nclass CollectionsMenuItem(MenuItem):\n def is_shown(self, request):\n return collection_permission_policy.user_has_any_permission(\n request.user, ['add', 'change', 'delete']\n )\n\n\[email protected]('register_settings_menu_item')\ndef register_collections_menu_item():\n return CollectionsMenuItem(_('Collections'), reverse('wagtailadmin_collections:index'), classnames='icon icon-folder-open-1', order=700)\n\n\[email protected]('register_page_listing_buttons')\ndef page_listing_buttons(page, page_perms, is_parent=False):\n if page_perms.can_edit():\n yield PageListingButton(_('Edit'), reverse('wagtailadmin_pages:edit', args=[page.id]),\n attrs={'title': _('Edit this page')}, priority=10)\n if page.has_unpublished_changes:\n yield PageListingButton(_('View draft'), reverse('wagtailadmin_pages:view_draft', args=[page.id]),\n attrs={'title': _('Preview draft'), 'target': '_blank'}, priority=20)\n if page.live and page.url:\n yield PageListingButton(_('View live'), page.url, attrs={'target': \"_blank\", 'title': _('View live')}, priority=30)\n if page_perms.can_add_subpage():\n if is_parent:\n yield Button(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, classes={'button', 'button-small', 'bicolor', 'icon', 'white', 'icon-plus'}, priority=40)\n else:\n yield PageListingButton(_('Add child page'), reverse('wagtailadmin_pages:add_subpage', args=[page.id]),\n attrs={'title': _(\"Add a child page to '{0}' \").format(page.get_admin_display_title())}, priority=40)\n\n yield ButtonWithDropdownFromHook(\n _('More'),\n hook_name='register_page_listing_more_buttons',\n page=page,\n page_perms=page_perms,\n is_parent=is_parent,\n attrs={'target': '_blank', 'title': _('View more options')}, priority=50)\n\n\[email protected]('register_page_listing_more_buttons')\ndef page_listing_more_buttons(page, page_perms, is_parent=False):\n if page_perms.can_move():\n yield Button(_('Move'), reverse('wagtailadmin_pages:move', args=[page.id]),\n attrs={\"title\": _('Move this page')}, priority=10)\n if not page.is_root():\n yield Button(_('Copy'), reverse('wagtailadmin_pages:copy', args=[page.id]),\n attrs={'title': _('Copy this page')}, priority=20)\n if page_perms.can_delete():\n yield Button(_('Delete'), reverse('wagtailadmin_pages:delete', args=[page.id]),\n attrs={'title': _('Delete this page')}, priority=30)\n if page_perms.can_unpublish():\n yield Button(_('Unpublish'), reverse('wagtailadmin_pages:unpublish', args=[page.id]),\n attrs={'title': _('Unpublish this page')}, priority=40)\n if not page.is_root():\n yield Button(_('Revisions'), reverse('wagtailadmin_pages:revisions_index', args=[page.id]),\n 
attrs={'title': _(\"View this page's revision history\")}, priority=50)\n", "path": "wagtail/wagtailadmin/wagtail_hooks.py"}]}
| 1,740 | 301 |
gh_patches_debug_14487
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2853
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make API redirects respect query params
<!--
Please add the appropriate label for what change should be made:
docs: changes to the documentation
refactor: refactoring production code, eg. renaming a variable or rewriting a function
test: adding missing tests, refactoring tests; no production code change
chore: updating poetry etc; no production code change
-->
### Describe the change
As seen in #2828 and #2821, some old API endpoints are redirected to their new replacements. This redirection does not propagate any query parameters. It would be nice to pass query parameters along when redirecting to a replacement API endpoint to prevent breaking consumers.
### Motivation
#2828 #2821
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/partners/api/v2/urls.py`
Content:
```
1 """Partners app API v2 urls."""
2 from django.urls import path
3 from django.views.generic import RedirectView
4
5 from partners.api.v2.views import (
6 PartnerDetailView,
7 PartnerListView,
8 VacancyCategoryListView,
9 VacancyDetailView,
10 VacancyListView,
11 )
12
13 app_name = "partners"
14
15 urlpatterns = [
16 path(
17 "partners/events/",
18 RedirectView.as_view(
19 pattern_name="api:v2:events:external-events-list", permanent=False
20 ),
21 name="partner-events-list",
22 ),
23 path(
24 "partners/events/<int:pk>/",
25 RedirectView.as_view(
26 pattern_name="api:v2:events:external-event-detail", permanent=False
27 ),
28 name="partner-events-detail",
29 ),
30 path("partners/vacancies/", VacancyListView.as_view(), name="vacancies-list"),
31 path(
32 "partners/vacancies/categories/",
33 VacancyCategoryListView.as_view(),
34 name="vacancy-categories-list",
35 ),
36 path(
37 "partners/vacancies/<int:pk>/",
38 VacancyDetailView.as_view(),
39 name="vacancies-detail",
40 ),
41 path("partners/", PartnerListView.as_view(), name="partners-list"),
42 path("partners/<int:pk>/", PartnerDetailView.as_view(), name="partners-detail"),
43 ]
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/partners/api/v2/urls.py b/website/partners/api/v2/urls.py
--- a/website/partners/api/v2/urls.py
+++ b/website/partners/api/v2/urls.py
@@ -16,14 +16,18 @@
path(
"partners/events/",
RedirectView.as_view(
- pattern_name="api:v2:events:external-events-list", permanent=False
+ pattern_name="api:v2:events:external-events-list",
+ permanent=False,
+ query_string=True,
),
name="partner-events-list",
),
path(
"partners/events/<int:pk>/",
RedirectView.as_view(
- pattern_name="api:v2:events:external-event-detail", permanent=False
+ pattern_name="api:v2:events:external-event-detail",
+ permanent=False,
+ query_string=True,
),
name="partner-events-detail",
),
|
{"golden_diff": "diff --git a/website/partners/api/v2/urls.py b/website/partners/api/v2/urls.py\n--- a/website/partners/api/v2/urls.py\n+++ b/website/partners/api/v2/urls.py\n@@ -16,14 +16,18 @@\n path(\n \"partners/events/\",\n RedirectView.as_view(\n- pattern_name=\"api:v2:events:external-events-list\", permanent=False\n+ pattern_name=\"api:v2:events:external-events-list\",\n+ permanent=False,\n+ query_string=True,\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n- pattern_name=\"api:v2:events:external-event-detail\", permanent=False\n+ pattern_name=\"api:v2:events:external-event-detail\",\n+ permanent=False,\n+ query_string=True,\n ),\n name=\"partner-events-detail\",\n ),\n", "issue": "Make API redirects respect query params\n<!--\n\nPlease add the appropriate label for what change should be made:\ndocs: changes to the documentation)\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\ntest: adding missing tests, refactoring tests; no production code change\nchore: updating poetry etc; no production code change\n\n-->\n\n### Describe the change\nAs seen in #2828 and #2821, some old API endpoints are redirected to their new replacements. This redirection does not propagate any query parameters. It would be nice to pass query parameters along when redirecting to a replacement API endpoint to prevent breaking consumers.\n\n### Motivation\n#2828 #2821\n", "before_files": [{"content": "\"\"\"Partners app API v2 urls.\"\"\"\nfrom django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom partners.api.v2.views import (\n PartnerDetailView,\n PartnerListView,\n VacancyCategoryListView,\n VacancyDetailView,\n VacancyListView,\n)\n\napp_name = \"partners\"\n\nurlpatterns = [\n path(\n \"partners/events/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-events-list\", permanent=False\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-event-detail\", permanent=False\n ),\n name=\"partner-events-detail\",\n ),\n path(\"partners/vacancies/\", VacancyListView.as_view(), name=\"vacancies-list\"),\n path(\n \"partners/vacancies/categories/\",\n VacancyCategoryListView.as_view(),\n name=\"vacancy-categories-list\",\n ),\n path(\n \"partners/vacancies/<int:pk>/\",\n VacancyDetailView.as_view(),\n name=\"vacancies-detail\",\n ),\n path(\"partners/\", PartnerListView.as_view(), name=\"partners-list\"),\n path(\"partners/<int:pk>/\", PartnerDetailView.as_view(), name=\"partners-detail\"),\n]\n", "path": "website/partners/api/v2/urls.py"}], "after_files": [{"content": "\"\"\"Partners app API v2 urls.\"\"\"\nfrom django.urls import path\nfrom django.views.generic import RedirectView\n\nfrom partners.api.v2.views import (\n PartnerDetailView,\n PartnerListView,\n VacancyCategoryListView,\n VacancyDetailView,\n VacancyListView,\n)\n\napp_name = \"partners\"\n\nurlpatterns = [\n path(\n \"partners/events/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-events-list\",\n permanent=False,\n query_string=True,\n ),\n name=\"partner-events-list\",\n ),\n path(\n \"partners/events/<int:pk>/\",\n RedirectView.as_view(\n pattern_name=\"api:v2:events:external-event-detail\",\n permanent=False,\n query_string=True,\n ),\n name=\"partner-events-detail\",\n ),\n path(\"partners/vacancies/\", VacancyListView.as_view(), name=\"vacancies-list\"),\n path(\n 
\"partners/vacancies/categories/\",\n VacancyCategoryListView.as_view(),\n name=\"vacancy-categories-list\",\n ),\n path(\n \"partners/vacancies/<int:pk>/\",\n VacancyDetailView.as_view(),\n name=\"vacancies-detail\",\n ),\n path(\"partners/\", PartnerListView.as_view(), name=\"partners-list\"),\n path(\"partners/<int:pk>/\", PartnerDetailView.as_view(), name=\"partners-detail\"),\n]\n", "path": "website/partners/api/v2/urls.py"}]}
| 763 | 208 |
gh_patches_debug_8139
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-5957
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve msg error for submodules with invalid url schemas
We only support `http` schemas, but the error message isn't clear about that. Here is a good suggestion: https://github.com/readthedocs/readthedocs.org/issues/5928#issuecomment-511708197
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/projects/exceptions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Project exceptions."""
4
5 from django.conf import settings
6 from django.utils.translation import ugettext_noop as _
7
8 from readthedocs.doc_builder.exceptions import BuildEnvironmentError
9
10
11 class ProjectConfigurationError(BuildEnvironmentError):
12
13 """Error raised trying to configure a project for build."""
14
15 NOT_FOUND = _(
16 'A configuration file was not found. '
17 'Make sure you have a conf.py file in your repository.',
18 )
19
20 MULTIPLE_CONF_FILES = _(
21 'We found more than one conf.py and are not sure which one to use. '
22 'Please, specify the correct file under the Advanced settings tab '
23 "in the project's Admin.",
24 )
25
26
27 class RepositoryError(BuildEnvironmentError):
28
29 """Failure during repository operation."""
30
31 PRIVATE_ALLOWED = _(
32 'There was a problem connecting to your repository, '
33 'ensure that your repository URL is correct.',
34 )
35 PRIVATE_NOT_ALLOWED = _(
36 'There was a problem connecting to your repository, '
37 'ensure that your repository URL is correct and your repository is public. '
38 'Private repositories are not supported.',
39 )
40
41 INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')
42 INVALID_SUBMODULES_PATH = _(
43 'One or more submodule paths are not valid. '
44 'Check that all your submodules in .gitmodules are used.'
45 )
46
47 DUPLICATED_RESERVED_VERSIONS = _(
48 'You can not have two versions with the name latest or stable.',
49 )
50
51 FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')
52
53 def get_default_message(self):
54 if settings.ALLOW_PRIVATE_REPOS:
55 return self.PRIVATE_ALLOWED
56 return self.PRIVATE_NOT_ALLOWED
57
58
59 class ProjectSpamError(Exception):
60
61 """
62 Error raised when a project field has detected spam.
63
64 This error is not raised to users, we use this for banning users in the
65 background.
66 """
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py
--- a/readthedocs/projects/exceptions.py
+++ b/readthedocs/projects/exceptions.py
@@ -38,7 +38,10 @@
'Private repositories are not supported.',
)
- INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')
+ INVALID_SUBMODULES = _(
+ 'One or more submodule URLs are not valid: {}, '
+ 'git/ssh URL schemas for submodules are not supported.'
+ )
INVALID_SUBMODULES_PATH = _(
'One or more submodule paths are not valid. '
'Check that all your submodules in .gitmodules are used.'
|
{"golden_diff": "diff --git a/readthedocs/projects/exceptions.py b/readthedocs/projects/exceptions.py\n--- a/readthedocs/projects/exceptions.py\n+++ b/readthedocs/projects/exceptions.py\n@@ -38,7 +38,10 @@\n 'Private repositories are not supported.',\n )\n \n- INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')\n+ INVALID_SUBMODULES = _(\n+ 'One or more submodule URLs are not valid: {}, '\n+ 'git/ssh URL schemas for submodules are not supported.'\n+ )\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n", "issue": "Improve msg error for submodules with invalid url schemas\nWe only support `http` schemas, but the error message isn't clear about that. Here is a good suggestion: https://github.com/readthedocs/readthedocs.org/issues/5928#issuecomment-511708197\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. '\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n\n INVALID_SUBMODULES = _('One or more submodule URLs are not valid: {}.')\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n )\n\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Project exceptions.\"\"\"\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_noop as _\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentError\n\n\nclass ProjectConfigurationError(BuildEnvironmentError):\n\n \"\"\"Error raised trying to configure a project for build.\"\"\"\n\n NOT_FOUND = _(\n 'A configuration file was not found. '\n 'Make sure you have a conf.py file in your repository.',\n )\n\n MULTIPLE_CONF_FILES = _(\n 'We found more than one conf.py and are not sure which one to use. 
'\n 'Please, specify the correct file under the Advanced settings tab '\n \"in the project's Admin.\",\n )\n\n\nclass RepositoryError(BuildEnvironmentError):\n\n \"\"\"Failure during repository operation.\"\"\"\n\n PRIVATE_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct.',\n )\n PRIVATE_NOT_ALLOWED = _(\n 'There was a problem connecting to your repository, '\n 'ensure that your repository URL is correct and your repository is public. '\n 'Private repositories are not supported.',\n )\n\n INVALID_SUBMODULES = _(\n 'One or more submodule URLs are not valid: {}, '\n 'git/ssh URL schemas for submodules are not supported.'\n )\n INVALID_SUBMODULES_PATH = _(\n 'One or more submodule paths are not valid. '\n 'Check that all your submodules in .gitmodules are used.'\n )\n\n DUPLICATED_RESERVED_VERSIONS = _(\n 'You can not have two versions with the name latest or stable.',\n )\n\n FAILED_TO_CHECKOUT = _('Failed to checkout revision: {}')\n\n def get_default_message(self):\n if settings.ALLOW_PRIVATE_REPOS:\n return self.PRIVATE_ALLOWED\n return self.PRIVATE_NOT_ALLOWED\n\n\nclass ProjectSpamError(Exception):\n\n \"\"\"\n Error raised when a project field has detected spam.\n\n This error is not raised to users, we use this for banning users in the\n background.\n \"\"\"\n", "path": "readthedocs/projects/exceptions.py"}]}
| 877 | 160 |
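A hypothetical illustration of the check behind the reworded message above: only http/https submodule URLs are accepted, so git/ssh-style URLs are the ones that end up in the `INVALID_SUBMODULES` error. The helper below is not the readthedocs implementation, just a sketch of the idea.

```python
# Hypothetical helper, not readthedocs code: collect submodule URLs whose
# scheme is not http(s), which is what the improved error message warns about.
from urllib.parse import urlparse

def find_unsupported_submodule_urls(urls):
    return [u for u in urls if urlparse(u).scheme not in ("http", "https")]

print(find_unsupported_submodule_urls([
    "https://github.com/org/docs-theme.git",   # supported
    "git@github.com:org/private.git",          # ssh shorthand, rejected
]))
# ['git@github.com:org/private.git']
```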
gh_patches_debug_16298 | rasdani/github-patches | git_diff | getsentry__sentry-python-612
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DeprecationWarning in sqlalchemy integration
At integrations/sqlalchemy.py line 28 - integration is listening for dbapi_error, this was deprecated in sqlalchemy a long time ago and should be replaced with handle_error event
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/sqlalchemy.py`
Content:
```
1 from __future__ import absolute_import
2
3 from sentry_sdk._types import MYPY
4 from sentry_sdk.hub import Hub
5 from sentry_sdk.integrations import Integration
6 from sentry_sdk.tracing import record_sql_queries
7
8 from sqlalchemy.engine import Engine # type: ignore
9 from sqlalchemy.event import listen # type: ignore
10
11 if MYPY:
12 from typing import Any
13 from typing import ContextManager
14 from typing import Optional
15
16 from sentry_sdk.tracing import Span
17
18
19 class SqlalchemyIntegration(Integration):
20 identifier = "sqlalchemy"
21
22 @staticmethod
23 def setup_once():
24 # type: () -> None
25
26 listen(Engine, "before_cursor_execute", _before_cursor_execute)
27 listen(Engine, "after_cursor_execute", _after_cursor_execute)
28 listen(Engine, "dbapi_error", _dbapi_error)
29
30
31 def _before_cursor_execute(
32 conn, cursor, statement, parameters, context, executemany, *args
33 ):
34 # type: (Any, Any, Any, Any, Any, bool, *Any) -> None
35 hub = Hub.current
36 if hub.get_integration(SqlalchemyIntegration) is None:
37 return
38
39 ctx_mgr = record_sql_queries(
40 hub,
41 cursor,
42 statement,
43 parameters,
44 paramstyle=context and context.dialect and context.dialect.paramstyle or None,
45 executemany=executemany,
46 )
47 conn._sentry_sql_span_manager = ctx_mgr
48
49 span = ctx_mgr.__enter__()
50
51 if span is not None:
52 conn._sentry_sql_span = span
53
54
55 def _after_cursor_execute(conn, cursor, statement, *args):
56 # type: (Any, Any, Any, *Any) -> None
57 ctx_mgr = getattr(
58 conn, "_sentry_sql_span_manager", None
59 ) # type: ContextManager[Any]
60
61 if ctx_mgr is not None:
62 conn._sentry_sql_span_manager = None
63 ctx_mgr.__exit__(None, None, None)
64
65
66 def _dbapi_error(conn, *args):
67 # type: (Any, *Any) -> None
68 span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span]
69
70 if span is not None:
71 span.set_status("internal_error")
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py
--- a/sentry_sdk/integrations/sqlalchemy.py
+++ b/sentry_sdk/integrations/sqlalchemy.py
@@ -25,7 +25,7 @@
listen(Engine, "before_cursor_execute", _before_cursor_execute)
listen(Engine, "after_cursor_execute", _after_cursor_execute)
- listen(Engine, "dbapi_error", _dbapi_error)
+ listen(Engine, "handle_error", _handle_error)
def _before_cursor_execute(
@@ -63,8 +63,9 @@
ctx_mgr.__exit__(None, None, None)
-def _dbapi_error(conn, *args):
+def _handle_error(context, *args):
# type: (Any, *Any) -> None
+ conn = context.connection
span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span]
if span is not None:
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py\n--- a/sentry_sdk/integrations/sqlalchemy.py\n+++ b/sentry_sdk/integrations/sqlalchemy.py\n@@ -25,7 +25,7 @@\n \n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n- listen(Engine, \"dbapi_error\", _dbapi_error)\n+ listen(Engine, \"handle_error\", _handle_error)\n \n \n def _before_cursor_execute(\n@@ -63,8 +63,9 @@\n ctx_mgr.__exit__(None, None, None)\n \n \n-def _dbapi_error(conn, *args):\n+def _handle_error(context, *args):\n # type: (Any, *Any) -> None\n+ conn = context.connection\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n \n if span is not None:\n", "issue": "DeprecationWarning in sqlalchemy integration\nAt integrations/sqlalchemy.py line 28 - integration is listening for dbapi_error, this was deprecated in sqlalchemy a long time ago and should be replaced with handle_error event\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.tracing import record_sql_queries\n\nfrom sqlalchemy.engine import Engine # type: ignore\nfrom sqlalchemy.event import listen # type: ignore\n\nif MYPY:\n from typing import Any\n from typing import ContextManager\n from typing import Optional\n\n from sentry_sdk.tracing import Span\n\n\nclass SqlalchemyIntegration(Integration):\n identifier = \"sqlalchemy\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n listen(Engine, \"dbapi_error\", _dbapi_error)\n\n\ndef _before_cursor_execute(\n conn, cursor, statement, parameters, context, executemany, *args\n):\n # type: (Any, Any, Any, Any, Any, bool, *Any) -> None\n hub = Hub.current\n if hub.get_integration(SqlalchemyIntegration) is None:\n return\n\n ctx_mgr = record_sql_queries(\n hub,\n cursor,\n statement,\n parameters,\n paramstyle=context and context.dialect and context.dialect.paramstyle or None,\n executemany=executemany,\n )\n conn._sentry_sql_span_manager = ctx_mgr\n\n span = ctx_mgr.__enter__()\n\n if span is not None:\n conn._sentry_sql_span = span\n\n\ndef _after_cursor_execute(conn, cursor, statement, *args):\n # type: (Any, Any, Any, *Any) -> None\n ctx_mgr = getattr(\n conn, \"_sentry_sql_span_manager\", None\n ) # type: ContextManager[Any]\n\n if ctx_mgr is not None:\n conn._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n\n\ndef _dbapi_error(conn, *args):\n # type: (Any, *Any) -> None\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n\n if span is not None:\n span.set_status(\"internal_error\")\n", "path": "sentry_sdk/integrations/sqlalchemy.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.tracing import record_sql_queries\n\nfrom sqlalchemy.engine import Engine # type: ignore\nfrom sqlalchemy.event import listen # type: ignore\n\nif MYPY:\n from typing import Any\n from typing import ContextManager\n from typing import Optional\n\n from sentry_sdk.tracing import Span\n\n\nclass SqlalchemyIntegration(Integration):\n identifier = \"sqlalchemy\"\n\n @staticmethod\n def 
setup_once():\n # type: () -> None\n\n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n listen(Engine, \"handle_error\", _handle_error)\n\n\ndef _before_cursor_execute(\n conn, cursor, statement, parameters, context, executemany, *args\n):\n # type: (Any, Any, Any, Any, Any, bool, *Any) -> None\n hub = Hub.current\n if hub.get_integration(SqlalchemyIntegration) is None:\n return\n\n ctx_mgr = record_sql_queries(\n hub,\n cursor,\n statement,\n parameters,\n paramstyle=context and context.dialect and context.dialect.paramstyle or None,\n executemany=executemany,\n )\n conn._sentry_sql_span_manager = ctx_mgr\n\n span = ctx_mgr.__enter__()\n\n if span is not None:\n conn._sentry_sql_span = span\n\n\ndef _after_cursor_execute(conn, cursor, statement, *args):\n # type: (Any, Any, Any, *Any) -> None\n ctx_mgr = getattr(\n conn, \"_sentry_sql_span_manager\", None\n ) # type: ContextManager[Any]\n\n if ctx_mgr is not None:\n conn._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n\n\ndef _handle_error(context, *args):\n # type: (Any, *Any) -> None\n conn = context.connection\n span = getattr(conn, \"_sentry_sql_span\", None) # type: Optional[Span]\n\n if span is not None:\n span.set_status(\"internal_error\")\n", "path": "sentry_sdk/integrations/sqlalchemy.py"}]}
| 948 | 223 |
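Since the patch above swaps the deprecated `dbapi_error` hook for `handle_error`, a self-contained sketch of that event may help: the listener receives a single `ExceptionContext` and reads the connection from it, mirroring `_handle_error` in the diff. The SQLite engine and query are only there to make the sketch runnable.

```python
# Standalone sketch of SQLAlchemy's "handle_error" event, the replacement for
# the deprecated "dbapi_error" hook used before the patch above.
from sqlalchemy import create_engine, event, text
from sqlalchemy.engine import Engine

@event.listens_for(Engine, "handle_error")
def _handle_error(context):
    # handle_error passes one ExceptionContext; the connection hangs off it
    # (it can be None for connect-time failures).
    conn = context.connection
    span = getattr(conn, "_sentry_sql_span", None)
    if span is not None:
        span.set_status("internal_error")

engine = create_engine("sqlite://")
try:
    with engine.connect() as conn:
        conn.execute(text("SELECT * FROM missing_table"))  # triggers the listener
except Exception:
    pass
```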
gh_patches_debug_1197 | rasdani/github-patches | git_diff | keras-team__keras-nlp-1166
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add compute_output_shape method to WordPieceTokenizer
When we run Pretraining Transformer from Scratch guide with PyTorch and JAX backend, it raises
```
RuntimeError: Exception encountered when calling WordPieceTokenizer.call().
Could not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered:
'string'
Arguments received by WordPieceTokenizer.call():
• args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',)
• kwargs=<class 'inspect._empty'>
```
cc: @mattdangerw
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_nlp/tokenizers/tokenizer.py`
Content:
```
1 # Copyright 2023 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import List
16
17 from keras_nlp.api_export import keras_nlp_export
18 from keras_nlp.layers.preprocessing.preprocessing_layer import (
19 PreprocessingLayer,
20 )
21
22
23 @keras_nlp_export("keras_nlp.tokenizers.Tokenizer")
24 class Tokenizer(PreprocessingLayer):
25 """A base class for tokenizer layers.
26
27 Tokenizers in the KerasNLP library should all subclass this layer.
28 The class provides two core methods `tokenize()` and `detokenize()` for
29 going from plain text to sequences and back. A tokenizer is a subclass of
30 `keras.layers.Layer` and can be combined into a `keras.Model`.
31
32 Subclassers should always implement the `tokenize()` method, which will also
33 be the default when calling the layer directly on inputs.
34
35 Subclassers can optionally implement the `detokenize()` method if the
36 tokenization is reversible. Otherwise, this can be skipped.
37
38 Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,
39 `token_to_id()` and `id_to_token()` if applicable. For some simple
40 "vocab free" tokenizers, such as a whitespace splitter show below, these
41 methods do not apply and can be skipped.
42
43 Examples:
44
45 ```python
46 class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):
47 def tokenize(self, inputs):
48 return tf.strings.split(inputs)
49
50 def detokenize(self, inputs):
51 return tf.strings.reduce_join(inputs, separator=" ", axis=-1)
52
53 tokenizer = WhitespaceSplitterTokenizer()
54
55 # Tokenize some inputs.
56 tokenizer.tokenize("This is a test")
57
58 # Shorthard for `tokenize()`.
59 tokenizer("This is a test")
60
61 # Detokenize some outputs.
62 tokenizer.detokenize(["This", "is", "a", "test"])
63 ```
64 """
65
66 def __init__(self, *args, **kwargs):
67 super().__init__(*args, **kwargs)
68
69 def tokenize(self, inputs, *args, **kwargs):
70 """Transform input tensors of strings into output tokens.
71
72 Args:
73 inputs: Input tensor, or dict/list/tuple of input tensors.
74 *args: Additional positional arguments.
75 **kwargs: Additional keyword arguments.
76 """
77 raise NotImplementedError(
78 "No implementation of `tokenize()` was found for "
79 f"{self.__class__.__name__}. All tokenizers should implement "
80 "`tokenize()`."
81 )
82
83 def detokenize(self, inputs, *args, **kwargs):
84 """Transform tokens back into strings.
85
86 Args:
87 inputs: Input tensor, or dict/list/tuple of input tensors.
88 *args: Additional positional arguments.
89 **kwargs: Additional keyword arguments.
90 """
91 raise NotImplementedError(
92 "No implementation of `detokenize()` was found for "
93 f"{self.__class__.__name__}."
94 )
95
96 def get_vocabulary(self) -> List[str]:
97 """Get the tokenizer vocabulary as a list of strings terms."""
98 raise NotImplementedError(
99 "No implementation of `get_vocabulary()` was found for "
100 f"{self.__class__.__name__}."
101 )
102
103 def vocabulary_size(self) -> int:
104 """Returns the total size of the token id space."""
105 raise NotImplementedError(
106 "No implementation of `vocabulary_size()` was found for "
107 f"{self.__class__.__name__}."
108 )
109
110 def id_to_token(self, id: int) -> str:
111 """Convert an integer id to a string token."""
112 raise NotImplementedError(
113 "No implementation of `id_to_token()` was found for "
114 f"{self.__class__.__name__}."
115 )
116
117 def token_to_id(self, token: str) -> int:
118 """Convert an integer id to a string token."""
119 raise NotImplementedError(
120 "No implementation of `id_to_token()` was found for "
121 f"{self.__class__.__name__}."
122 )
123
124 def call(self, inputs, *args, training=None, **kwargs):
125 return self.tokenize(inputs, *args, **kwargs)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py
--- a/keras_nlp/tokenizers/tokenizer.py
+++ b/keras_nlp/tokenizers/tokenizer.py
@@ -123,3 +123,6 @@
def call(self, inputs, *args, training=None, **kwargs):
return self.tokenize(inputs, *args, **kwargs)
+
+ def compute_output_shape(self, inputs_shape):
+ return tuple(inputs_shape) + (self.sequence_length,)
|
{"golden_diff": "diff --git a/keras_nlp/tokenizers/tokenizer.py b/keras_nlp/tokenizers/tokenizer.py\n--- a/keras_nlp/tokenizers/tokenizer.py\n+++ b/keras_nlp/tokenizers/tokenizer.py\n@@ -123,3 +123,6 @@\n \n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n+\n+ def compute_output_shape(self, inputs_shape):\n+ return tuple(inputs_shape) + (self.sequence_length,)\n", "issue": "Add compute_output_shape method to WordPieceTokenizer\nWhen we run Pretraining Transformer from Scratch guide with PyTorch and JAX backend, it raises\r\n\r\n```\r\n\r\nRuntimeError: Exception encountered when calling WordPieceTokenizer.call().\r\n\r\nCould not automatically infer the output shape / dtype of 'word_piece_tokenizer_1' (of type WordPieceTokenizer). Either the `WordPieceTokenizer.call()` method is incorrect, or you need to implement the `WordPieceTokenizer.compute_output_spec() / compute_output_shape()` method. Error encountered:\r\n\r\n'string'\r\n\r\nArguments received by WordPieceTokenizer.call():\r\n \u2022 args=('<KerasTensor shape=(None,), dtype=string, name=keras_tensor_59>',)\r\n \u2022 kwargs=<class 'inspect._empty'>\r\n```\r\n\r\ncc: @mattdangerw \n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. 
For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n", "path": "keras_nlp/tokenizers/tokenizer.py"}], "after_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.preprocessing.preprocessing_layer import (\n PreprocessingLayer,\n)\n\n\n@keras_nlp_export(\"keras_nlp.tokenizers.Tokenizer\")\nclass 
Tokenizer(PreprocessingLayer):\n \"\"\"A base class for tokenizer layers.\n\n Tokenizers in the KerasNLP library should all subclass this layer.\n The class provides two core methods `tokenize()` and `detokenize()` for\n going from plain text to sequences and back. A tokenizer is a subclass of\n `keras.layers.Layer` and can be combined into a `keras.Model`.\n\n Subclassers should always implement the `tokenize()` method, which will also\n be the default when calling the layer directly on inputs.\n\n Subclassers can optionally implement the `detokenize()` method if the\n tokenization is reversible. Otherwise, this can be skipped.\n\n Subclassers should implement `get_vocabulary()`, `vocabulary_size()`,\n `token_to_id()` and `id_to_token()` if applicable. For some simple\n \"vocab free\" tokenizers, such as a whitespace splitter show below, these\n methods do not apply and can be skipped.\n\n Examples:\n\n ```python\n class WhitespaceSplitterTokenizer(keras_nlp.tokenizers.Tokenizer):\n def tokenize(self, inputs):\n return tf.strings.split(inputs)\n\n def detokenize(self, inputs):\n return tf.strings.reduce_join(inputs, separator=\" \", axis=-1)\n\n tokenizer = WhitespaceSplitterTokenizer()\n\n # Tokenize some inputs.\n tokenizer.tokenize(\"This is a test\")\n\n # Shorthard for `tokenize()`.\n tokenizer(\"This is a test\")\n\n # Detokenize some outputs.\n tokenizer.detokenize([\"This\", \"is\", \"a\", \"test\"])\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def tokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform input tensors of strings into output tokens.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `tokenize()` was found for \"\n f\"{self.__class__.__name__}. All tokenizers should implement \"\n \"`tokenize()`.\"\n )\n\n def detokenize(self, inputs, *args, **kwargs):\n \"\"\"Transform tokens back into strings.\n\n Args:\n inputs: Input tensor, or dict/list/tuple of input tensors.\n *args: Additional positional arguments.\n **kwargs: Additional keyword arguments.\n \"\"\"\n raise NotImplementedError(\n \"No implementation of `detokenize()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def get_vocabulary(self) -> List[str]:\n \"\"\"Get the tokenizer vocabulary as a list of strings terms.\"\"\"\n raise NotImplementedError(\n \"No implementation of `get_vocabulary()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def vocabulary_size(self) -> int:\n \"\"\"Returns the total size of the token id space.\"\"\"\n raise NotImplementedError(\n \"No implementation of `vocabulary_size()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def id_to_token(self, id: int) -> str:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def token_to_id(self, token: str) -> int:\n \"\"\"Convert an integer id to a string token.\"\"\"\n raise NotImplementedError(\n \"No implementation of `id_to_token()` was found for \"\n f\"{self.__class__.__name__}.\"\n )\n\n def call(self, inputs, *args, training=None, **kwargs):\n return self.tokenize(inputs, *args, **kwargs)\n\n def compute_output_shape(self, inputs_shape):\n return tuple(inputs_shape) + (self.sequence_length,)\n", "path": "keras_nlp/tokenizers/tokenizer.py"}]}
| 1,712 | 120 |
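To make the fix in the record above concrete, here is a toy Keras 3 layer (not KerasNLP code) showing why a string-input layer needs `compute_output_shape()`: without it, the functional call below would fail on the torch/jax backends much as in the issue. `sequence_length` mirrors the patched tokenizer; the hashing "tokenization" is purely illustrative.

```python
# Toy layer, not KerasNLP code: demonstrates the compute_output_shape() fix.
import keras
import tensorflow as tf  # only used inside call(); shape inference uses compute_output_shape

class ToyTokenizer(keras.layers.Layer):
    def __init__(self, sequence_length=8, **kwargs):
        super().__init__(**kwargs)
        self.sequence_length = sequence_length

    def call(self, inputs):
        # Stand-in for real tokenization: hash each string to an id and repeat it.
        ids = tf.strings.to_hash_bucket_fast(inputs, num_buckets=1000)
        return tf.tile(tf.expand_dims(ids, -1), [1, self.sequence_length])

    def compute_output_shape(self, inputs_shape):
        # Same shape rule as the patch: one token id per output position.
        return tuple(inputs_shape) + (self.sequence_length,)

inputs = keras.Input(shape=(), dtype="string")  # KerasTensor shape=(None,), dtype=string
outputs = ToyTokenizer()(inputs)
print(outputs.shape)  # (None, 8)
```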
gh_patches_debug_53354 | rasdani/github-patches | git_diff | facebookresearch__hydra-894
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Nevergrad sweeper does not work with integers if there are less than 6 choices
Nevergrad sweeper complains if it has less than 6 values to sweep over in a range (e.g. `lower: 1` `upper:3`) and asks to use a list instead (`ValueError: For integers with 6 or fewer values, use a choice instead`). But if you use a list with integers it does not work because it assumes that choices contain only strings:

Line where the first error is raised: https://github.com/facebookresearch/hydra/blob/0e001afb2a55275b6f7dc33e79035dbf3a797c00/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py#L178
Hydra Version: 1.0.0rc2
Of course I can give a string and then convert in my code, but it would probably be better to solve it differently? For example sliently treating it as a list without raising the first error ? Or at least to say in the raised error that you have to use a list and convert the str to int in your own code ? Not sure what is the best way..
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import itertools
3 import logging
4 from dataclasses import dataclass
5 from typing import Any, Dict, List, Optional, Tuple
6
7 from hydra.core.config_loader import ConfigLoader
8 from hydra.core.plugins import Plugins
9 from hydra.plugins.launcher import Launcher
10 from hydra.plugins.sweeper import Sweeper
11 from hydra.types import TaskFunction
12 from omegaconf import DictConfig, ListConfig, OmegaConf
13
14 from .config import OptimConf, ScalarConfigSpec
15
16 # pylint: disable=logging-fstring-interpolation,no-self-used
17 log = logging.getLogger(__name__)
18
19
20 @dataclass
21 class CommandlineSpec:
22 """Structured commandline specification
23 for sweepers handling categorical variables and bounded variables
24
25 Attributes
26 ----------
27 bounds: Optional[Tuple[float, float]]
28 if present, this defines a bounded scalar between bounds[0]
29 and bounds[1]
30 options: Optional[List[Any]]
31 if present, this defines the options/choices of a categorical
32 variable
33 cast: str
34 the name of the variable type to cast it to ("int", "str"
35 or "float")
36 log: bool
37 for bounded scalars, whether it is log-distributed
38
39 Note
40 ----
41 Exactly one of bounds or options must be provided
42 """
43
44 bounds: Optional[Tuple[float, float]] = None
45 options: Optional[List[str]] = None
46 cast: str = "float"
47 log: bool = False
48
49 def __post_init__(self) -> None:
50 if not (self.bounds is None) ^ (self.options is None):
51 raise ValueError("Exactly one of bounds or options must be specified")
52 if self.bounds is not None:
53 if self.cast == "str":
54 raise ValueError(
55 "Inconsistent specifications 'str' for bounded values."
56 )
57 if self.bounds[0] > self.bounds[1]:
58 raise ValueError(f"Bounds must be ordered, but got {self.bounds}")
59 if self.options is not None and self.log:
60 raise ValueError("Inconsistent 'log' specification for choice parameter")
61
62 @classmethod
63 def parse(cls, string: str) -> "CommandlineSpec":
64 """Parses a commandline argument string
65
66 Parameter
67 ---------
68 string: str
69 This can be:
70 - comma-separated values: for a choice parameter
71 Eg.: "a,b,c"
72 - colon-separated values for ranges of scalars.
73 Eg.: "0:10"
74 Colon-separeted can be appended to:
75 - cast to int/str/float (always defaults to float):
76 Eg: "float:0,4,10", "int:0:10"
77 - set log distribution for scalars
78 Eg: "int:log:4:1024"
79 """
80 available_modifiers = {"log", "float", "int", "str"}
81 colon_split = string.split(":")
82 modifiers = set(
83 itertools.takewhile(available_modifiers.__contains__, colon_split)
84 )
85 remain = colon_split[len(modifiers) :]
86 casts = list(modifiers - {"log"})
87 if len(remain) not in {1, 2}:
88 raise ValueError(
89 "Can't interpret non-speficiations: {}.\nthis needs to be "
90 "either colon or coma-separated values".format(":".join(remain))
91 )
92 if len(casts) > 1:
93 raise ValueError(f"Inconsistent specifications: {casts}")
94 if len(remain) == 1: # choice argument
95 cast = casts[0] if casts else "str"
96 options = remain[0].split(",")
97 if len(options) < 2:
98 raise ValueError("At least 2 options are required")
99 if not casts:
100 try: # default to float if possible and no spec provided
101 _ = [float(x) for x in options]
102 cast = "float"
103 except ValueError:
104 pass
105 return cls(options=options, cast=cast)
106 # bounded argument
107 bounds: Tuple[float, float] = tuple(float(x) for x in remain) # type: ignore
108 cast = casts[0] if casts else "float"
109 return cls(bounds=bounds, cast=cast, log="log" in modifiers)
110
111
112 # pylint: disable=too-many-branches
113 def make_nevergrad_parameter(description: Any) -> Any:
114 """Returns a Nevergrad parameter from a definition string or object.
115
116 Parameters
117 ----------
118 description: Any
119 * a commandline definition string. This can be:
120 - comma-separated values: for a choice parameter
121 Eg.: "a,b,c"
122 Note: sequences of increasing scalars provide a specific parametrization
123 compared to unordered categorical values
124 - ":"-separated values for ranges of scalars.
125 "int" and/or "log" modifiers can be added in front to cast to integer or
126 use log-distributed values (Eg: int:log:4:1024)
127 - anything else will be treated as a constant string
128 * a config definition dict for scalar parameters, with potential fields
129 init, lower, upper, step, log, integer
130 * a list for option parameters defined in config file
131
132 Returns
133 -------
134 Parameter or str
135 A Parameter if the string fitted one of the definitions, else the input string
136 """
137 # lazy initialization to avoid overhead when loading hydra
138 import nevergrad as ng
139
140 # revert config parsing
141
142 if isinstance(description, (ListConfig, list)):
143 description = ",".join(description)
144 if isinstance(description, str):
145 # cast to spec if possible
146 try:
147 description = CommandlineSpec.parse(description)
148 except ValueError:
149 pass
150 # convert scalar commandline specs to dict
151 if isinstance(description, CommandlineSpec) and description.bounds is not None:
152 description = ScalarConfigSpec(
153 lower=description.bounds[0],
154 upper=description.bounds[1],
155 log=description.log,
156 integer=description.cast == "int",
157 )
158 # convert scalar config specs to dict
159 # convert dict to Scalar parameter instance
160 if isinstance(description, (dict, DictConfig)):
161 description = ScalarConfigSpec(**description)
162 if isinstance(description, ScalarConfigSpec):
163 init = ["init", "lower", "upper"]
164 init_params = {x: getattr(description, x) for x in init}
165 if not description.log:
166 scalar = ng.p.Scalar(**init_params)
167 if description.step is not None:
168 scalar.set_mutation(sigma=description.step)
169 else:
170 if description.step is not None:
171 init_params["exponent"] = description.step
172 scalar = ng.p.Log(**init_params)
173 if description.integer:
174 scalar.set_integer_casting()
175 a, b = scalar.bounds
176 if a is not None and b is not None and b - a <= 6:
177 raise ValueError(
178 "For integers with 6 or fewer values, use a choice instead"
179 )
180 return scalar
181 # choices
182 if isinstance(description, CommandlineSpec):
183 assert description.options is not None
184 caster = {"int": int, "str": str, "float": float}[description.cast]
185 choices = [caster(x) for x in description.options]
186 ordered = all(isinstance(c, (int, float)) for c in choices)
187 ordered &= all(c0 <= c1 for c0, c1 in zip(choices[:-1], choices[1:]))
188 return ng.p.TransitionChoice(choices) if ordered else ng.p.Choice(choices)
189 # constant
190 if isinstance(description, (str, int, float)):
191 return description
192 raise TypeError(f"Unexpected parameter configuration: {description}")
193
194
195 class NevergradSweeper(Sweeper):
196 """Returns a Nevergrad parameter from a definition string.
197
198 Parameters
199 ----------
200 config: DictConfig
201 the optimization process configuration
202 version: int
203 version of the API
204 """
205
206 def __init__(
207 self, optim: OptimConf, version: int, parametrization: Optional[DictConfig],
208 ):
209 assert (
210 version == 1
211 ), f"Only version 1 of API is currently available (got {version})"
212 self.opt_config = optim
213 self.config: Optional[DictConfig] = None
214 self.launcher: Optional[Launcher] = None
215 self.job_results = None
216 self.parametrization: Dict[str, Any] = {}
217 if parametrization is not None:
218 assert isinstance(parametrization, DictConfig)
219 self.parametrization = {
220 x: make_nevergrad_parameter(y) for x, y in parametrization.items()
221 }
222 self.job_idx: Optional[int] = None
223
224 def setup(
225 self,
226 config: DictConfig,
227 config_loader: ConfigLoader,
228 task_function: TaskFunction,
229 ) -> None:
230 self.job_idx = 0
231 self.config = config
232 self.config_loader = config_loader
233 self.launcher = Plugins.instance().instantiate_launcher(
234 config=config, config_loader=config_loader, task_function=task_function
235 )
236
237 def sweep(self, arguments: List[str]) -> None:
238 # lazy initialization to avoid overhead when loading hydra
239 import nevergrad as ng
240
241 assert self.config is not None
242 assert self.launcher is not None
243 assert self.job_idx is not None
244 direction = -1 if self.opt_config.maximize else 1
245 name = "maximization" if self.opt_config.maximize else "minimization"
246 # Override the parametrization from commandline
247 params = dict(self.parametrization)
248 for s in arguments:
249 key, value = s.split("=", 1)
250 params[key] = make_nevergrad_parameter(value)
251 parametrization = ng.p.Dict(**params)
252 parametrization.descriptors.deterministic_function = not self.opt_config.noisy
253 parametrization.random_state.seed(self.opt_config.seed)
254 # log and build the optimizer
255 opt = self.opt_config.optimizer
256 remaining_budget = self.opt_config.budget
257 nw = self.opt_config.num_workers
258 log.info(
259 f"NevergradSweeper(optimizer={opt}, budget={remaining_budget}, "
260 f"num_workers={nw}) {name}"
261 )
262 log.info(f"with parametrization {parametrization}")
263 log.info(f"Sweep output dir: {self.config.hydra.sweep.dir}")
264 optimizer = ng.optimizers.registry[opt](parametrization, remaining_budget, nw)
265 # loop!
266 all_returns: List[Any] = []
267 best: Tuple[float, ng.p.Parameter] = (float("inf"), parametrization)
268 while remaining_budget > 0:
269 batch = min(nw, remaining_budget)
270 remaining_budget -= batch
271 candidates = [optimizer.ask() for _ in range(batch)]
272 overrides = list(
273 tuple(f"{x}={y}" for x, y in c.value.items()) for c in candidates
274 )
275 self.validate_batch_is_legal(overrides)
276 returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)
277 self.job_idx += len(returns)
278 # would have been nice to avoid waiting for all jobs to finish
279 # aka batch size Vs steady state (launching a new job whenever one is done)
280 for cand, ret in zip(candidates, returns):
281 loss = direction * ret.return_value
282 optimizer.tell(cand, loss)
283 if loss < best[0]:
284 best = (loss, cand)
285 all_returns.extend(returns)
286 recom = optimizer.provide_recommendation()
287 results_to_serialize = {
288 "name": "nevergrad",
289 "best_evaluated_params": best[1].value,
290 "best_evaluated_result": direction * best[0],
291 }
292 OmegaConf.save(
293 OmegaConf.create(results_to_serialize),
294 f"{self.config.hydra.sweep.dir}/optimization_results.yaml",
295 )
296 log.info(
297 "Best parameters: %s", " ".join(f"{x}={y}" for x, y in recom.value.items())
298 )
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py
--- a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py
+++ b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py
@@ -140,7 +140,7 @@
# revert config parsing
if isinstance(description, (ListConfig, list)):
- description = ",".join(description)
+ description = ",".join(str(x) for x in description)
if isinstance(description, str):
# cast to spec if possible
try:
|
{"golden_diff": "diff --git a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py\n--- a/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py\n+++ b/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py\n@@ -140,7 +140,7 @@\n # revert config parsing\n \n if isinstance(description, (ListConfig, list)):\n- description = \",\".join(description)\n+ description = \",\".join(str(x) for x in description)\n if isinstance(description, str):\n # cast to spec if possible\n try:\n", "issue": "[Bug]: Nevergrad sweeper does not work with integers if there are less than 6 choices\nNevergrad sweeper complains if it has less than 6 values to sweep over in a range (e.g. `lower: 1` `upper:3`) and asks to use a list instead (`ValueError: For integers with 6 or fewer values, use a choice instead`). But if you use a list with integers it does not work because it assumes that choices contain only strings:\r\n\r\n\r\n\r\nLine where the first error is raised: https://github.com/facebookresearch/hydra/blob/0e001afb2a55275b6f7dc33e79035dbf3a797c00/plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py#L178\r\nHydra Version: 1.0.0rc2\r\n\r\n\r\nOf course I can give a string and then convert in my code, but it would probably be better to solve it differently? For example sliently treating it as a list without raising the first error ? Or at least to say in the raised error that you have to use a list and convert the str to int in your own code ? Not sure what is the best way..\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.plugins import Plugins\nfrom hydra.plugins.launcher import Launcher\nfrom hydra.plugins.sweeper import Sweeper\nfrom hydra.types import TaskFunction\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\n\nfrom .config import OptimConf, ScalarConfigSpec\n\n# pylint: disable=logging-fstring-interpolation,no-self-used\nlog = logging.getLogger(__name__)\n\n\n@dataclass\nclass CommandlineSpec:\n \"\"\"Structured commandline specification\n for sweepers handling categorical variables and bounded variables\n\n Attributes\n ----------\n bounds: Optional[Tuple[float, float]]\n if present, this defines a bounded scalar between bounds[0]\n and bounds[1]\n options: Optional[List[Any]]\n if present, this defines the options/choices of a categorical\n variable\n cast: str\n the name of the variable type to cast it to (\"int\", \"str\"\n or \"float\")\n log: bool\n for bounded scalars, whether it is log-distributed\n\n Note\n ----\n Exactly one of bounds or options must be provided\n \"\"\"\n\n bounds: Optional[Tuple[float, float]] = None\n options: Optional[List[str]] = None\n cast: str = \"float\"\n log: bool = False\n\n def __post_init__(self) -> None:\n if not (self.bounds is None) ^ (self.options is None):\n raise ValueError(\"Exactly one of bounds or options must be specified\")\n if self.bounds is not None:\n if self.cast == \"str\":\n raise ValueError(\n \"Inconsistent specifications 'str' for bounded values.\"\n )\n if self.bounds[0] > self.bounds[1]:\n raise ValueError(f\"Bounds must be ordered, but got {self.bounds}\")\n if self.options is not None and self.log:\n raise 
ValueError(\"Inconsistent 'log' specification for choice parameter\")\n\n @classmethod\n def parse(cls, string: str) -> \"CommandlineSpec\":\n \"\"\"Parses a commandline argument string\n\n Parameter\n ---------\n string: str\n This can be:\n - comma-separated values: for a choice parameter\n Eg.: \"a,b,c\"\n - colon-separated values for ranges of scalars.\n Eg.: \"0:10\"\n Colon-separeted can be appended to:\n - cast to int/str/float (always defaults to float):\n Eg: \"float:0,4,10\", \"int:0:10\"\n - set log distribution for scalars\n Eg: \"int:log:4:1024\"\n \"\"\"\n available_modifiers = {\"log\", \"float\", \"int\", \"str\"}\n colon_split = string.split(\":\")\n modifiers = set(\n itertools.takewhile(available_modifiers.__contains__, colon_split)\n )\n remain = colon_split[len(modifiers) :]\n casts = list(modifiers - {\"log\"})\n if len(remain) not in {1, 2}:\n raise ValueError(\n \"Can't interpret non-speficiations: {}.\\nthis needs to be \"\n \"either colon or coma-separated values\".format(\":\".join(remain))\n )\n if len(casts) > 1:\n raise ValueError(f\"Inconsistent specifications: {casts}\")\n if len(remain) == 1: # choice argument\n cast = casts[0] if casts else \"str\"\n options = remain[0].split(\",\")\n if len(options) < 2:\n raise ValueError(\"At least 2 options are required\")\n if not casts:\n try: # default to float if possible and no spec provided\n _ = [float(x) for x in options]\n cast = \"float\"\n except ValueError:\n pass\n return cls(options=options, cast=cast)\n # bounded argument\n bounds: Tuple[float, float] = tuple(float(x) for x in remain) # type: ignore\n cast = casts[0] if casts else \"float\"\n return cls(bounds=bounds, cast=cast, log=\"log\" in modifiers)\n\n\n# pylint: disable=too-many-branches\ndef make_nevergrad_parameter(description: Any) -> Any:\n \"\"\"Returns a Nevergrad parameter from a definition string or object.\n\n Parameters\n ----------\n description: Any\n * a commandline definition string. 
This can be:\n - comma-separated values: for a choice parameter\n Eg.: \"a,b,c\"\n Note: sequences of increasing scalars provide a specific parametrization\n compared to unordered categorical values\n - \":\"-separated values for ranges of scalars.\n \"int\" and/or \"log\" modifiers can be added in front to cast to integer or\n use log-distributed values (Eg: int:log:4:1024)\n - anything else will be treated as a constant string\n * a config definition dict for scalar parameters, with potential fields\n init, lower, upper, step, log, integer\n * a list for option parameters defined in config file\n\n Returns\n -------\n Parameter or str\n A Parameter if the string fitted one of the definitions, else the input string\n \"\"\"\n # lazy initialization to avoid overhead when loading hydra\n import nevergrad as ng\n\n # revert config parsing\n\n if isinstance(description, (ListConfig, list)):\n description = \",\".join(description)\n if isinstance(description, str):\n # cast to spec if possible\n try:\n description = CommandlineSpec.parse(description)\n except ValueError:\n pass\n # convert scalar commandline specs to dict\n if isinstance(description, CommandlineSpec) and description.bounds is not None:\n description = ScalarConfigSpec(\n lower=description.bounds[0],\n upper=description.bounds[1],\n log=description.log,\n integer=description.cast == \"int\",\n )\n # convert scalar config specs to dict\n # convert dict to Scalar parameter instance\n if isinstance(description, (dict, DictConfig)):\n description = ScalarConfigSpec(**description)\n if isinstance(description, ScalarConfigSpec):\n init = [\"init\", \"lower\", \"upper\"]\n init_params = {x: getattr(description, x) for x in init}\n if not description.log:\n scalar = ng.p.Scalar(**init_params)\n if description.step is not None:\n scalar.set_mutation(sigma=description.step)\n else:\n if description.step is not None:\n init_params[\"exponent\"] = description.step\n scalar = ng.p.Log(**init_params)\n if description.integer:\n scalar.set_integer_casting()\n a, b = scalar.bounds\n if a is not None and b is not None and b - a <= 6:\n raise ValueError(\n \"For integers with 6 or fewer values, use a choice instead\"\n )\n return scalar\n # choices\n if isinstance(description, CommandlineSpec):\n assert description.options is not None\n caster = {\"int\": int, \"str\": str, \"float\": float}[description.cast]\n choices = [caster(x) for x in description.options]\n ordered = all(isinstance(c, (int, float)) for c in choices)\n ordered &= all(c0 <= c1 for c0, c1 in zip(choices[:-1], choices[1:]))\n return ng.p.TransitionChoice(choices) if ordered else ng.p.Choice(choices)\n # constant\n if isinstance(description, (str, int, float)):\n return description\n raise TypeError(f\"Unexpected parameter configuration: {description}\")\n\n\nclass NevergradSweeper(Sweeper):\n \"\"\"Returns a Nevergrad parameter from a definition string.\n\n Parameters\n ----------\n config: DictConfig\n the optimization process configuration\n version: int\n version of the API\n \"\"\"\n\n def __init__(\n self, optim: OptimConf, version: int, parametrization: Optional[DictConfig],\n ):\n assert (\n version == 1\n ), f\"Only version 1 of API is currently available (got {version})\"\n self.opt_config = optim\n self.config: Optional[DictConfig] = None\n self.launcher: Optional[Launcher] = None\n self.job_results = None\n self.parametrization: Dict[str, Any] = {}\n if parametrization is not None:\n assert isinstance(parametrization, DictConfig)\n self.parametrization = {\n x: 
make_nevergrad_parameter(y) for x, y in parametrization.items()\n }\n self.job_idx: Optional[int] = None\n\n def setup(\n self,\n config: DictConfig,\n config_loader: ConfigLoader,\n task_function: TaskFunction,\n ) -> None:\n self.job_idx = 0\n self.config = config\n self.config_loader = config_loader\n self.launcher = Plugins.instance().instantiate_launcher(\n config=config, config_loader=config_loader, task_function=task_function\n )\n\n def sweep(self, arguments: List[str]) -> None:\n # lazy initialization to avoid overhead when loading hydra\n import nevergrad as ng\n\n assert self.config is not None\n assert self.launcher is not None\n assert self.job_idx is not None\n direction = -1 if self.opt_config.maximize else 1\n name = \"maximization\" if self.opt_config.maximize else \"minimization\"\n # Override the parametrization from commandline\n params = dict(self.parametrization)\n for s in arguments:\n key, value = s.split(\"=\", 1)\n params[key] = make_nevergrad_parameter(value)\n parametrization = ng.p.Dict(**params)\n parametrization.descriptors.deterministic_function = not self.opt_config.noisy\n parametrization.random_state.seed(self.opt_config.seed)\n # log and build the optimizer\n opt = self.opt_config.optimizer\n remaining_budget = self.opt_config.budget\n nw = self.opt_config.num_workers\n log.info(\n f\"NevergradSweeper(optimizer={opt}, budget={remaining_budget}, \"\n f\"num_workers={nw}) {name}\"\n )\n log.info(f\"with parametrization {parametrization}\")\n log.info(f\"Sweep output dir: {self.config.hydra.sweep.dir}\")\n optimizer = ng.optimizers.registry[opt](parametrization, remaining_budget, nw)\n # loop!\n all_returns: List[Any] = []\n best: Tuple[float, ng.p.Parameter] = (float(\"inf\"), parametrization)\n while remaining_budget > 0:\n batch = min(nw, remaining_budget)\n remaining_budget -= batch\n candidates = [optimizer.ask() for _ in range(batch)]\n overrides = list(\n tuple(f\"{x}={y}\" for x, y in c.value.items()) for c in candidates\n )\n self.validate_batch_is_legal(overrides)\n returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)\n self.job_idx += len(returns)\n # would have been nice to avoid waiting for all jobs to finish\n # aka batch size Vs steady state (launching a new job whenever one is done)\n for cand, ret in zip(candidates, returns):\n loss = direction * ret.return_value\n optimizer.tell(cand, loss)\n if loss < best[0]:\n best = (loss, cand)\n all_returns.extend(returns)\n recom = optimizer.provide_recommendation()\n results_to_serialize = {\n \"name\": \"nevergrad\",\n \"best_evaluated_params\": best[1].value,\n \"best_evaluated_result\": direction * best[0],\n }\n OmegaConf.save(\n OmegaConf.create(results_to_serialize),\n f\"{self.config.hydra.sweep.dir}/optimization_results.yaml\",\n )\n log.info(\n \"Best parameters: %s\", \" \".join(f\"{x}={y}\" for x, y in recom.value.items())\n )\n", "path": "plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom hydra.core.config_loader import ConfigLoader\nfrom hydra.core.plugins import Plugins\nfrom hydra.plugins.launcher import Launcher\nfrom hydra.plugins.sweeper import Sweeper\nfrom hydra.types import TaskFunction\nfrom omegaconf import DictConfig, ListConfig, OmegaConf\n\nfrom .config import OptimConf, ScalarConfigSpec\n\n# pylint: disable=logging-fstring-interpolation,no-self-used\nlog = logging.getLogger(__name__)\n\n\n@dataclass\nclass CommandlineSpec:\n \"\"\"Structured commandline specification\n for sweepers handling categorical variables and bounded variables\n\n Attributes\n ----------\n bounds: Optional[Tuple[float, float]]\n if present, this defines a bounded scalar between bounds[0]\n and bounds[1]\n options: Optional[List[Any]]\n if present, this defines the options/choices of a categorical\n variable\n cast: str\n the name of the variable type to cast it to (\"int\", \"str\"\n or \"float\")\n log: bool\n for bounded scalars, whether it is log-distributed\n\n Note\n ----\n Exactly one of bounds or options must be provided\n \"\"\"\n\n bounds: Optional[Tuple[float, float]] = None\n options: Optional[List[str]] = None\n cast: str = \"float\"\n log: bool = False\n\n def __post_init__(self) -> None:\n if not (self.bounds is None) ^ (self.options is None):\n raise ValueError(\"Exactly one of bounds or options must be specified\")\n if self.bounds is not None:\n if self.cast == \"str\":\n raise ValueError(\n \"Inconsistent specifications 'str' for bounded values.\"\n )\n if self.bounds[0] > self.bounds[1]:\n raise ValueError(f\"Bounds must be ordered, but got {self.bounds}\")\n if self.options is not None and self.log:\n raise ValueError(\"Inconsistent 'log' specification for choice parameter\")\n\n @classmethod\n def parse(cls, string: str) -> \"CommandlineSpec\":\n \"\"\"Parses a commandline argument string\n\n Parameter\n ---------\n string: str\n This can be:\n - comma-separated values: for a choice parameter\n Eg.: \"a,b,c\"\n - colon-separated values for ranges of scalars.\n Eg.: \"0:10\"\n Colon-separeted can be appended to:\n - cast to int/str/float (always defaults to float):\n Eg: \"float:0,4,10\", \"int:0:10\"\n - set log distribution for scalars\n Eg: \"int:log:4:1024\"\n \"\"\"\n available_modifiers = {\"log\", \"float\", \"int\", \"str\"}\n colon_split = string.split(\":\")\n modifiers = set(\n itertools.takewhile(available_modifiers.__contains__, colon_split)\n )\n remain = colon_split[len(modifiers) :]\n casts = list(modifiers - {\"log\"})\n if len(remain) not in {1, 2}:\n raise ValueError(\n \"Can't interpret non-speficiations: {}.\\nthis needs to be \"\n \"either colon or coma-separated values\".format(\":\".join(remain))\n )\n if len(casts) > 1:\n raise ValueError(f\"Inconsistent specifications: {casts}\")\n if len(remain) == 1: # choice argument\n cast = casts[0] if casts else \"str\"\n options = remain[0].split(\",\")\n if len(options) < 2:\n raise ValueError(\"At least 2 options are required\")\n if not casts:\n try: # default to float if possible and no spec provided\n _ = [float(x) for x in options]\n cast = \"float\"\n except ValueError:\n pass\n return cls(options=options, cast=cast)\n # bounded argument\n bounds: Tuple[float, float] = tuple(float(x) for x in remain) # type: ignore\n cast = casts[0] if casts else \"float\"\n return cls(bounds=bounds, cast=cast, log=\"log\" in modifiers)\n\n\n# pylint: 
disable=too-many-branches\ndef make_nevergrad_parameter(description: Any) -> Any:\n \"\"\"Returns a Nevergrad parameter from a definition string or object.\n\n Parameters\n ----------\n description: Any\n * a commandline definition string. This can be:\n - comma-separated values: for a choice parameter\n Eg.: \"a,b,c\"\n Note: sequences of increasing scalars provide a specific parametrization\n compared to unordered categorical values\n - \":\"-separated values for ranges of scalars.\n \"int\" and/or \"log\" modifiers can be added in front to cast to integer or\n use log-distributed values (Eg: int:log:4:1024)\n - anything else will be treated as a constant string\n * a config definition dict for scalar parameters, with potential fields\n init, lower, upper, step, log, integer\n * a list for option parameters defined in config file\n\n Returns\n -------\n Parameter or str\n A Parameter if the string fitted one of the definitions, else the input string\n \"\"\"\n # lazy initialization to avoid overhead when loading hydra\n import nevergrad as ng\n\n # revert config parsing\n\n if isinstance(description, (ListConfig, list)):\n description = \",\".join(str(x) for x in description)\n if isinstance(description, str):\n # cast to spec if possible\n try:\n description = CommandlineSpec.parse(description)\n except ValueError:\n pass\n # convert scalar commandline specs to dict\n if isinstance(description, CommandlineSpec) and description.bounds is not None:\n description = ScalarConfigSpec(\n lower=description.bounds[0],\n upper=description.bounds[1],\n log=description.log,\n integer=description.cast == \"int\",\n )\n # convert scalar config specs to dict\n # convert dict to Scalar parameter instance\n if isinstance(description, (dict, DictConfig)):\n description = ScalarConfigSpec(**description)\n if isinstance(description, ScalarConfigSpec):\n init = [\"init\", \"lower\", \"upper\"]\n init_params = {x: getattr(description, x) for x in init}\n if not description.log:\n scalar = ng.p.Scalar(**init_params)\n if description.step is not None:\n scalar.set_mutation(sigma=description.step)\n else:\n if description.step is not None:\n init_params[\"exponent\"] = description.step\n scalar = ng.p.Log(**init_params)\n if description.integer:\n scalar.set_integer_casting()\n a, b = scalar.bounds\n if a is not None and b is not None and b - a <= 6:\n raise ValueError(\n \"For integers with 6 or fewer values, use a choice instead\"\n )\n return scalar\n # choices\n if isinstance(description, CommandlineSpec):\n assert description.options is not None\n caster = {\"int\": int, \"str\": str, \"float\": float}[description.cast]\n choices = [caster(x) for x in description.options]\n ordered = all(isinstance(c, (int, float)) for c in choices)\n ordered &= all(c0 <= c1 for c0, c1 in zip(choices[:-1], choices[1:]))\n return ng.p.TransitionChoice(choices) if ordered else ng.p.Choice(choices)\n # constant\n if isinstance(description, (str, int, float)):\n return description\n raise TypeError(f\"Unexpected parameter configuration: {description}\")\n\n\nclass NevergradSweeper(Sweeper):\n \"\"\"Returns a Nevergrad parameter from a definition string.\n\n Parameters\n ----------\n config: DictConfig\n the optimization process configuration\n version: int\n version of the API\n \"\"\"\n\n def __init__(\n self, optim: OptimConf, version: int, parametrization: Optional[DictConfig],\n ):\n assert (\n version == 1\n ), f\"Only version 1 of API is currently available (got {version})\"\n self.opt_config = optim\n self.config: 
Optional[DictConfig] = None\n self.launcher: Optional[Launcher] = None\n self.job_results = None\n self.parametrization: Dict[str, Any] = {}\n if parametrization is not None:\n assert isinstance(parametrization, DictConfig)\n self.parametrization = {\n x: make_nevergrad_parameter(y) for x, y in parametrization.items()\n }\n self.job_idx: Optional[int] = None\n\n def setup(\n self,\n config: DictConfig,\n config_loader: ConfigLoader,\n task_function: TaskFunction,\n ) -> None:\n self.job_idx = 0\n self.config = config\n self.config_loader = config_loader\n self.launcher = Plugins.instance().instantiate_launcher(\n config=config, config_loader=config_loader, task_function=task_function\n )\n\n def sweep(self, arguments: List[str]) -> None:\n # lazy initialization to avoid overhead when loading hydra\n import nevergrad as ng\n\n assert self.config is not None\n assert self.launcher is not None\n assert self.job_idx is not None\n direction = -1 if self.opt_config.maximize else 1\n name = \"maximization\" if self.opt_config.maximize else \"minimization\"\n # Override the parametrization from commandline\n params = dict(self.parametrization)\n for s in arguments:\n key, value = s.split(\"=\", 1)\n params[key] = make_nevergrad_parameter(value)\n parametrization = ng.p.Dict(**params)\n parametrization.descriptors.deterministic_function = not self.opt_config.noisy\n parametrization.random_state.seed(self.opt_config.seed)\n # log and build the optimizer\n opt = self.opt_config.optimizer\n remaining_budget = self.opt_config.budget\n nw = self.opt_config.num_workers\n log.info(\n f\"NevergradSweeper(optimizer={opt}, budget={remaining_budget}, \"\n f\"num_workers={nw}) {name}\"\n )\n log.info(f\"with parametrization {parametrization}\")\n log.info(f\"Sweep output dir: {self.config.hydra.sweep.dir}\")\n optimizer = ng.optimizers.registry[opt](parametrization, remaining_budget, nw)\n # loop!\n all_returns: List[Any] = []\n best: Tuple[float, ng.p.Parameter] = (float(\"inf\"), parametrization)\n while remaining_budget > 0:\n batch = min(nw, remaining_budget)\n remaining_budget -= batch\n candidates = [optimizer.ask() for _ in range(batch)]\n overrides = list(\n tuple(f\"{x}={y}\" for x, y in c.value.items()) for c in candidates\n )\n self.validate_batch_is_legal(overrides)\n returns = self.launcher.launch(overrides, initial_job_idx=self.job_idx)\n self.job_idx += len(returns)\n # would have been nice to avoid waiting for all jobs to finish\n # aka batch size Vs steady state (launching a new job whenever one is done)\n for cand, ret in zip(candidates, returns):\n loss = direction * ret.return_value\n optimizer.tell(cand, loss)\n if loss < best[0]:\n best = (loss, cand)\n all_returns.extend(returns)\n recom = optimizer.provide_recommendation()\n results_to_serialize = {\n \"name\": \"nevergrad\",\n \"best_evaluated_params\": best[1].value,\n \"best_evaluated_result\": direction * best[0],\n }\n OmegaConf.save(\n OmegaConf.create(results_to_serialize),\n f\"{self.config.hydra.sweep.dir}/optimization_results.yaml\",\n )\n log.info(\n \"Best parameters: %s\", \" \".join(f\"{x}={y}\" for x, y in recom.value.items())\n )\n", "path": "plugins/hydra_nevergrad_sweeper/hydra_plugins/hydra_nevergrad_sweeper/core.py"}]}
num_tokens: 4,054 | num_tokens_diff: 185
problem_id: gh_patches_debug_56864 | source: rasdani/github-patches | task_type: git_diff | in_source_id: bentoml__BentoML-2300

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Files are read as Models
**Describe the bug**
<!--- A clear and concise description of what the bug is. -->
When a .DS_Store file is created in the `bentoml/models` folder, I think `store.py`'s `list` method tries loading it. I think the check for directories isn't working as intended (https://github.com/bentoml/BentoML/blob/4308c67f327d9524025e5c348ded22553824c1d4/bentoml/_internal/store.py#L69), but I'm not sure. Additionally, this happens when any file is created in the models folder, but not when folders are created.
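
A rough sketch of what I think is going on (the `~/bentoml/models` path and store layout are assumed; this is not the actual BentoML code, just an illustration using the same `fs` library the store is built on):

```python
import os
import fs

# Assumed layout: ~/bentoml/models holds one directory per model name, but
# listdir("/") also returns plain files such as .DS_Store.
store_fs = fs.open_fs(os.path.expanduser("~/bentoml/models"))

for entry in sorted(store_fs.listdir("/")):
    if not store_fs.isdir(entry):
        # Without this check, ".DS_Store" gets treated as a model name and
        # Tag.from_taglike(".ds_store") raises InvalidArgument.
        print(f"plain file, should be skipped: {entry}")
        continue
    print(f"model directory: {entry}")
```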
**To Reproduce**
Steps to reproduce the issue:
1. Go to `~/bentoml/models/`
2. Create a `.DS_Store` file
3. Run `bentoml models list`
4. See error
**Expected behavior**
<!--- A clear and concise description of what you expected to happen. -->
All files should be skipped when scanning for models.
**Screenshots/Logs**
```shell
[19:12:41] WARNING [boot] converting .DS_Store to lowercase: .ds_store
Traceback (most recent call last):
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/__main__.py", line 4, in <module>
create_bentoml_cli()()
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/cli/model_management.py", line 90, in list_models
models = model_store.list(model_name)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/store.py", line 63, in list
return [
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/store.py", line 64, in <listcomp>
ver for _d in sorted(self._fs.listdir("/")) for ver in self.list(_d)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/store.py", line 67, in list
_tag = Tag.from_taglike(tag)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/types.py", line 221, in from_taglike
return cls.from_str(taglike)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/types.py", line 226, in from_str
return cls(tag_str, None)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/types.py", line 180, in __init__
validate_tag_str(lname)
File "/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/utils/validation.py", line 29, in validate_tag_str
raise InvalidArgument(f"{value} is not a valid tag: " + ", and ".join(errors))
bentoml.exceptions.InvalidArgument: .ds_store is not a valid tag: a tag's name or version must consist of alphanumeric characters, '_', '-', or '.', and must start and end with an alphanumeric character
```
**Environment:**
- OS: MacOS 12.2.1
- Python 3.8.9
- BentoML 1.0.0a4
**Additional context**
MacOS creates .DS_Store files automatically, so I ran into this bug after opening the folder and sorting the files 😅
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bentoml/_internal/store.py`
Content:
```
1 import os
2 import typing as t
3 import datetime
4 from abc import ABC
5 from abc import abstractmethod
6 from contextlib import contextmanager
7
8 import fs
9 import fs.errors
10 from fs.base import FS
11
12 from .types import Tag
13 from .types import PathType
14 from ..exceptions import NotFound
15 from ..exceptions import BentoMLException
16
17 T = t.TypeVar("T")
18
19
20 class StoreItem(ABC):
21 @property
22 @abstractmethod
23 def tag(self) -> Tag:
24 raise NotImplementedError
25
26 @classmethod
27 @abstractmethod
28 def from_fs(cls: t.Type[T], item_fs: FS) -> T:
29 raise NotImplementedError
30
31 @property
32 @abstractmethod
33 def creation_time(self) -> datetime.datetime:
34 raise NotImplementedError
35
36 def __repr__(self):
37 return f'{self.__class__.__name__}(tag="{self.tag}")'
38
39
40 Item = t.TypeVar("Item", bound=StoreItem)
41
42
43 class Store(ABC, t.Generic[Item]):
44 """An FsStore manages items under the given base filesystem.
45
46 Note that FsStore has no consistency checks; it assumes that no direct modification
47 of the files in its directory has occurred.
48
49 """
50
51 _fs: FS
52 _item_type: t.Type[Item]
53
54 @abstractmethod
55 def __init__(self, base_path: t.Union[PathType, FS], item_type: t.Type[Item]):
56 self._item_type = item_type
57 if isinstance(base_path, os.PathLike):
58 base_path = base_path.__fspath__()
59 self._fs = fs.open_fs(base_path)
60
61 def list(self, tag: t.Optional[t.Union[Tag, str]] = None) -> t.List[Item]:
62 if not tag:
63 return [
64 ver for _d in sorted(self._fs.listdir("/")) for ver in self.list(_d)
65 ]
66
67 _tag = Tag.from_taglike(tag)
68 if _tag.version is None:
69 if not self._fs.isdir(_tag.name):
70 raise NotFound(
71 f"no {self._item_type.__name__}s with name '{_tag.name}' found"
72 )
73
74 tags = sorted(
75 [
76 Tag(_tag.name, f.name)
77 for f in self._fs.scandir(_tag.name)
78 if f.is_dir
79 ]
80 )
81 return [self._get_item(t) for t in tags]
82 else:
83 return [self._get_item(_tag)] if self._fs.isdir(_tag.path()) else []
84
85 def _get_item(self, tag: Tag) -> Item:
86 """
87 Creates a new instance of Item that represents the item with tag `tag`.
88 """
89 return self._item_type.from_fs(self._fs.opendir(tag.path()))
90
91 def get(self, tag: t.Union[Tag, str]) -> Item:
92 """
93 store.get("my_bento")
94 store.get("my_bento:v1.0.0")
95 store.get(Tag("my_bento", "latest"))
96 """
97 _tag = Tag.from_taglike(tag)
98 if _tag.version is None or _tag.version == "latest":
99 try:
100 _tag.version = self._fs.readtext(_tag.latest_path())
101 except fs.errors.ResourceNotFound:
102 raise NotFound(
103 f"no {self._item_type.__name__}s with name '{_tag.name}' exist in BentoML store {self._fs}"
104 )
105
106 path = _tag.path()
107 if self._fs.exists(path):
108 return self._get_item(_tag)
109
110 matches = self._fs.glob(f"{path}*/")
111 counts = matches.count().directories
112 if counts == 0:
113 raise NotFound(
114 f"{self._item_type.__name__} '{tag}' is not found in BentoML store {self._fs}"
115 )
116 elif counts == 1:
117 match = next(iter(matches))
118 return self._get_item(Tag(_tag.name, match.info.name))
119 else:
120 vers: t.List[str] = []
121 for match in matches:
122 vers += match.info.name
123 raise BentoMLException(
124 f"multiple versions matched by {_tag.version}: {vers}"
125 )
126
127 @contextmanager
128 def register(self, tag: t.Union[str, Tag]):
129 _tag = Tag.from_taglike(tag)
130
131 item_path = _tag.path()
132 if self._fs.exists(item_path):
133 raise BentoMLException(
134 f"Item '{_tag}' already exists in the store {self._fs}"
135 )
136 self._fs.makedirs(item_path)
137 try:
138 yield self._fs.getsyspath(item_path)
139 finally:
140 # item generation is most likely successful, link latest path
141 if (
142 not self._fs.exists(_tag.latest_path())
143 or self.get(_tag).creation_time > self.get(_tag.name).creation_time
144 ):
145 with self._fs.open(_tag.latest_path(), "w") as latest_file:
146 latest_file.write(_tag.version)
147
148 def delete(self, tag: t.Union[str, Tag]) -> None:
149 _tag = Tag.from_taglike(tag)
150
151 if not self._fs.exists(_tag.path()):
152 raise NotFound(f"{self._item_type.__name__} '{tag}' not found")
153
154 self._fs.removetree(_tag.path())
155 if self._fs.isdir(_tag.name):
156 versions = self.list(_tag.name)
157 if len(versions) == 0:
158 # if we've removed all versions, remove the directory
159 self._fs.removetree(_tag.name)
160 else:
161 new_latest = sorted(versions, key=lambda x: x.creation_time)[-1]
162 # otherwise, update the latest version
163 assert new_latest.tag.version is not None
164 self._fs.writetext(_tag.latest_path(), new_latest.tag.version)
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/bentoml/_internal/store.py b/bentoml/_internal/store.py
--- a/bentoml/_internal/store.py
+++ b/bentoml/_internal/store.py
@@ -61,7 +61,10 @@
def list(self, tag: t.Optional[t.Union[Tag, str]] = None) -> t.List[Item]:
if not tag:
return [
- ver for _d in sorted(self._fs.listdir("/")) for ver in self.list(_d)
+ ver
+ for _d in sorted(self._fs.listdir("/"))
+ if self._fs.isdir(_d)
+ for ver in self.list(_d)
]
_tag = Tag.from_taglike(tag)
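
For readers unfamiliar with nested comprehensions, the patch works because clauses apply left to right; a small standalone illustration with made-up names:

```python
entries = ["iris_clf", ".DS_Store", "resnet"]
versions_by_name = {"iris_clf": ["v1", "v2"], "resnet": ["v1"]}

# Comprehension clauses nest left to right, so the `if` filter runs before
# the inner loop ever sees a non-directory entry.
versions = [
    ver
    for name in sorted(entries)
    if name in versions_by_name
    for ver in versions_by_name[name]
]
print(versions)  # ['v1', 'v2', 'v1']
```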
verification_info:
{"golden_diff": "diff --git a/bentoml/_internal/store.py b/bentoml/_internal/store.py\n--- a/bentoml/_internal/store.py\n+++ b/bentoml/_internal/store.py\n@@ -61,7 +61,10 @@\n def list(self, tag: t.Optional[t.Union[Tag, str]] = None) -> t.List[Item]:\n if not tag:\n return [\n- ver for _d in sorted(self._fs.listdir(\"/\")) for ver in self.list(_d)\n+ ver\n+ for _d in sorted(self._fs.listdir(\"/\"))\n+ if self._fs.isdir(_d)\n+ for ver in self.list(_d)\n ]\n \n _tag = Tag.from_taglike(tag)\n", "issue": "Files are read as Models\n**Describe the bug**\r\n\r\n<!--- A clear and concise description of what the bug is. -->\r\nWhen a .DS_Store file is created in the `bentoml/models` folder, I think `store.py`'s `list` method tries loading it. I think the check for directories isn't working as intended (https://github.com/bentoml/BentoML/blob/4308c67f327d9524025e5c348ded22553824c1d4/bentoml/_internal/store.py#L69), but I'm not sure. Additionally, this happens when any file is created in the models folder, but not when folders are created.\r\n\r\n\r\n**To Reproduce**\r\n\r\nSteps to reproduce the issue:\r\n1. Go to `~/bentoml/models/`\r\n2. Create a `.DS_Store` file\r\n3. Run `bentoml models list`\r\n4. See error\r\n\r\n\r\n**Expected behavior**\r\n\r\n<!--- A clear and concise description of what you expected to happen. -->\r\nAll files should be skipped when scanning for models.\r\n\r\n\r\n**Screenshots/Logs**\r\n\r\n```shell\r\n[19:12:41] WARNING [boot] converting .DS_Store to lowercase: .ds_store \r\nTraceback (most recent call last):\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/__main__.py\", line 4, in <module>\r\n create_bentoml_cli()()\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py\", line 1128, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py\", line 1053, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py\", line 1659, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py\", line 1659, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py\", line 1395, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/cli/model_management.py\", line 90, in list_models\r\n models = model_store.list(model_name)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/store.py\", line 63, in list\r\n return [\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/store.py\", line 64, in <listcomp>\r\n ver for _d in sorted(self._fs.listdir(\"/\")) for ver in self.list(_d)\r\n File 
\"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/store.py\", line 67, in list\r\n _tag = Tag.from_taglike(tag)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/types.py\", line 221, in from_taglike\r\n return cls.from_str(taglike)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/types.py\", line 226, in from_str\r\n return cls(tag_str, None)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/types.py\", line 180, in __init__\r\n validate_tag_str(lname)\r\n File \"/Users/spence/Library/Python/3.8/lib/python/site-packages/bentoml/_internal/utils/validation.py\", line 29, in validate_tag_str\r\n raise InvalidArgument(f\"{value} is not a valid tag: \" + \", and \".join(errors))\r\nbentoml.exceptions.InvalidArgument: .ds_store is not a valid tag: a tag's name or version must consist of alphanumeric characters, '_', '-', or '.', and must start and end with an alphanumeric character\r\n```\r\n\r\n\r\n**Environment:**\r\n - OS: MacOS 12.2.1\r\n - Python 3.8.9\r\n - BentoML 1.0.0a4\r\n\r\n\r\n**Additional context**\r\n\r\nMacOS creates .DS_Store files automatically, so I ran into this bug after opening the folder and sorting the files \ud83d\ude05\r\n\n", "before_files": [{"content": "import os\nimport typing as t\nimport datetime\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\n\nimport fs\nimport fs.errors\nfrom fs.base import FS\n\nfrom .types import Tag\nfrom .types import PathType\nfrom ..exceptions import NotFound\nfrom ..exceptions import BentoMLException\n\nT = t.TypeVar(\"T\")\n\n\nclass StoreItem(ABC):\n @property\n @abstractmethod\n def tag(self) -> Tag:\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def from_fs(cls: t.Type[T], item_fs: FS) -> T:\n raise NotImplementedError\n\n @property\n @abstractmethod\n def creation_time(self) -> datetime.datetime:\n raise NotImplementedError\n\n def __repr__(self):\n return f'{self.__class__.__name__}(tag=\"{self.tag}\")'\n\n\nItem = t.TypeVar(\"Item\", bound=StoreItem)\n\n\nclass Store(ABC, t.Generic[Item]):\n \"\"\"An FsStore manages items under the given base filesystem.\n\n Note that FsStore has no consistency checks; it assumes that no direct modification\n of the files in its directory has occurred.\n\n \"\"\"\n\n _fs: FS\n _item_type: t.Type[Item]\n\n @abstractmethod\n def __init__(self, base_path: t.Union[PathType, FS], item_type: t.Type[Item]):\n self._item_type = item_type\n if isinstance(base_path, os.PathLike):\n base_path = base_path.__fspath__()\n self._fs = fs.open_fs(base_path)\n\n def list(self, tag: t.Optional[t.Union[Tag, str]] = None) -> t.List[Item]:\n if not tag:\n return [\n ver for _d in sorted(self._fs.listdir(\"/\")) for ver in self.list(_d)\n ]\n\n _tag = Tag.from_taglike(tag)\n if _tag.version is None:\n if not self._fs.isdir(_tag.name):\n raise NotFound(\n f\"no {self._item_type.__name__}s with name '{_tag.name}' found\"\n )\n\n tags = sorted(\n [\n Tag(_tag.name, f.name)\n for f in self._fs.scandir(_tag.name)\n if f.is_dir\n ]\n )\n return [self._get_item(t) for t in tags]\n else:\n return [self._get_item(_tag)] if self._fs.isdir(_tag.path()) else []\n\n def _get_item(self, tag: Tag) -> Item:\n \"\"\"\n Creates a new instance of Item that represents the item with tag `tag`.\n \"\"\"\n return self._item_type.from_fs(self._fs.opendir(tag.path()))\n\n def get(self, tag: t.Union[Tag, str]) -> Item:\n \"\"\"\n 
store.get(\"my_bento\")\n store.get(\"my_bento:v1.0.0\")\n store.get(Tag(\"my_bento\", \"latest\"))\n \"\"\"\n _tag = Tag.from_taglike(tag)\n if _tag.version is None or _tag.version == \"latest\":\n try:\n _tag.version = self._fs.readtext(_tag.latest_path())\n except fs.errors.ResourceNotFound:\n raise NotFound(\n f\"no {self._item_type.__name__}s with name '{_tag.name}' exist in BentoML store {self._fs}\"\n )\n\n path = _tag.path()\n if self._fs.exists(path):\n return self._get_item(_tag)\n\n matches = self._fs.glob(f\"{path}*/\")\n counts = matches.count().directories\n if counts == 0:\n raise NotFound(\n f\"{self._item_type.__name__} '{tag}' is not found in BentoML store {self._fs}\"\n )\n elif counts == 1:\n match = next(iter(matches))\n return self._get_item(Tag(_tag.name, match.info.name))\n else:\n vers: t.List[str] = []\n for match in matches:\n vers += match.info.name\n raise BentoMLException(\n f\"multiple versions matched by {_tag.version}: {vers}\"\n )\n\n @contextmanager\n def register(self, tag: t.Union[str, Tag]):\n _tag = Tag.from_taglike(tag)\n\n item_path = _tag.path()\n if self._fs.exists(item_path):\n raise BentoMLException(\n f\"Item '{_tag}' already exists in the store {self._fs}\"\n )\n self._fs.makedirs(item_path)\n try:\n yield self._fs.getsyspath(item_path)\n finally:\n # item generation is most likely successful, link latest path\n if (\n not self._fs.exists(_tag.latest_path())\n or self.get(_tag).creation_time > self.get(_tag.name).creation_time\n ):\n with self._fs.open(_tag.latest_path(), \"w\") as latest_file:\n latest_file.write(_tag.version)\n\n def delete(self, tag: t.Union[str, Tag]) -> None:\n _tag = Tag.from_taglike(tag)\n\n if not self._fs.exists(_tag.path()):\n raise NotFound(f\"{self._item_type.__name__} '{tag}' not found\")\n\n self._fs.removetree(_tag.path())\n if self._fs.isdir(_tag.name):\n versions = self.list(_tag.name)\n if len(versions) == 0:\n # if we've removed all versions, remove the directory\n self._fs.removetree(_tag.name)\n else:\n new_latest = sorted(versions, key=lambda x: x.creation_time)[-1]\n # otherwise, update the latest version\n assert new_latest.tag.version is not None\n self._fs.writetext(_tag.latest_path(), new_latest.tag.version)\n", "path": "bentoml/_internal/store.py"}], "after_files": [{"content": "import os\nimport typing as t\nimport datetime\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom contextlib import contextmanager\n\nimport fs\nimport fs.errors\nfrom fs.base import FS\n\nfrom .types import Tag\nfrom .types import PathType\nfrom ..exceptions import NotFound\nfrom ..exceptions import BentoMLException\n\nT = t.TypeVar(\"T\")\n\n\nclass StoreItem(ABC):\n @property\n @abstractmethod\n def tag(self) -> Tag:\n raise NotImplementedError\n\n @classmethod\n @abstractmethod\n def from_fs(cls: t.Type[T], item_fs: FS) -> T:\n raise NotImplementedError\n\n @property\n @abstractmethod\n def creation_time(self) -> datetime.datetime:\n raise NotImplementedError\n\n def __repr__(self):\n return f'{self.__class__.__name__}(tag=\"{self.tag}\")'\n\n\nItem = t.TypeVar(\"Item\", bound=StoreItem)\n\n\nclass Store(ABC, t.Generic[Item]):\n \"\"\"An FsStore manages items under the given base filesystem.\n\n Note that FsStore has no consistency checks; it assumes that no direct modification\n of the files in its directory has occurred.\n\n \"\"\"\n\n _fs: FS\n _item_type: t.Type[Item]\n\n @abstractmethod\n def __init__(self, base_path: t.Union[PathType, FS], item_type: t.Type[Item]):\n self._item_type = item_type\n if 
isinstance(base_path, os.PathLike):\n base_path = base_path.__fspath__()\n self._fs = fs.open_fs(base_path)\n\n def list(self, tag: t.Optional[t.Union[Tag, str]] = None) -> t.List[Item]:\n if not tag:\n return [\n ver\n for _d in sorted(self._fs.listdir(\"/\"))\n if self._fs.isdir(_d)\n for ver in self.list(_d)\n ]\n\n _tag = Tag.from_taglike(tag)\n if _tag.version is None:\n if not self._fs.isdir(_tag.name):\n raise NotFound(\n f\"no {self._item_type.__name__}s with name '{_tag.name}' found\"\n )\n\n tags = sorted(\n [\n Tag(_tag.name, f.name)\n for f in self._fs.scandir(_tag.name)\n if f.is_dir\n ]\n )\n return [self._get_item(t) for t in tags]\n else:\n return [self._get_item(_tag)] if self._fs.isdir(_tag.path()) else []\n\n def _get_item(self, tag: Tag) -> Item:\n \"\"\"\n Creates a new instance of Item that represents the item with tag `tag`.\n \"\"\"\n return self._item_type.from_fs(self._fs.opendir(tag.path()))\n\n def get(self, tag: t.Union[Tag, str]) -> Item:\n \"\"\"\n store.get(\"my_bento\")\n store.get(\"my_bento:v1.0.0\")\n store.get(Tag(\"my_bento\", \"latest\"))\n \"\"\"\n _tag = Tag.from_taglike(tag)\n if _tag.version is None or _tag.version == \"latest\":\n try:\n _tag.version = self._fs.readtext(_tag.latest_path())\n except fs.errors.ResourceNotFound:\n raise NotFound(\n f\"no {self._item_type.__name__}s with name '{_tag.name}' exist in BentoML store {self._fs}\"\n )\n\n path = _tag.path()\n if self._fs.exists(path):\n return self._get_item(_tag)\n\n matches = self._fs.glob(f\"{path}*/\")\n counts = matches.count().directories\n if counts == 0:\n raise NotFound(\n f\"{self._item_type.__name__} '{tag}' is not found in BentoML store {self._fs}\"\n )\n elif counts == 1:\n match = next(iter(matches))\n return self._get_item(Tag(_tag.name, match.info.name))\n else:\n vers: t.List[str] = []\n for match in matches:\n vers += match.info.name\n raise BentoMLException(\n f\"multiple versions matched by {_tag.version}: {vers}\"\n )\n\n @contextmanager\n def register(self, tag: t.Union[str, Tag]):\n _tag = Tag.from_taglike(tag)\n\n item_path = _tag.path()\n if self._fs.exists(item_path):\n raise BentoMLException(\n f\"Item '{_tag}' already exists in the store {self._fs}\"\n )\n self._fs.makedirs(item_path)\n try:\n yield self._fs.getsyspath(item_path)\n finally:\n # item generation is most likely successful, link latest path\n if (\n not self._fs.exists(_tag.latest_path())\n or self.get(_tag).creation_time > self.get(_tag.name).creation_time\n ):\n with self._fs.open(_tag.latest_path(), \"w\") as latest_file:\n latest_file.write(_tag.version)\n\n def delete(self, tag: t.Union[str, Tag]) -> None:\n _tag = Tag.from_taglike(tag)\n\n if not self._fs.exists(_tag.path()):\n raise NotFound(f\"{self._item_type.__name__} '{tag}' not found\")\n\n self._fs.removetree(_tag.path())\n if self._fs.isdir(_tag.name):\n versions = self.list(_tag.name)\n if len(versions) == 0:\n # if we've removed all versions, remove the directory\n self._fs.removetree(_tag.name)\n else:\n new_latest = sorted(versions, key=lambda x: x.creation_time)[-1]\n # otherwise, update the latest version\n assert new_latest.tag.version is not None\n self._fs.writetext(_tag.latest_path(), new_latest.tag.version)\n", "path": "bentoml/_internal/store.py"}]}
num_tokens: 3,092 | num_tokens_diff: 163
problem_id: gh_patches_debug_9283 | source: rasdani/github-patches | task_type: git_diff | in_source_id: locustio__locust-1918

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The report can't show the right time
<!--
If you have a general question about how to use Locust, please check Stack Overflow first https://stackoverflow.com/questions/tagged/locust
You can also ask new questions on SO, https://stackoverflow.com/questions/ask just remember to tag your question with "locust". Do not immediately post your issue here after posting to SO, wait for an answer there instead.
Use this form only for reporting actual bugs in Locust. Remember, the developers of Locust are unpaid volunteers, so make sure you have tried everything you can think of before filing a bug :)
Always make sure you are running an up to date Locust version (pip3 install -U locust)
-->
### Describe the bug
<!-- A clear and concise description of what the bug is -->


This picture shows the correct time; I am in China.
### Expected behavior
<!-- Tell us what you think should happen -->
Wrong time zone reported
### Actual behavior
<!-- Tell us what happens instead. Include screenshots if this an issue with the GUI. -->
### Steps to reproduce
<!-- Please provide a minimal reproducible code example (https://stackoverflow.com/help/minimal-reproducible-example) -->
### Environment
- OS:
- Python version:
- Locust version: (please dont file issues for anything but the most recent release or prerelease builds)
- Locust command line that you ran:
- Locust file contents (anonymized if necessary):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locust/html.py`
Content:
```
1 from jinja2 import Environment, FileSystemLoader
2 import os
3 import pathlib
4 import datetime
5 from itertools import chain
6 from .stats import sort_stats
7 from .user.inspectuser import get_task_ratio_dict
8 from html import escape
9 from json import dumps
10
11
12 def render_template(file, **kwargs):
13 templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), "templates")
14 env = Environment(loader=FileSystemLoader(templates_path), extensions=["jinja2.ext.do"])
15 template = env.get_template(file)
16 return template.render(**kwargs)
17
18
19 def get_html_report(environment, show_download_link=True):
20 stats = environment.runner.stats
21
22 start_ts = stats.start_time
23 start_time = datetime.datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S")
24
25 end_ts = stats.last_request_timestamp
26 if end_ts:
27 end_time = datetime.datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S")
28 else:
29 end_time = start_time
30
31 host = None
32 if environment.host:
33 host = environment.host
34 elif environment.runner.user_classes:
35 all_hosts = set([l.host for l in environment.runner.user_classes])
36 if len(all_hosts) == 1:
37 host = list(all_hosts)[0]
38
39 requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))
40 failures_statistics = sort_stats(stats.errors)
41 exceptions_statistics = [
42 {**exc, "nodes": ", ".join(exc["nodes"])} for exc in environment.runner.exceptions.values()
43 ]
44
45 history = stats.history
46
47 static_js = []
48 js_files = ["jquery-1.11.3.min.js", "echarts.common.min.js", "vintage.js", "chart.js", "tasks.js"]
49 for js_file in js_files:
50 path = os.path.join(os.path.dirname(__file__), "static", js_file)
51 static_js.append("// " + js_file)
52 with open(path, encoding="utf8") as f:
53 static_js.append(f.read())
54 static_js.extend(["", ""])
55
56 static_css = []
57 css_files = ["tables.css"]
58 for css_file in css_files:
59 path = os.path.join(os.path.dirname(__file__), "static", "css", css_file)
60 static_css.append("/* " + css_file + " */")
61 with open(path, encoding="utf8") as f:
62 static_css.append(f.read())
63 static_css.extend(["", ""])
64
65 task_data = {
66 "per_class": get_task_ratio_dict(environment.user_classes),
67 "total": get_task_ratio_dict(environment.user_classes, total=True),
68 }
69
70 res = render_template(
71 "report.html",
72 int=int,
73 round=round,
74 requests_statistics=requests_statistics,
75 failures_statistics=failures_statistics,
76 exceptions_statistics=exceptions_statistics,
77 start_time=start_time,
78 end_time=end_time,
79 host=host,
80 history=history,
81 static_js="\n".join(static_js),
82 static_css="\n".join(static_css),
83 show_download_link=show_download_link,
84 locustfile=environment.locustfile,
85 tasks=escape(dumps(task_data)),
86 )
87
88 return res
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/locust/html.py b/locust/html.py
--- a/locust/html.py
+++ b/locust/html.py
@@ -20,11 +20,11 @@
stats = environment.runner.stats
start_ts = stats.start_time
- start_time = datetime.datetime.fromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S")
+ start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime("%Y-%m-%d %H:%M:%S")
end_ts = stats.last_request_timestamp
if end_ts:
- end_time = datetime.datetime.fromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S")
+ end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime("%Y-%m-%d %H:%M:%S")
else:
end_time = start_time
|
{"golden_diff": "diff --git a/locust/html.py b/locust/html.py\n--- a/locust/html.py\n+++ b/locust/html.py\n@@ -20,11 +20,11 @@\n stats = environment.runner.stats\n \n start_ts = stats.start_time\n- start_time = datetime.datetime.fromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n+ start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n \n end_ts = stats.last_request_timestamp\n if end_ts:\n- end_time = datetime.datetime.fromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n+ end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n", "issue": "the report cant show the right time\n<!-- \r\nIf you have a general question about how to use Locust, please check Stack Overflow first https://stackoverflow.com/questions/tagged/locust\r\n\r\nYou can also ask new questions on SO, https://stackoverflow.com/questions/ask just remember to tag your question with \"locust\". Do not immediately post your issue here after posting to SO, wait for an answer there instead.\r\n\r\nUse this form only for reporting actual bugs in Locust. Remember, the developers of Locust are unpaid volunteers, so make sure you have tried everything you can think of before filing a bug :) \r\n\r\nAlways make sure you are running an up to date Locust version (pip3 install -U locust)\r\n-->\r\n\r\n### Describe the bug\r\n<!-- A clear and concise description of what the bug is -->\r\n\r\n\r\nthis picture is right time i am in china\r\n\r\n\r\n### Expected behavior\r\n<!-- Tell us what you think should happen -->\r\nWrong time zone reported\r\n### Actual behavior\r\n<!-- Tell us what happens instead. Include screenshots if this an issue with the GUI. -->\r\n\r\n### Steps to reproduce\r\n<!-- Please provide a minimal reproducible code example (https://stackoverflow.com/help/minimal-reproducible-example) --> \r\n\r\n### Environment\r\n\r\n- OS:\r\n- Python version:\r\n- Locust version: (please dont file issues for anything but the most recent release or prerelease builds)\r\n- Locust command line that you ran:\r\n- Locust file contents (anonymized if necessary):\n", "before_files": [{"content": "from jinja2 import Environment, FileSystemLoader\nimport os\nimport pathlib\nimport datetime\nfrom itertools import chain\nfrom .stats import sort_stats\nfrom .user.inspectuser import get_task_ratio_dict\nfrom html import escape\nfrom json import dumps\n\n\ndef render_template(file, **kwargs):\n templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), \"templates\")\n env = Environment(loader=FileSystemLoader(templates_path), extensions=[\"jinja2.ext.do\"])\n template = env.get_template(file)\n return template.render(**kwargs)\n\n\ndef get_html_report(environment, show_download_link=True):\n stats = environment.runner.stats\n\n start_ts = stats.start_time\n start_time = datetime.datetime.fromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n end_ts = stats.last_request_timestamp\n if end_ts:\n end_time = datetime.datetime.fromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n\n host = None\n if environment.host:\n host = environment.host\n elif environment.runner.user_classes:\n all_hosts = set([l.host for l in environment.runner.user_classes])\n if len(all_hosts) == 1:\n host = list(all_hosts)[0]\n\n requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))\n failures_statistics = sort_stats(stats.errors)\n exceptions_statistics = [\n {**exc, \"nodes\": \", 
\".join(exc[\"nodes\"])} for exc in environment.runner.exceptions.values()\n ]\n\n history = stats.history\n\n static_js = []\n js_files = [\"jquery-1.11.3.min.js\", \"echarts.common.min.js\", \"vintage.js\", \"chart.js\", \"tasks.js\"]\n for js_file in js_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", js_file)\n static_js.append(\"// \" + js_file)\n with open(path, encoding=\"utf8\") as f:\n static_js.append(f.read())\n static_js.extend([\"\", \"\"])\n\n static_css = []\n css_files = [\"tables.css\"]\n for css_file in css_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", \"css\", css_file)\n static_css.append(\"/* \" + css_file + \" */\")\n with open(path, encoding=\"utf8\") as f:\n static_css.append(f.read())\n static_css.extend([\"\", \"\"])\n\n task_data = {\n \"per_class\": get_task_ratio_dict(environment.user_classes),\n \"total\": get_task_ratio_dict(environment.user_classes, total=True),\n }\n\n res = render_template(\n \"report.html\",\n int=int,\n round=round,\n requests_statistics=requests_statistics,\n failures_statistics=failures_statistics,\n exceptions_statistics=exceptions_statistics,\n start_time=start_time,\n end_time=end_time,\n host=host,\n history=history,\n static_js=\"\\n\".join(static_js),\n static_css=\"\\n\".join(static_css),\n show_download_link=show_download_link,\n locustfile=environment.locustfile,\n tasks=escape(dumps(task_data)),\n )\n\n return res\n", "path": "locust/html.py"}], "after_files": [{"content": "from jinja2 import Environment, FileSystemLoader\nimport os\nimport pathlib\nimport datetime\nfrom itertools import chain\nfrom .stats import sort_stats\nfrom .user.inspectuser import get_task_ratio_dict\nfrom html import escape\nfrom json import dumps\n\n\ndef render_template(file, **kwargs):\n templates_path = os.path.join(pathlib.Path(__file__).parent.absolute(), \"templates\")\n env = Environment(loader=FileSystemLoader(templates_path), extensions=[\"jinja2.ext.do\"])\n template = env.get_template(file)\n return template.render(**kwargs)\n\n\ndef get_html_report(environment, show_download_link=True):\n stats = environment.runner.stats\n\n start_ts = stats.start_time\n start_time = datetime.datetime.utcfromtimestamp(start_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n end_ts = stats.last_request_timestamp\n if end_ts:\n end_time = datetime.datetime.utcfromtimestamp(end_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n end_time = start_time\n\n host = None\n if environment.host:\n host = environment.host\n elif environment.runner.user_classes:\n all_hosts = set([l.host for l in environment.runner.user_classes])\n if len(all_hosts) == 1:\n host = list(all_hosts)[0]\n\n requests_statistics = list(chain(sort_stats(stats.entries), [stats.total]))\n failures_statistics = sort_stats(stats.errors)\n exceptions_statistics = [\n {**exc, \"nodes\": \", \".join(exc[\"nodes\"])} for exc in environment.runner.exceptions.values()\n ]\n\n history = stats.history\n\n static_js = []\n js_files = [\"jquery-1.11.3.min.js\", \"echarts.common.min.js\", \"vintage.js\", \"chart.js\", \"tasks.js\"]\n for js_file in js_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", js_file)\n static_js.append(\"// \" + js_file)\n with open(path, encoding=\"utf8\") as f:\n static_js.append(f.read())\n static_js.extend([\"\", \"\"])\n\n static_css = []\n css_files = [\"tables.css\"]\n for css_file in css_files:\n path = os.path.join(os.path.dirname(__file__), \"static\", \"css\", css_file)\n static_css.append(\"/* \" + css_file + \" */\")\n with 
open(path, encoding=\"utf8\") as f:\n static_css.append(f.read())\n static_css.extend([\"\", \"\"])\n\n task_data = {\n \"per_class\": get_task_ratio_dict(environment.user_classes),\n \"total\": get_task_ratio_dict(environment.user_classes, total=True),\n }\n\n res = render_template(\n \"report.html\",\n int=int,\n round=round,\n requests_statistics=requests_statistics,\n failures_statistics=failures_statistics,\n exceptions_statistics=exceptions_statistics,\n start_time=start_time,\n end_time=end_time,\n host=host,\n history=history,\n static_js=\"\\n\".join(static_js),\n static_css=\"\\n\".join(static_css),\n show_download_link=show_download_link,\n locustfile=environment.locustfile,\n tasks=escape(dumps(task_data)),\n )\n\n return res\n", "path": "locust/html.py"}]}
num_tokens: 1,536 | num_tokens_diff: 192
problem_id: gh_patches_debug_15430 | source: rasdani/github-patches | task_type: git_diff | in_source_id: saleor__saleor-7097

prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weight based rates not working as expected
### What I'm trying to achieve
…
Hi there 😃 I think there is a bug when setting up shipping methods.
### Steps to reproduce the problem
1. Set up a "Weight Based Rate" and check the "There are no value limits" checkbox.
2. Go to the storefront checkout and see that the shipping method is not shown.
### What I expected to happen
I would expect "DHL Express" to show up when the "no value limits" is enabled instead of giving me an empty "availableShippingMethods" list?
### Screenshots
--- WITH "no value limits" DISABLED ---


--- WITH "no value limits" ENABLED ---


**System information**
Operating system: Windows 10 (WSL2)
Saleor/Saleor-storefront/Saleor-Dashboard version: 2.11
Apologies if I'm misunderstanding something or if this has already been raised - couldn't find it as an issue in any of the repos.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/shipping/models.py`
Content:
```
1 from typing import TYPE_CHECKING, Union
2
3 from django.conf import settings
4 from django.db import models
5 from django.db.models import Q
6 from django_countries.fields import CountryField
7 from django_measurement.models import MeasurementField
8 from django_prices.models import MoneyField
9 from measurement.measures import Weight
10 from prices import Money, MoneyRange
11
12 from ..core.permissions import ShippingPermissions
13 from ..core.utils.translations import TranslationProxy
14 from ..core.weight import (
15 WeightUnits,
16 convert_weight,
17 get_default_weight_unit,
18 zero_weight,
19 )
20 from . import ShippingMethodType
21
22 if TYPE_CHECKING:
23 # flake8: noqa
24 from ..checkout.models import Checkout
25 from ..order.models import Order
26
27
28 def _applicable_weight_based_methods(weight, qs):
29 """Return weight based shipping methods that are applicable for the total weight."""
30 qs = qs.weight_based()
31 min_weight_matched = Q(minimum_order_weight__lte=weight)
32 no_weight_limit = Q(maximum_order_weight__isnull=True)
33 max_weight_matched = Q(maximum_order_weight__gte=weight)
34 return qs.filter(min_weight_matched & (no_weight_limit | max_weight_matched))
35
36
37 def _applicable_price_based_methods(price: Money, qs):
38 """Return price based shipping methods that are applicable for the given total."""
39 qs = qs.price_based()
40 min_price_matched = Q(minimum_order_price_amount__lte=price.amount)
41 no_price_limit = Q(maximum_order_price_amount__isnull=True)
42 max_price_matched = Q(maximum_order_price_amount__gte=price.amount)
43 return qs.filter(min_price_matched & (no_price_limit | max_price_matched))
44
45
46 def _get_weight_type_display(min_weight, max_weight):
47 default_unit = get_default_weight_unit()
48
49 if min_weight.unit != default_unit:
50 min_weight = convert_weight(min_weight, default_unit)
51 if max_weight and max_weight.unit != default_unit:
52 max_weight = convert_weight(max_weight, default_unit)
53
54 if max_weight is None:
55 return ("%(min_weight)s and up" % {"min_weight": min_weight},)
56 return "%(min_weight)s to %(max_weight)s" % {
57 "min_weight": min_weight,
58 "max_weight": max_weight,
59 }
60
61
62 class ShippingZone(models.Model):
63 name = models.CharField(max_length=100)
64 countries = CountryField(multiple=True, default=[], blank=True)
65 default = models.BooleanField(default=False)
66
67 def __str__(self):
68 return self.name
69
70 @property
71 def price_range(self):
72 prices = [
73 shipping_method.get_total()
74 for shipping_method in self.shipping_methods.all()
75 ]
76 if prices:
77 return MoneyRange(min(prices), max(prices))
78 return None
79
80 class Meta:
81 permissions = (
82 (ShippingPermissions.MANAGE_SHIPPING.codename, "Manage shipping."),
83 )
84
85
86 class ShippingMethodQueryset(models.QuerySet):
87 def price_based(self):
88 return self.filter(type=ShippingMethodType.PRICE_BASED)
89
90 def weight_based(self):
91 return self.filter(type=ShippingMethodType.WEIGHT_BASED)
92
93 def applicable_shipping_methods(self, price: Money, weight, country_code):
94 """Return the ShippingMethods that can be used on an order with shipment.
95
96 It is based on the given country code, and by shipping methods that are
97 applicable to the given price & weight total.
98 """
99 qs = self.filter(
100 shipping_zone__countries__contains=country_code, currency=price.currency,
101 )
102 qs = qs.prefetch_related("shipping_zone").order_by("price_amount")
103 price_based_methods = _applicable_price_based_methods(price, qs)
104 weight_based_methods = _applicable_weight_based_methods(weight, qs)
105 return price_based_methods | weight_based_methods
106
107 def applicable_shipping_methods_for_instance(
108 self, instance: Union["Checkout", "Order"], price: Money, country_code=None
109 ):
110 if not instance.is_shipping_required():
111 return None
112 if not instance.shipping_address:
113 return None
114
115 return self.applicable_shipping_methods(
116 price=price,
117 weight=instance.get_total_weight(),
118 country_code=country_code or instance.shipping_address.country.code,
119 )
120
121
122 class ShippingMethod(models.Model):
123 name = models.CharField(max_length=100)
124 type = models.CharField(max_length=30, choices=ShippingMethodType.CHOICES)
125 currency = models.CharField(
126 max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,
127 default=settings.DEFAULT_CURRENCY,
128 )
129 price_amount = models.DecimalField(
130 max_digits=settings.DEFAULT_MAX_DIGITS,
131 decimal_places=settings.DEFAULT_DECIMAL_PLACES,
132 default=0,
133 )
134 price = MoneyField(amount_field="price_amount", currency_field="currency")
135 shipping_zone = models.ForeignKey(
136 ShippingZone, related_name="shipping_methods", on_delete=models.CASCADE
137 )
138
139 minimum_order_price_amount = models.DecimalField(
140 max_digits=settings.DEFAULT_MAX_DIGITS,
141 decimal_places=settings.DEFAULT_DECIMAL_PLACES,
142 default=0,
143 blank=True,
144 null=True,
145 )
146 minimum_order_price = MoneyField(
147 amount_field="minimum_order_price_amount", currency_field="currency"
148 )
149
150 maximum_order_price_amount = models.DecimalField(
151 max_digits=settings.DEFAULT_MAX_DIGITS,
152 decimal_places=settings.DEFAULT_DECIMAL_PLACES,
153 blank=True,
154 null=True,
155 )
156 maximum_order_price = MoneyField(
157 amount_field="maximum_order_price_amount", currency_field="currency"
158 )
159
160 minimum_order_weight = MeasurementField(
161 measurement=Weight,
162 unit_choices=WeightUnits.CHOICES,
163 default=zero_weight,
164 blank=True,
165 null=True,
166 )
167 maximum_order_weight = MeasurementField(
168 measurement=Weight, unit_choices=WeightUnits.CHOICES, blank=True, null=True
169 )
170
171 objects = ShippingMethodQueryset.as_manager()
172 translated = TranslationProxy()
173
174 class Meta:
175 ordering = ("pk",)
176
177 def __str__(self):
178 return self.name
179
180 def __repr__(self):
181 if self.type == ShippingMethodType.PRICE_BASED:
182 minimum = "%s%s" % (
183 self.minimum_order_price.amount,
184 self.minimum_order_price.currency,
185 )
186 max_price = self.maximum_order_price
187 maximum = (
188 "%s%s" % (max_price.amount, max_price.currency)
189 if max_price
190 else "no limit"
191 )
192 return "ShippingMethod(type=%s min=%s, max=%s)" % (
193 self.type,
194 minimum,
195 maximum,
196 )
197 return "ShippingMethod(type=%s weight_range=(%s)" % (
198 self.type,
199 _get_weight_type_display(
200 self.minimum_order_weight, self.maximum_order_weight
201 ),
202 )
203
204 def get_total(self):
205 return self.price
206
207
208 class ShippingMethodTranslation(models.Model):
209 language_code = models.CharField(max_length=10)
210 name = models.CharField(max_length=255, null=True, blank=True)
211 shipping_method = models.ForeignKey(
212 ShippingMethod, related_name="translations", on_delete=models.CASCADE
213 )
214
215 class Meta:
216 unique_together = (("language_code", "shipping_method"),)
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/shipping/models.py b/saleor/shipping/models.py
--- a/saleor/shipping/models.py
+++ b/saleor/shipping/models.py
@@ -28,10 +28,13 @@
def _applicable_weight_based_methods(weight, qs):
"""Return weight based shipping methods that are applicable for the total weight."""
qs = qs.weight_based()
- min_weight_matched = Q(minimum_order_weight__lte=weight)
- no_weight_limit = Q(maximum_order_weight__isnull=True)
- max_weight_matched = Q(maximum_order_weight__gte=weight)
- return qs.filter(min_weight_matched & (no_weight_limit | max_weight_matched))
+ min_weight_matched = Q(minimum_order_weight__lte=weight) | Q(
+ minimum_order_weight__isnull=True
+ )
+ max_weight_matched = Q(maximum_order_weight__gte=weight) | Q(
+ maximum_order_weight__isnull=True
+ )
+ return qs.filter(min_weight_matched & max_weight_matched)
def _applicable_price_based_methods(price: Money, qs):
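For orientation, the corrected filter simply treats a NULL minimum or maximum weight as "no limit". The following dependency-free sketch (a hypothetical helper, not part of Saleor or of this dataset row) expresses the same predicate in plain Python:

```python
# Hypothetical, dependency-free sketch of the corrected weight-range check.
# A None bound means "no limit", mirroring the NULL-able model fields.
def weight_applicable(weight, min_weight=None, max_weight=None):
    min_ok = min_weight is None or min_weight <= weight
    max_ok = max_weight is None or weight <= max_weight
    return min_ok and max_ok

# Before the fix, a method whose minimum weight was NULL was excluded entirely;
# with the corrected logic it matches any order weight.
assert weight_applicable(5, None, None)      # "no value limits" method
assert weight_applicable(5, 1, 10)           # inside the range
assert not weight_applicable(50, 1, 10)      # above the maximum
```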
|
{"golden_diff": "diff --git a/saleor/shipping/models.py b/saleor/shipping/models.py\n--- a/saleor/shipping/models.py\n+++ b/saleor/shipping/models.py\n@@ -28,10 +28,13 @@\n def _applicable_weight_based_methods(weight, qs):\n \"\"\"Return weight based shipping methods that are applicable for the total weight.\"\"\"\n qs = qs.weight_based()\n- min_weight_matched = Q(minimum_order_weight__lte=weight)\n- no_weight_limit = Q(maximum_order_weight__isnull=True)\n- max_weight_matched = Q(maximum_order_weight__gte=weight)\n- return qs.filter(min_weight_matched & (no_weight_limit | max_weight_matched))\n+ min_weight_matched = Q(minimum_order_weight__lte=weight) | Q(\n+ minimum_order_weight__isnull=True\n+ )\n+ max_weight_matched = Q(maximum_order_weight__gte=weight) | Q(\n+ maximum_order_weight__isnull=True\n+ )\n+ return qs.filter(min_weight_matched & max_weight_matched)\n \n \n def _applicable_price_based_methods(price: Money, qs):\n", "issue": "Weight based rates not working as expected\n### What I'm trying to achieve\r\n\u2026\r\nHi there \ud83d\ude03 I think there is a bug when setting up shipping methods. \r\n### Steps to reproduce the problem\r\n1. Setup a \"Weight Based Rate\" and check the \"There are no value limits\" checkbox.\r\n2. Go to the storefront checkout and see that the shipping method is not shown.\r\n\r\n### What I expected to happen\r\nI would expect \"DHL Express\" to show up when the \"no value limits\" is enabled instead of giving me an empty \"availableShippingMethods\" list?\r\n\r\n### Screenshots\r\n--- WITH \"no value limits\" DISABLED ---\r\n\r\n\r\n\r\n\r\n\r\n--- WITH \"no value limits\" ENABLED ---\r\n\r\n\r\n\r\n\r\n\r\n\r\n**System information**\r\nOperating system: Windows 10 (WSL2)\r\nSaleor/Saleor-storefront/Saleor-Dashboard version: 2.11\r\n\r\nApologies if I'm misunderstanding something or if this has already been raised - couldn't find it as an issue in any of the repos.\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Union\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Q\nfrom django_countries.fields import CountryField\nfrom django_measurement.models import MeasurementField\nfrom django_prices.models import MoneyField\nfrom measurement.measures import Weight\nfrom prices import Money, MoneyRange\n\nfrom ..core.permissions import ShippingPermissions\nfrom ..core.utils.translations import TranslationProxy\nfrom ..core.weight import (\n WeightUnits,\n convert_weight,\n get_default_weight_unit,\n zero_weight,\n)\nfrom . 
import ShippingMethodType\n\nif TYPE_CHECKING:\n # flake8: noqa\n from ..checkout.models import Checkout\n from ..order.models import Order\n\n\ndef _applicable_weight_based_methods(weight, qs):\n \"\"\"Return weight based shipping methods that are applicable for the total weight.\"\"\"\n qs = qs.weight_based()\n min_weight_matched = Q(minimum_order_weight__lte=weight)\n no_weight_limit = Q(maximum_order_weight__isnull=True)\n max_weight_matched = Q(maximum_order_weight__gte=weight)\n return qs.filter(min_weight_matched & (no_weight_limit | max_weight_matched))\n\n\ndef _applicable_price_based_methods(price: Money, qs):\n \"\"\"Return price based shipping methods that are applicable for the given total.\"\"\"\n qs = qs.price_based()\n min_price_matched = Q(minimum_order_price_amount__lte=price.amount)\n no_price_limit = Q(maximum_order_price_amount__isnull=True)\n max_price_matched = Q(maximum_order_price_amount__gte=price.amount)\n return qs.filter(min_price_matched & (no_price_limit | max_price_matched))\n\n\ndef _get_weight_type_display(min_weight, max_weight):\n default_unit = get_default_weight_unit()\n\n if min_weight.unit != default_unit:\n min_weight = convert_weight(min_weight, default_unit)\n if max_weight and max_weight.unit != default_unit:\n max_weight = convert_weight(max_weight, default_unit)\n\n if max_weight is None:\n return (\"%(min_weight)s and up\" % {\"min_weight\": min_weight},)\n return \"%(min_weight)s to %(max_weight)s\" % {\n \"min_weight\": min_weight,\n \"max_weight\": max_weight,\n }\n\n\nclass ShippingZone(models.Model):\n name = models.CharField(max_length=100)\n countries = CountryField(multiple=True, default=[], blank=True)\n default = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n @property\n def price_range(self):\n prices = [\n shipping_method.get_total()\n for shipping_method in self.shipping_methods.all()\n ]\n if prices:\n return MoneyRange(min(prices), max(prices))\n return None\n\n class Meta:\n permissions = (\n (ShippingPermissions.MANAGE_SHIPPING.codename, \"Manage shipping.\"),\n )\n\n\nclass ShippingMethodQueryset(models.QuerySet):\n def price_based(self):\n return self.filter(type=ShippingMethodType.PRICE_BASED)\n\n def weight_based(self):\n return self.filter(type=ShippingMethodType.WEIGHT_BASED)\n\n def applicable_shipping_methods(self, price: Money, weight, country_code):\n \"\"\"Return the ShippingMethods that can be used on an order with shipment.\n\n It is based on the given country code, and by shipping methods that are\n applicable to the given price & weight total.\n \"\"\"\n qs = self.filter(\n shipping_zone__countries__contains=country_code, currency=price.currency,\n )\n qs = qs.prefetch_related(\"shipping_zone\").order_by(\"price_amount\")\n price_based_methods = _applicable_price_based_methods(price, qs)\n weight_based_methods = _applicable_weight_based_methods(weight, qs)\n return price_based_methods | weight_based_methods\n\n def applicable_shipping_methods_for_instance(\n self, instance: Union[\"Checkout\", \"Order\"], price: Money, country_code=None\n ):\n if not instance.is_shipping_required():\n return None\n if not instance.shipping_address:\n return None\n\n return self.applicable_shipping_methods(\n price=price,\n weight=instance.get_total_weight(),\n country_code=country_code or instance.shipping_address.country.code,\n )\n\n\nclass ShippingMethod(models.Model):\n name = models.CharField(max_length=100)\n type = models.CharField(max_length=30, choices=ShippingMethodType.CHOICES)\n 
currency = models.CharField(\n max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,\n default=settings.DEFAULT_CURRENCY,\n )\n price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n default=0,\n )\n price = MoneyField(amount_field=\"price_amount\", currency_field=\"currency\")\n shipping_zone = models.ForeignKey(\n ShippingZone, related_name=\"shipping_methods\", on_delete=models.CASCADE\n )\n\n minimum_order_price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n default=0,\n blank=True,\n null=True,\n )\n minimum_order_price = MoneyField(\n amount_field=\"minimum_order_price_amount\", currency_field=\"currency\"\n )\n\n maximum_order_price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n blank=True,\n null=True,\n )\n maximum_order_price = MoneyField(\n amount_field=\"maximum_order_price_amount\", currency_field=\"currency\"\n )\n\n minimum_order_weight = MeasurementField(\n measurement=Weight,\n unit_choices=WeightUnits.CHOICES,\n default=zero_weight,\n blank=True,\n null=True,\n )\n maximum_order_weight = MeasurementField(\n measurement=Weight, unit_choices=WeightUnits.CHOICES, blank=True, null=True\n )\n\n objects = ShippingMethodQueryset.as_manager()\n translated = TranslationProxy()\n\n class Meta:\n ordering = (\"pk\",)\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n if self.type == ShippingMethodType.PRICE_BASED:\n minimum = \"%s%s\" % (\n self.minimum_order_price.amount,\n self.minimum_order_price.currency,\n )\n max_price = self.maximum_order_price\n maximum = (\n \"%s%s\" % (max_price.amount, max_price.currency)\n if max_price\n else \"no limit\"\n )\n return \"ShippingMethod(type=%s min=%s, max=%s)\" % (\n self.type,\n minimum,\n maximum,\n )\n return \"ShippingMethod(type=%s weight_range=(%s)\" % (\n self.type,\n _get_weight_type_display(\n self.minimum_order_weight, self.maximum_order_weight\n ),\n )\n\n def get_total(self):\n return self.price\n\n\nclass ShippingMethodTranslation(models.Model):\n language_code = models.CharField(max_length=10)\n name = models.CharField(max_length=255, null=True, blank=True)\n shipping_method = models.ForeignKey(\n ShippingMethod, related_name=\"translations\", on_delete=models.CASCADE\n )\n\n class Meta:\n unique_together = ((\"language_code\", \"shipping_method\"),)\n", "path": "saleor/shipping/models.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Union\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Q\nfrom django_countries.fields import CountryField\nfrom django_measurement.models import MeasurementField\nfrom django_prices.models import MoneyField\nfrom measurement.measures import Weight\nfrom prices import Money, MoneyRange\n\nfrom ..core.permissions import ShippingPermissions\nfrom ..core.utils.translations import TranslationProxy\nfrom ..core.weight import (\n WeightUnits,\n convert_weight,\n get_default_weight_unit,\n zero_weight,\n)\nfrom . 
import ShippingMethodType\n\nif TYPE_CHECKING:\n # flake8: noqa\n from ..checkout.models import Checkout\n from ..order.models import Order\n\n\ndef _applicable_weight_based_methods(weight, qs):\n \"\"\"Return weight based shipping methods that are applicable for the total weight.\"\"\"\n qs = qs.weight_based()\n min_weight_matched = Q(minimum_order_weight__lte=weight) | Q(\n minimum_order_weight__isnull=True\n )\n max_weight_matched = Q(maximum_order_weight__gte=weight) | Q(\n maximum_order_weight__isnull=True\n )\n return qs.filter(min_weight_matched & max_weight_matched)\n\n\ndef _applicable_price_based_methods(price: Money, qs):\n \"\"\"Return price based shipping methods that are applicable for the given total.\"\"\"\n qs = qs.price_based()\n min_price_matched = Q(minimum_order_price_amount__lte=price.amount)\n no_price_limit = Q(maximum_order_price_amount__isnull=True)\n max_price_matched = Q(maximum_order_price_amount__gte=price.amount)\n return qs.filter(min_price_matched & (no_price_limit | max_price_matched))\n\n\ndef _get_weight_type_display(min_weight, max_weight):\n default_unit = get_default_weight_unit()\n\n if min_weight.unit != default_unit:\n min_weight = convert_weight(min_weight, default_unit)\n if max_weight and max_weight.unit != default_unit:\n max_weight = convert_weight(max_weight, default_unit)\n\n if max_weight is None:\n return (\"%(min_weight)s and up\" % {\"min_weight\": min_weight},)\n return \"%(min_weight)s to %(max_weight)s\" % {\n \"min_weight\": min_weight,\n \"max_weight\": max_weight,\n }\n\n\nclass ShippingZone(models.Model):\n name = models.CharField(max_length=100)\n countries = CountryField(multiple=True, default=[], blank=True)\n default = models.BooleanField(default=False)\n\n def __str__(self):\n return self.name\n\n @property\n def price_range(self):\n prices = [\n shipping_method.get_total()\n for shipping_method in self.shipping_methods.all()\n ]\n if prices:\n return MoneyRange(min(prices), max(prices))\n return None\n\n class Meta:\n permissions = (\n (ShippingPermissions.MANAGE_SHIPPING.codename, \"Manage shipping.\"),\n )\n\n\nclass ShippingMethodQueryset(models.QuerySet):\n def price_based(self):\n return self.filter(type=ShippingMethodType.PRICE_BASED)\n\n def weight_based(self):\n return self.filter(type=ShippingMethodType.WEIGHT_BASED)\n\n def applicable_shipping_methods(self, price: Money, weight, country_code):\n \"\"\"Return the ShippingMethods that can be used on an order with shipment.\n\n It is based on the given country code, and by shipping methods that are\n applicable to the given price & weight total.\n \"\"\"\n qs = self.filter(\n shipping_zone__countries__contains=country_code, currency=price.currency,\n )\n qs = qs.prefetch_related(\"shipping_zone\").order_by(\"price_amount\")\n price_based_methods = _applicable_price_based_methods(price, qs)\n weight_based_methods = _applicable_weight_based_methods(weight, qs)\n return price_based_methods | weight_based_methods\n\n def applicable_shipping_methods_for_instance(\n self, instance: Union[\"Checkout\", \"Order\"], price: Money, country_code=None\n ):\n if not instance.is_shipping_required():\n return None\n if not instance.shipping_address:\n return None\n\n return self.applicable_shipping_methods(\n price=price,\n weight=instance.get_total_weight(),\n country_code=country_code or instance.shipping_address.country.code,\n )\n\n\nclass ShippingMethod(models.Model):\n name = models.CharField(max_length=100)\n type = models.CharField(max_length=30, 
choices=ShippingMethodType.CHOICES)\n currency = models.CharField(\n max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH,\n default=settings.DEFAULT_CURRENCY,\n )\n price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n default=0,\n )\n price = MoneyField(amount_field=\"price_amount\", currency_field=\"currency\")\n shipping_zone = models.ForeignKey(\n ShippingZone, related_name=\"shipping_methods\", on_delete=models.CASCADE\n )\n\n minimum_order_price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n default=0,\n blank=True,\n null=True,\n )\n minimum_order_price = MoneyField(\n amount_field=\"minimum_order_price_amount\", currency_field=\"currency\"\n )\n\n maximum_order_price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n blank=True,\n null=True,\n )\n maximum_order_price = MoneyField(\n amount_field=\"maximum_order_price_amount\", currency_field=\"currency\"\n )\n\n minimum_order_weight = MeasurementField(\n measurement=Weight,\n unit_choices=WeightUnits.CHOICES,\n default=zero_weight,\n blank=True,\n null=True,\n )\n maximum_order_weight = MeasurementField(\n measurement=Weight, unit_choices=WeightUnits.CHOICES, blank=True, null=True\n )\n\n objects = ShippingMethodQueryset.as_manager()\n translated = TranslationProxy()\n\n class Meta:\n ordering = (\"pk\",)\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n if self.type == ShippingMethodType.PRICE_BASED:\n minimum = \"%s%s\" % (\n self.minimum_order_price.amount,\n self.minimum_order_price.currency,\n )\n max_price = self.maximum_order_price\n maximum = (\n \"%s%s\" % (max_price.amount, max_price.currency)\n if max_price\n else \"no limit\"\n )\n return \"ShippingMethod(type=%s min=%s, max=%s)\" % (\n self.type,\n minimum,\n maximum,\n )\n return \"ShippingMethod(type=%s weight_range=(%s)\" % (\n self.type,\n _get_weight_type_display(\n self.minimum_order_weight, self.maximum_order_weight\n ),\n )\n\n def get_total(self):\n return self.price\n\n\nclass ShippingMethodTranslation(models.Model):\n language_code = models.CharField(max_length=10)\n name = models.CharField(max_length=255, null=True, blank=True)\n shipping_method = models.ForeignKey(\n ShippingMethod, related_name=\"translations\", on_delete=models.CASCADE\n )\n\n class Meta:\n unique_together = ((\"language_code\", \"shipping_method\"),)\n", "path": "saleor/shipping/models.py"}]}
| 2,799 | 244 |
gh_patches_debug_5677
|
rasdani/github-patches
|
git_diff
|
huggingface__diffusers-5115
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
convert_original_stable_diffusion script has an issue
https://github.com/huggingface/diffusers/blob/8263cf00f832399bca215e29fa7572e0b0bde4da/scripts/convert_original_stable_diffusion_to_diffusers.py#L157C9-L157C40
The config_files command-line argument is not included in this script.
I think we need to add this code
parser.add_argument(
    "--config_files",
    default=None,
    type=str,
    help="The YAML config file corresponding to the original architecture.",
)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/convert_original_stable_diffusion_to_diffusers.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2023 The HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """ Conversion script for the LDM checkpoints. """
16
17 import argparse
18 import importlib
19
20 import torch
21
22 from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
23
24
25 if __name__ == "__main__":
26 parser = argparse.ArgumentParser()
27
28 parser.add_argument(
29 "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
30 )
31 # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
32 parser.add_argument(
33 "--original_config_file",
34 default=None,
35 type=str,
36 help="The YAML config file corresponding to the original architecture.",
37 )
38 parser.add_argument(
39 "--num_in_channels",
40 default=None,
41 type=int,
42 help="The number of input channels. If `None` number of input channels will be automatically inferred.",
43 )
44 parser.add_argument(
45 "--scheduler_type",
46 default="pndm",
47 type=str,
48 help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
49 )
50 parser.add_argument(
51 "--pipeline_type",
52 default=None,
53 type=str,
54 help=(
55 "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
56 ". If `None` pipeline will be automatically inferred."
57 ),
58 )
59 parser.add_argument(
60 "--image_size",
61 default=None,
62 type=int,
63 help=(
64 "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
65 " Base. Use 768 for Stable Diffusion v2."
66 ),
67 )
68 parser.add_argument(
69 "--prediction_type",
70 default=None,
71 type=str,
72 help=(
73 "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
74 " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
75 ),
76 )
77 parser.add_argument(
78 "--extract_ema",
79 action="store_true",
80 help=(
81 "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
82 " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
83 " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
84 ),
85 )
86 parser.add_argument(
87 "--upcast_attention",
88 action="store_true",
89 help=(
90 "Whether the attention computation should always be upcasted. This is necessary when running stable"
91 " diffusion 2.1."
92 ),
93 )
94 parser.add_argument(
95 "--from_safetensors",
96 action="store_true",
97 help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
98 )
99 parser.add_argument(
100 "--to_safetensors",
101 action="store_true",
102 help="Whether to store pipeline in safetensors format or not.",
103 )
104 parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
105 parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
106 parser.add_argument(
107 "--stable_unclip",
108 type=str,
109 default=None,
110 required=False,
111 help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
112 )
113 parser.add_argument(
114 "--stable_unclip_prior",
115 type=str,
116 default=None,
117 required=False,
118 help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
119 )
120 parser.add_argument(
121 "--clip_stats_path",
122 type=str,
123 help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
124 required=False,
125 )
126 parser.add_argument(
127 "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
128 )
129 parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
130 parser.add_argument(
131 "--vae_path",
132 type=str,
133 default=None,
134 required=False,
135 help="Set to a path, hub id to an already converted vae to not convert it again.",
136 )
137 parser.add_argument(
138 "--pipeline_class_name",
139 type=str,
140 default=None,
141 required=False,
142 help="Specify the pipeline class name",
143 )
144
145 args = parser.parse_args()
146
147 if args.pipeline_class_name is not None:
148 library = importlib.import_module("diffusers")
149 class_obj = getattr(library, args.pipeline_class_name)
150 pipeline_class = class_obj
151 else:
152 pipeline_class = None
153
154 pipe = download_from_original_stable_diffusion_ckpt(
155 checkpoint_path_or_dict=args.checkpoint_path,
156 original_config_file=args.original_config_file,
157 config_files=args.config_files,
158 image_size=args.image_size,
159 prediction_type=args.prediction_type,
160 model_type=args.pipeline_type,
161 extract_ema=args.extract_ema,
162 scheduler_type=args.scheduler_type,
163 num_in_channels=args.num_in_channels,
164 upcast_attention=args.upcast_attention,
165 from_safetensors=args.from_safetensors,
166 device=args.device,
167 stable_unclip=args.stable_unclip,
168 stable_unclip_prior=args.stable_unclip_prior,
169 clip_stats_path=args.clip_stats_path,
170 controlnet=args.controlnet,
171 vae_path=args.vae_path,
172 pipeline_class=pipeline_class,
173 )
174
175 if args.half:
176 pipe.to(torch_dtype=torch.float16)
177
178 if args.controlnet:
179 # only save the controlnet model
180 pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
181 else:
182 pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/convert_original_stable_diffusion_to_diffusers.py b/scripts/convert_original_stable_diffusion_to_diffusers.py
--- a/scripts/convert_original_stable_diffusion_to_diffusers.py
+++ b/scripts/convert_original_stable_diffusion_to_diffusers.py
@@ -35,6 +35,12 @@
type=str,
help="The YAML config file corresponding to the original architecture.",
)
+ parser.add_argument(
+ "--config_files",
+ default=None,
+ type=str,
+ help="The YAML config file corresponding to the architecture.",
+ )
parser.add_argument(
"--num_in_channels",
default=None,
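For context, the patch only declares the missing flag. A minimal standalone argparse sketch (assumed names, not the real conversion script) shows how the new option parses and that `args.config_files` defaults to `None`, so the existing `config_files=args.config_files` call no longer fails:

```python
# Minimal, self-contained sketch assuming only the new argument; the real
# script defines many more options and forwards this value to
# download_from_original_stable_diffusion_ckpt(config_files=...).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--config_files",
    default=None,
    type=str,
    help="The YAML config file corresponding to the architecture.",
)

args = parser.parse_args(["--config_files", "v1-inference.yaml"])
assert args.config_files == "v1-inference.yaml"

# Without the flag the attribute still exists and is None.
assert parser.parse_args([]).config_files is None
```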
|
{"golden_diff": "diff --git a/scripts/convert_original_stable_diffusion_to_diffusers.py b/scripts/convert_original_stable_diffusion_to_diffusers.py\n--- a/scripts/convert_original_stable_diffusion_to_diffusers.py\n+++ b/scripts/convert_original_stable_diffusion_to_diffusers.py\n@@ -35,6 +35,12 @@\n type=str,\n help=\"The YAML config file corresponding to the original architecture.\",\n )\n+ parser.add_argument(\n+ \"--config_files\",\n+ default=None,\n+ type=str,\n+ help=\"The YAML config file corresponding to the architecture.\",\n+ )\n parser.add_argument(\n \"--num_in_channels\",\n default=None,\n", "issue": "convert_original_stable_diffison script have issue\nhttps://github.com/huggingface/diffusers/blob/8263cf00f832399bca215e29fa7572e0b0bde4da/scripts/convert_original_stable_diffusion_to_diffusers.py#L157C9-L157C40\r\n\r\nconfig_files command line argment is not include in this script.\r\nI think we need to add this code\r\n\r\nparser.add_argument(\r\n \"--config_files\",\r\n default=None,\r\n type=str,\r\n help=\"The YAML config file corresponding to the original architecture.\",\r\n )\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2023 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Conversion script for the LDM checkpoints. \"\"\"\n\nimport argparse\nimport importlib\n\nimport torch\n\nfrom diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--checkpoint_path\", default=None, type=str, required=True, help=\"Path to the checkpoint to convert.\"\n )\n # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml\n parser.add_argument(\n \"--original_config_file\",\n default=None,\n type=str,\n help=\"The YAML config file corresponding to the original architecture.\",\n )\n parser.add_argument(\n \"--num_in_channels\",\n default=None,\n type=int,\n help=\"The number of input channels. If `None` number of input channels will be automatically inferred.\",\n )\n parser.add_argument(\n \"--scheduler_type\",\n default=\"pndm\",\n type=str,\n help=\"Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']\",\n )\n parser.add_argument(\n \"--pipeline_type\",\n default=None,\n type=str,\n help=(\n \"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'\"\n \". If `None` pipeline will be automatically inferred.\"\n ),\n )\n parser.add_argument(\n \"--image_size\",\n default=None,\n type=int,\n help=(\n \"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2\"\n \" Base. Use 768 for Stable Diffusion v2.\"\n ),\n )\n parser.add_argument(\n \"--prediction_type\",\n default=None,\n type=str,\n help=(\n \"The prediction type that the model was trained on. 
Use 'epsilon' for Stable Diffusion v1.X and Stable\"\n \" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.\"\n ),\n )\n parser.add_argument(\n \"--extract_ema\",\n action=\"store_true\",\n help=(\n \"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights\"\n \" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield\"\n \" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.\"\n ),\n )\n parser.add_argument(\n \"--upcast_attention\",\n action=\"store_true\",\n help=(\n \"Whether the attention computation should always be upcasted. This is necessary when running stable\"\n \" diffusion 2.1.\"\n ),\n )\n parser.add_argument(\n \"--from_safetensors\",\n action=\"store_true\",\n help=\"If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.\",\n )\n parser.add_argument(\n \"--to_safetensors\",\n action=\"store_true\",\n help=\"Whether to store pipeline in safetensors format or not.\",\n )\n parser.add_argument(\"--dump_path\", default=None, type=str, required=True, help=\"Path to the output model.\")\n parser.add_argument(\"--device\", type=str, help=\"Device to use (e.g. cpu, cuda:0, cuda:1, etc.)\")\n parser.add_argument(\n \"--stable_unclip\",\n type=str,\n default=None,\n required=False,\n help=\"Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.\",\n )\n parser.add_argument(\n \"--stable_unclip_prior\",\n type=str,\n default=None,\n required=False,\n help=\"Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.\",\n )\n parser.add_argument(\n \"--clip_stats_path\",\n type=str,\n help=\"Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.\",\n required=False,\n )\n parser.add_argument(\n \"--controlnet\", action=\"store_true\", default=None, help=\"Set flag if this is a controlnet checkpoint.\"\n )\n parser.add_argument(\"--half\", action=\"store_true\", help=\"Save weights in half precision.\")\n parser.add_argument(\n \"--vae_path\",\n type=str,\n default=None,\n required=False,\n help=\"Set to a path, hub id to an already converted vae to not convert it again.\",\n )\n parser.add_argument(\n \"--pipeline_class_name\",\n type=str,\n default=None,\n required=False,\n help=\"Specify the pipeline class name\",\n )\n\n args = parser.parse_args()\n\n if args.pipeline_class_name is not None:\n library = importlib.import_module(\"diffusers\")\n class_obj = getattr(library, args.pipeline_class_name)\n pipeline_class = class_obj\n else:\n pipeline_class = None\n\n pipe = download_from_original_stable_diffusion_ckpt(\n checkpoint_path_or_dict=args.checkpoint_path,\n original_config_file=args.original_config_file,\n config_files=args.config_files,\n image_size=args.image_size,\n prediction_type=args.prediction_type,\n model_type=args.pipeline_type,\n extract_ema=args.extract_ema,\n scheduler_type=args.scheduler_type,\n num_in_channels=args.num_in_channels,\n upcast_attention=args.upcast_attention,\n from_safetensors=args.from_safetensors,\n device=args.device,\n stable_unclip=args.stable_unclip,\n stable_unclip_prior=args.stable_unclip_prior,\n clip_stats_path=args.clip_stats_path,\n controlnet=args.controlnet,\n vae_path=args.vae_path,\n pipeline_class=pipeline_class,\n )\n\n if args.half:\n pipe.to(torch_dtype=torch.float16)\n\n if args.controlnet:\n # only save the controlnet model\n pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)\n else:\n pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)\n", "path": "scripts/convert_original_stable_diffusion_to_diffusers.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2023 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Conversion script for the LDM checkpoints. 
\"\"\"\n\nimport argparse\nimport importlib\n\nimport torch\n\nfrom diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--checkpoint_path\", default=None, type=str, required=True, help=\"Path to the checkpoint to convert.\"\n )\n # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml\n parser.add_argument(\n \"--original_config_file\",\n default=None,\n type=str,\n help=\"The YAML config file corresponding to the original architecture.\",\n )\n parser.add_argument(\n \"--config_files\",\n default=None,\n type=str,\n help=\"The YAML config file corresponding to the architecture.\",\n )\n parser.add_argument(\n \"--num_in_channels\",\n default=None,\n type=int,\n help=\"The number of input channels. If `None` number of input channels will be automatically inferred.\",\n )\n parser.add_argument(\n \"--scheduler_type\",\n default=\"pndm\",\n type=str,\n help=\"Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']\",\n )\n parser.add_argument(\n \"--pipeline_type\",\n default=None,\n type=str,\n help=(\n \"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'\"\n \". If `None` pipeline will be automatically inferred.\"\n ),\n )\n parser.add_argument(\n \"--image_size\",\n default=None,\n type=int,\n help=(\n \"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2\"\n \" Base. Use 768 for Stable Diffusion v2.\"\n ),\n )\n parser.add_argument(\n \"--prediction_type\",\n default=None,\n type=str,\n help=(\n \"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable\"\n \" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.\"\n ),\n )\n parser.add_argument(\n \"--extract_ema\",\n action=\"store_true\",\n help=(\n \"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights\"\n \" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield\"\n \" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.\"\n ),\n )\n parser.add_argument(\n \"--upcast_attention\",\n action=\"store_true\",\n help=(\n \"Whether the attention computation should always be upcasted. This is necessary when running stable\"\n \" diffusion 2.1.\"\n ),\n )\n parser.add_argument(\n \"--from_safetensors\",\n action=\"store_true\",\n help=\"If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.\",\n )\n parser.add_argument(\n \"--to_safetensors\",\n action=\"store_true\",\n help=\"Whether to store pipeline in safetensors format or not.\",\n )\n parser.add_argument(\"--dump_path\", default=None, type=str, required=True, help=\"Path to the output model.\")\n parser.add_argument(\"--device\", type=str, help=\"Device to use (e.g. cpu, cuda:0, cuda:1, etc.)\")\n parser.add_argument(\n \"--stable_unclip\",\n type=str,\n default=None,\n required=False,\n help=\"Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.\",\n )\n parser.add_argument(\n \"--stable_unclip_prior\",\n type=str,\n default=None,\n required=False,\n help=\"Set if this is a stable unCLIP txt2img model. Selects which prior to use. 
If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.\",\n )\n parser.add_argument(\n \"--clip_stats_path\",\n type=str,\n help=\"Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.\",\n required=False,\n )\n parser.add_argument(\n \"--controlnet\", action=\"store_true\", default=None, help=\"Set flag if this is a controlnet checkpoint.\"\n )\n parser.add_argument(\"--half\", action=\"store_true\", help=\"Save weights in half precision.\")\n parser.add_argument(\n \"--vae_path\",\n type=str,\n default=None,\n required=False,\n help=\"Set to a path, hub id to an already converted vae to not convert it again.\",\n )\n parser.add_argument(\n \"--pipeline_class_name\",\n type=str,\n default=None,\n required=False,\n help=\"Specify the pipeline class name\",\n )\n\n args = parser.parse_args()\n\n if args.pipeline_class_name is not None:\n library = importlib.import_module(\"diffusers\")\n class_obj = getattr(library, args.pipeline_class_name)\n pipeline_class = class_obj\n else:\n pipeline_class = None\n\n pipe = download_from_original_stable_diffusion_ckpt(\n checkpoint_path_or_dict=args.checkpoint_path,\n original_config_file=args.original_config_file,\n config_files=args.config_files,\n image_size=args.image_size,\n prediction_type=args.prediction_type,\n model_type=args.pipeline_type,\n extract_ema=args.extract_ema,\n scheduler_type=args.scheduler_type,\n num_in_channels=args.num_in_channels,\n upcast_attention=args.upcast_attention,\n from_safetensors=args.from_safetensors,\n device=args.device,\n stable_unclip=args.stable_unclip,\n stable_unclip_prior=args.stable_unclip_prior,\n clip_stats_path=args.clip_stats_path,\n controlnet=args.controlnet,\n vae_path=args.vae_path,\n pipeline_class=pipeline_class,\n )\n\n if args.half:\n pipe.to(torch_dtype=torch.float16)\n\n if args.controlnet:\n # only save the controlnet model\n pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)\n else:\n pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)\n", "path": "scripts/convert_original_stable_diffusion_to_diffusers.py"}]}
| 2,399 | 145 |
gh_patches_debug_38322
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-3533
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSS broken in latest docs
## 🐛 Bug

It seems like the versionwarning banner is not displayed correctly in the latest docs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2
3 # -- Path setup --------------------------------------------------------------
4
5 import atexit
6 import os
7 import shutil
8 import subprocess
9 import sys
10 from pathlib import Path
11 from typing import Any
12 from unittest import mock
13
14 import micropip
15
16 panels_add_bootstrap_css = False
17
18 # -- Project information -----------------------------------------------------
19
20 project = "Pyodide"
21 copyright = "2019-2022, Pyodide contributors and Mozilla"
22
23 # -- General configuration ---------------------------------------------------
24
25 # If your documentation needs a minimal Sphinx version, state it here.
26 #
27 # needs_sphinx = '1.0'
28
29 extensions = [
30 "sphinx.ext.autodoc",
31 "sphinx.ext.autosummary",
32 "sphinx.ext.intersphinx",
33 "sphinx.ext.napoleon",
34 "myst_parser",
35 "sphinx_js",
36 "sphinx_click",
37 "autodocsumm",
38 "sphinx_pyodide",
39 "sphinx_argparse_cli",
40 "versionwarning.extension",
41 "sphinx_issues",
42 "sphinx_autodoc_typehints",
43 "sphinx_design", # Used for tabs in building-from-sources.md
44 ]
45
46
47 myst_enable_extensions = ["substitution"]
48
49 js_language = "typescript"
50 jsdoc_config_path = "../src/js/tsconfig.json"
51 root_for_relative_js_paths = "../src/"
52 issues_github_path = "pyodide/pyodide"
53
54 versionwarning_messages = {
55 "latest": (
56 "This is the development version of the documentation. "
57 'See <a href="https://pyodide.org/">here</a> for latest stable '
58 "documentation. Please do not use Pyodide with non "
59 "versioned (`dev`) URLs from the CDN for deployed applications!"
60 )
61 }
62 versionwarning_body_selector = "#main-content > div"
63
64 autosummary_generate = True
65 autodoc_default_flags = ["members", "inherited-members"]
66
67 intersphinx_mapping = {
68 "python": ("https://docs.python.org/3.10", None),
69 "micropip": (f"https://micropip.pyodide.org/en/v{micropip.__version__}/", None),
70 "numpy": ("https://numpy.org/doc/stable/", None),
71 }
72
73 # Add modules to be mocked.
74 mock_modules = ["tomli"]
75
76 # Add any paths that contain templates here, relative to this directory.
77 templates_path = ["_templates"]
78
79 # The suffix(es) of source filenames.
80 source_suffix = [".rst", ".md"]
81
82 # The master toctree document.
83 master_doc = "index"
84
85 # List of patterns, relative to source directory, that match files and
86 # directories to ignore when looking for source files.
87 exclude_patterns = [
88 "_build",
89 "Thumbs.db",
90 ".DS_Store",
91 "README.md",
92 "sphinx_pyodide",
93 ".*",
94 ]
95
96 # The name of the Pygments (syntax highlighting) style to use.
97 pygments_style = None
98
99 # -- Options for HTML output -------------------------------------------------
100
101 # The theme to use for HTML and HTML Help pages. See the documentation for
102 # a list of builtin themes.
103 #
104 html_theme = "sphinx_book_theme"
105 html_logo = "_static/img/pyodide-logo.png"
106
107 # theme-specific options
108 html_theme_options: dict[str, Any] = {}
109
110 # paths that contain custom static files (such as style sheets)
111 html_static_path = ["_static"]
112
113
114 html_css_files = [
115 "css/pyodide.css",
116 ]
117
118 # Custom sidebar templates, must be a dictionary that maps document names
119 # to template names.
120 # html_sidebars = {}
121
122 # -- Options for HTMLHelp output ---------------------------------------------
123
124 # Output file base name for HTML help builder.
125 htmlhelp_basename = "Pyodidedoc"
126
127 # A list of files that should not be packed into the epub file.
128 epub_exclude_files = ["search.html"]
129
130 # Try not to cause side effects if we are imported incidentally.
131
132 IN_SPHINX = "sphinx" in sys.modules and hasattr(sys.modules["sphinx"], "application")
133 IN_READTHEDOCS = "READTHEDOCS" in os.environ
134
135
136 base_dir = Path(__file__).resolve().parent.parent
137 extra_sys_path_dirs = [
138 str(base_dir),
139 str(base_dir / "pyodide-build"),
140 str(base_dir / "src/py"),
141 str(base_dir / "packages/micropip/src"),
142 ]
143
144
145 if IN_SPHINX:
146 # sphinx_pyodide is imported before setup() is called because it's a sphinx
147 # extension, so we need it to be on the path early. Everything else can be
148 # added to the path in setup().
149 #
150 # TODO: pip install -e sphinx-pyodide instead.
151 sys.path = [str(base_dir / "docs/sphinx_pyodide")] + sys.path
152
153
154 def patch_docs_argspec():
155 import builtins
156
157 from sphinx_pyodide.util import docs_argspec
158
159 # override docs_argspec, _pyodide.docs_argspec will read this value back.
160 # Must do this before importing pyodide!
161 setattr(builtins, "--docs_argspec--", docs_argspec)
162
163
164 def patch_inspect():
165 # Monkey patch for python3.11 incompatible code
166 import inspect
167
168 if not hasattr(inspect, "getargspec"):
169 inspect.getargspec = inspect.getfullargspec # type: ignore[assignment]
170
171
172 def prevent_parens_after_js_class_xrefs():
173 from sphinx.domains.javascript import JavaScriptDomain, JSXRefRole
174
175 JavaScriptDomain.roles["class"] = JSXRefRole()
176
177
178 def apply_patches():
179 patch_docs_argspec()
180 patch_inspect()
181 prevent_parens_after_js_class_xrefs()
182
183
184 def calculate_pyodide_version(app):
185 import pyodide
186
187 config = app.config
188
189 # The full version, including alpha/beta/rc tags.
190 config.release = config.version = version = pyodide.__version__
191
192 if ".dev" in version or os.environ.get("READTHEDOCS_VERSION") == "latest":
193 CDN_URL = "https://cdn.jsdelivr.net/pyodide/dev/full/"
194 else:
195 CDN_URL = f"https://cdn.jsdelivr.net/pyodide/v{version}/full/"
196
197 app.config.CDN_URL = CDN_URL
198 app.config.html_title = f"Version {version}"
199
200 app.config.global_replacements = {
201 "{{PYODIDE_CDN_URL}}": CDN_URL,
202 "{{VERSION}}": version,
203 }
204
205
206 def write_console_html(app):
207 # Make console.html file
208 env = {"PYODIDE_BASE_URL": app.config.CDN_URL}
209 os.makedirs(app.outdir, exist_ok=True)
210 os.makedirs("../dist", exist_ok=True)
211 res = subprocess.check_output(
212 ["make", "-C", "..", "dist/console.html"],
213 env=env,
214 stderr=subprocess.STDOUT,
215 encoding="utf-8",
216 )
217 print(res)
218
219 # insert the Plausible analytics script to console.html
220 console_html_lines = (
221 Path("../dist/console.html").read_text().splitlines(keepends=True)
222 )
223 for idx, line in enumerate(list(console_html_lines)):
224 if 'pyodide.js">' in line:
225 # insert the analytics script after the `pyodide.js` script
226 console_html_lines.insert(
227 idx,
228 '<script defer data-domain="pyodide.org" src="https://plausible.io/js/plausible.js"></script>\n',
229 )
230 break
231 else:
232 raise ValueError("Could not find pyodide.js in the <head> section")
233 output_path = Path(app.outdir) / "console.html"
234 output_path.write_text("".join(console_html_lines))
235
236
237 def ensure_typedoc_on_path():
238 if shutil.which("typedoc"):
239 return
240 os.environ["PATH"] += f':{str(Path("../src/js/node_modules/.bin").resolve())}'
241 print(os.environ["PATH"])
242 if shutil.which("typedoc"):
243 return
244 if IN_READTHEDOCS:
245 subprocess.run(["npm", "ci"], cwd="../src/js")
246 if shutil.which("typedoc"):
247 return
248 raise Exception(
249 "Before building the Pyodide docs you must run 'npm install' in 'src/js'."
250 )
251
252
253 def create_generated_typescript_files(app):
254 shutil.copy("../src/core/pyproxy.ts", "../src/js/pyproxy.gen.ts")
255 shutil.copy("../src/core/error_handling.ts", "../src/js/error_handling.gen.ts")
256 app.config.js_source_path = [str(x) for x in Path("../src/js").glob("*.ts")]
257
258 def remove_pyproxy_gen_ts():
259 Path("../src/js/pyproxy.gen.ts").unlink(missing_ok=True)
260
261 atexit.register(remove_pyproxy_gen_ts)
262
263
264 def prune_webloop_docs():
265 # Prevent API docs for webloop methods: they are the same as for base event loop
266 # and it clutters api docs too much
267 from sphinx_pyodide.util import delete_attrs
268
269 import pyodide.console
270 import pyodide.webloop
271
272 delete_attrs(pyodide.webloop.WebLoop)
273 delete_attrs(pyodide.webloop.WebLoopPolicy)
274 delete_attrs(pyodide.console.PyodideConsole)
275
276 for module in mock_modules:
277 sys.modules[module] = mock.Mock()
278
279
280 def prune_jsproxy_constructor_docs():
281 from pyodide.ffi import JsProxy
282
283 del JsProxy.__new__
284
285
286 def prune_docs():
287 prune_webloop_docs()
288 prune_jsproxy_constructor_docs()
289
290
291 # https://github.com/sphinx-doc/sphinx/issues/4054
292 def global_replace(app, docname, source):
293 result = source[0]
294 for key in app.config.global_replacements:
295 result = result.replace(key, app.config.global_replacements[key])
296 source[0] = result
297
298
299 always_document_param_types = True
300
301
302 def typehints_formatter(annotation, config):
303 """Adjust the rendering of various types that sphinx_autodoc_typehints mishandles"""
304 from sphinx_autodoc_typehints import (
305 get_annotation_class_name,
306 get_annotation_module,
307 )
308
309 try:
310 module = get_annotation_module(annotation)
311 class_name = get_annotation_class_name(annotation, module)
312 except ValueError:
313 return None
314 full_name = f"{module}.{class_name}"
315 if full_name == "typing.TypeVar":
316 # The way sphinx-autodoc-typehints renders TypeVar is too noisy for my
317 # taste
318 return f"``{annotation.__name__}``"
319 if full_name == "ast.Module":
320 return "`Module <https://docs.python.org/3/library/ast.html#module-ast>`_"
321 return None
322
323
324 def setup(app):
325 sys.path = extra_sys_path_dirs + sys.path
326 app.add_config_value("global_replacements", {}, True)
327 app.add_config_value("CDN_URL", "", True)
328 app.connect("source-read", global_replace)
329
330 apply_patches()
331 calculate_pyodide_version(app)
332 ensure_typedoc_on_path()
333 create_generated_typescript_files(app)
334 write_console_html(app)
335 prune_docs()
336
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -37,7 +37,6 @@
"autodocsumm",
"sphinx_pyodide",
"sphinx_argparse_cli",
- "versionwarning.extension",
"sphinx_issues",
"sphinx_autodoc_typehints",
"sphinx_design", # Used for tabs in building-from-sources.md
@@ -51,15 +50,12 @@
root_for_relative_js_paths = "../src/"
issues_github_path = "pyodide/pyodide"
-versionwarning_messages = {
- "latest": (
- "This is the development version of the documentation. "
- 'See <a href="https://pyodide.org/">here</a> for latest stable '
- "documentation. Please do not use Pyodide with non "
- "versioned (`dev`) URLs from the CDN for deployed applications!"
- )
-}
-versionwarning_body_selector = "#main-content > div"
+versionwarning_message = (
+ "This is the development version of the documentation. "
+ 'See <a href="https://pyodide.org/">here</a> for latest stable '
+ "documentation. Please do not use Pyodide with non "
+ "versioned (`dev`) URLs from the CDN for deployed applications!"
+)
autosummary_generate = True
autodoc_default_flags = ["members", "inherited-members"]
@@ -105,7 +101,9 @@
html_logo = "_static/img/pyodide-logo.png"
# theme-specific options
-html_theme_options: dict[str, Any] = {}
+html_theme_options: dict[str, Any] = {
+ "announcement": "",
+}
# paths that contain custom static files (such as style sheets)
html_static_path = ["_static"]
@@ -131,6 +129,9 @@
IN_SPHINX = "sphinx" in sys.modules and hasattr(sys.modules["sphinx"], "application")
IN_READTHEDOCS = "READTHEDOCS" in os.environ
+IN_READTHEDOCS_LATEST = (
+ IN_READTHEDOCS and os.environ.get("READTHEDOCS_VERSION") == "latest"
+)
base_dir = Path(__file__).resolve().parent.parent
@@ -203,6 +204,12 @@
}
+def set_announcement_message():
+ html_theme_options["announcement"] = (
+ versionwarning_message if IN_READTHEDOCS_LATEST else ""
+ )
+
+
def write_console_html(app):
# Make console.html file
env = {"PYODIDE_BASE_URL": app.config.CDN_URL}
@@ -327,6 +334,7 @@
app.add_config_value("CDN_URL", "", True)
app.connect("source-read", global_replace)
+ set_announcement_message()
apply_patches()
calculate_pyodide_version(app)
ensure_typedoc_on_path()
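The fix swaps the versionwarning extension's DOM injection for the book theme's `announcement` option, enabled only on the Read the Docs `latest` build. A standalone sketch of that gating logic (illustrative names only, no Sphinx imports) might look like this:

```python
# Illustrative only: emulates how the announcement banner is switched on
# for the "latest" Read the Docs build and left empty everywhere else.
import os

versionwarning_message = "This is the development version of the documentation."

def announcement_for(environ):
    in_rtd_latest = (
        "READTHEDOCS" in environ and environ.get("READTHEDOCS_VERSION") == "latest"
    )
    return versionwarning_message if in_rtd_latest else ""

html_theme_options = {"announcement": announcement_for(os.environ)}

assert announcement_for({"READTHEDOCS": "True", "READTHEDOCS_VERSION": "latest"})
assert announcement_for({"READTHEDOCS_VERSION": "stable"}) == ""
```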
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -37,7 +37,6 @@\n \"autodocsumm\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n- \"versionwarning.extension\",\n \"sphinx_issues\",\n \"sphinx_autodoc_typehints\",\n \"sphinx_design\", # Used for tabs in building-from-sources.md\n@@ -51,15 +50,12 @@\n root_for_relative_js_paths = \"../src/\"\n issues_github_path = \"pyodide/pyodide\"\n \n-versionwarning_messages = {\n- \"latest\": (\n- \"This is the development version of the documentation. \"\n- 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n- \"documentation. Please do not use Pyodide with non \"\n- \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n- )\n-}\n-versionwarning_body_selector = \"#main-content > div\"\n+versionwarning_message = (\n+ \"This is the development version of the documentation. \"\n+ 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n+ \"documentation. Please do not use Pyodide with non \"\n+ \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n+)\n \n autosummary_generate = True\n autodoc_default_flags = [\"members\", \"inherited-members\"]\n@@ -105,7 +101,9 @@\n html_logo = \"_static/img/pyodide-logo.png\"\n \n # theme-specific options\n-html_theme_options: dict[str, Any] = {}\n+html_theme_options: dict[str, Any] = {\n+ \"announcement\": \"\",\n+}\n \n # paths that contain custom static files (such as style sheets)\n html_static_path = [\"_static\"]\n@@ -131,6 +129,9 @@\n \n IN_SPHINX = \"sphinx\" in sys.modules and hasattr(sys.modules[\"sphinx\"], \"application\")\n IN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n+IN_READTHEDOCS_LATEST = (\n+ IN_READTHEDOCS and os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\"\n+)\n \n \n base_dir = Path(__file__).resolve().parent.parent\n@@ -203,6 +204,12 @@\n }\n \n \n+def set_announcement_message():\n+ html_theme_options[\"announcement\"] = (\n+ versionwarning_message if IN_READTHEDOCS_LATEST else \"\"\n+ )\n+\n+\n def write_console_html(app):\n # Make console.html file\n env = {\"PYODIDE_BASE_URL\": app.config.CDN_URL}\n@@ -327,6 +334,7 @@\n app.add_config_value(\"CDN_URL\", \"\", True)\n app.connect(\"source-read\", global_replace)\n \n+ set_announcement_message()\n apply_patches()\n calculate_pyodide_version(app)\n ensure_typedoc_on_path()\n", "issue": "CSS broken in latest docs\n## \ud83d\udc1b Bug\r\n\r\n\r\n\r\nIt seems like the versionwarning banner is not displayed correctly in latest docs.\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\nimport micropip\n\npanels_add_bootstrap_css = False\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"sphinx_click\",\n \"autodocsumm\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n 
\"versionwarning.extension\",\n \"sphinx_issues\",\n \"sphinx_autodoc_typehints\",\n \"sphinx_design\", # Used for tabs in building-from-sources.md\n]\n\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n )\n}\nversionwarning_body_selector = \"#main-content > div\"\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.10\", None),\n \"micropip\": (f\"https://micropip.pyodide.org/en/v{micropip.__version__}/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n}\n\n# Add modules to be mocked.\nmock_modules = [\"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n \"_build\",\n \"Thumbs.db\",\n \".DS_Store\",\n \"README.md\",\n \"sphinx_pyodide\",\n \".*\",\n]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n# Try not to cause side effects if we are imported incidentally.\n\nIN_SPHINX = \"sphinx\" in sys.modules and hasattr(sys.modules[\"sphinx\"], \"application\")\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\n\n\nbase_dir = Path(__file__).resolve().parent.parent\nextra_sys_path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n]\n\n\nif IN_SPHINX:\n # sphinx_pyodide is imported before setup() is called because it's a sphinx\n # extension, so we need it to be on the path early. 
Everything else can be\n # added to the path in setup().\n #\n # TODO: pip install -e sphinx-pyodide instead.\n sys.path = [str(base_dir / \"docs/sphinx_pyodide\")] + sys.path\n\n\ndef patch_docs_argspec():\n import builtins\n\n from sphinx_pyodide.util import docs_argspec\n\n # override docs_argspec, _pyodide.docs_argspec will read this value back.\n # Must do this before importing pyodide!\n setattr(builtins, \"--docs_argspec--\", docs_argspec)\n\n\ndef patch_inspect():\n # Monkey patch for python3.11 incompatible code\n import inspect\n\n if not hasattr(inspect, \"getargspec\"):\n inspect.getargspec = inspect.getfullargspec # type: ignore[assignment]\n\n\ndef prevent_parens_after_js_class_xrefs():\n from sphinx.domains.javascript import JavaScriptDomain, JSXRefRole\n\n JavaScriptDomain.roles[\"class\"] = JSXRefRole()\n\n\ndef apply_patches():\n patch_docs_argspec()\n patch_inspect()\n prevent_parens_after_js_class_xrefs()\n\n\ndef calculate_pyodide_version(app):\n import pyodide\n\n config = app.config\n\n # The full version, including alpha/beta/rc tags.\n config.release = config.version = version = pyodide.__version__\n\n if \".dev\" in version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\n else:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{version}/full/\"\n\n app.config.CDN_URL = CDN_URL\n app.config.html_title = f\"Version {version}\"\n\n app.config.global_replacements = {\n \"{{PYODIDE_CDN_URL}}\": CDN_URL,\n \"{{VERSION}}\": version,\n }\n\n\ndef write_console_html(app):\n # Make console.html file\n env = {\"PYODIDE_BASE_URL\": app.config.CDN_URL}\n os.makedirs(app.outdir, exist_ok=True)\n os.makedirs(\"../dist\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"dist/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\n # insert the Plausible analytics script to console.html\n console_html_lines = (\n Path(\"../dist/console.html\").read_text().splitlines(keepends=True)\n )\n for idx, line in enumerate(list(console_html_lines)):\n if 'pyodide.js\">' in line:\n # insert the analytics script after the `pyodide.js` script\n console_html_lines.insert(\n idx,\n '<script defer data-domain=\"pyodide.org\" src=\"https://plausible.io/js/plausible.js\"></script>\\n',\n )\n break\n else:\n raise ValueError(\"Could not find pyodide.js in the <head> section\")\n output_path = Path(app.outdir) / \"console.html\"\n output_path.write_text(\"\".join(console_html_lines))\n\n\ndef ensure_typedoc_on_path():\n if shutil.which(\"typedoc\"):\n return\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if shutil.which(\"typedoc\"):\n return\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n if shutil.which(\"typedoc\"):\n return\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n\ndef create_generated_typescript_files(app):\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n app.config.js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n atexit.register(remove_pyproxy_gen_ts)\n\n\ndef prune_webloop_docs():\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # 
and it clutters api docs too much\n from sphinx_pyodide.util import delete_attrs\n\n import pyodide.console\n import pyodide.webloop\n\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\ndef prune_jsproxy_constructor_docs():\n from pyodide.ffi import JsProxy\n\n del JsProxy.__new__\n\n\ndef prune_docs():\n prune_webloop_docs()\n prune_jsproxy_constructor_docs()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef global_replace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nalways_document_param_types = True\n\n\ndef typehints_formatter(annotation, config):\n \"\"\"Adjust the rendering of various types that sphinx_autodoc_typehints mishandles\"\"\"\n from sphinx_autodoc_typehints import (\n get_annotation_class_name,\n get_annotation_module,\n )\n\n try:\n module = get_annotation_module(annotation)\n class_name = get_annotation_class_name(annotation, module)\n except ValueError:\n return None\n full_name = f\"{module}.{class_name}\"\n if full_name == \"typing.TypeVar\":\n # The way sphinx-autodoc-typehints renders TypeVar is too noisy for my\n # taste\n return f\"``{annotation.__name__}``\"\n if full_name == \"ast.Module\":\n return \"`Module <https://docs.python.org/3/library/ast.html#module-ast>`_\"\n return None\n\n\ndef setup(app):\n sys.path = extra_sys_path_dirs + sys.path\n app.add_config_value(\"global_replacements\", {}, True)\n app.add_config_value(\"CDN_URL\", \"\", True)\n app.connect(\"source-read\", global_replace)\n\n apply_patches()\n calculate_pyodide_version(app)\n ensure_typedoc_on_path()\n create_generated_typescript_files(app)\n write_console_html(app)\n prune_docs()\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport atexit\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Any\nfrom unittest import mock\n\nimport micropip\n\npanels_add_bootstrap_css = False\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2022, Pyodide contributors and Mozilla\"\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"sphinx_click\",\n \"autodocsumm\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n \"sphinx_issues\",\n \"sphinx_autodoc_typehints\",\n \"sphinx_design\", # Used for tabs in building-from-sources.md\n]\n\n\nmyst_enable_extensions = [\"substitution\"]\n\njs_language = \"typescript\"\njsdoc_config_path = \"../src/js/tsconfig.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_message = (\n \"This is the development version of the documentation. \"\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. 
Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\"\n)\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.10\", None),\n \"micropip\": (f\"https://micropip.pyodide.org/en/v{micropip.__version__}/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n}\n\n# Add modules to be mocked.\nmock_modules = [\"tomli\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n \"_build\",\n \"Thumbs.db\",\n \".DS_Store\",\n \"README.md\",\n \"sphinx_pyodide\",\n \".*\",\n]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\n\n# theme-specific options\nhtml_theme_options: dict[str, Any] = {\n \"announcement\": \"\",\n}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n# Try not to cause side effects if we are imported incidentally.\n\nIN_SPHINX = \"sphinx\" in sys.modules and hasattr(sys.modules[\"sphinx\"], \"application\")\nIN_READTHEDOCS = \"READTHEDOCS\" in os.environ\nIN_READTHEDOCS_LATEST = (\n IN_READTHEDOCS and os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\"\n)\n\n\nbase_dir = Path(__file__).resolve().parent.parent\nextra_sys_path_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n]\n\n\nif IN_SPHINX:\n # sphinx_pyodide is imported before setup() is called because it's a sphinx\n # extension, so we need it to be on the path early. 
Everything else can be\n # added to the path in setup().\n #\n # TODO: pip install -e sphinx-pyodide instead.\n sys.path = [str(base_dir / \"docs/sphinx_pyodide\")] + sys.path\n\n\ndef patch_docs_argspec():\n import builtins\n\n from sphinx_pyodide.util import docs_argspec\n\n # override docs_argspec, _pyodide.docs_argspec will read this value back.\n # Must do this before importing pyodide!\n setattr(builtins, \"--docs_argspec--\", docs_argspec)\n\n\ndef patch_inspect():\n # Monkey patch for python3.11 incompatible code\n import inspect\n\n if not hasattr(inspect, \"getargspec\"):\n inspect.getargspec = inspect.getfullargspec # type: ignore[assignment]\n\n\ndef prevent_parens_after_js_class_xrefs():\n from sphinx.domains.javascript import JavaScriptDomain, JSXRefRole\n\n JavaScriptDomain.roles[\"class\"] = JSXRefRole()\n\n\ndef apply_patches():\n patch_docs_argspec()\n patch_inspect()\n prevent_parens_after_js_class_xrefs()\n\n\ndef calculate_pyodide_version(app):\n import pyodide\n\n config = app.config\n\n # The full version, including alpha/beta/rc tags.\n config.release = config.version = version = pyodide.__version__\n\n if \".dev\" in version or os.environ.get(\"READTHEDOCS_VERSION\") == \"latest\":\n CDN_URL = \"https://cdn.jsdelivr.net/pyodide/dev/full/\"\n else:\n CDN_URL = f\"https://cdn.jsdelivr.net/pyodide/v{version}/full/\"\n\n app.config.CDN_URL = CDN_URL\n app.config.html_title = f\"Version {version}\"\n\n app.config.global_replacements = {\n \"{{PYODIDE_CDN_URL}}\": CDN_URL,\n \"{{VERSION}}\": version,\n }\n\n\ndef set_announcement_message():\n html_theme_options[\"announcement\"] = (\n versionwarning_message if IN_READTHEDOCS_LATEST else \"\"\n )\n\n\ndef write_console_html(app):\n # Make console.html file\n env = {\"PYODIDE_BASE_URL\": app.config.CDN_URL}\n os.makedirs(app.outdir, exist_ok=True)\n os.makedirs(\"../dist\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"dist/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\n # insert the Plausible analytics script to console.html\n console_html_lines = (\n Path(\"../dist/console.html\").read_text().splitlines(keepends=True)\n )\n for idx, line in enumerate(list(console_html_lines)):\n if 'pyodide.js\">' in line:\n # insert the analytics script after the `pyodide.js` script\n console_html_lines.insert(\n idx,\n '<script defer data-domain=\"pyodide.org\" src=\"https://plausible.io/js/plausible.js\"></script>\\n',\n )\n break\n else:\n raise ValueError(\"Could not find pyodide.js in the <head> section\")\n output_path = Path(app.outdir) / \"console.html\"\n output_path.write_text(\"\".join(console_html_lines))\n\n\ndef ensure_typedoc_on_path():\n if shutil.which(\"typedoc\"):\n return\n os.environ[\"PATH\"] += f':{str(Path(\"../src/js/node_modules/.bin\").resolve())}'\n print(os.environ[\"PATH\"])\n if shutil.which(\"typedoc\"):\n return\n if IN_READTHEDOCS:\n subprocess.run([\"npm\", \"ci\"], cwd=\"../src/js\")\n if shutil.which(\"typedoc\"):\n return\n raise Exception(\n \"Before building the Pyodide docs you must run 'npm install' in 'src/js'.\"\n )\n\n\ndef create_generated_typescript_files(app):\n shutil.copy(\"../src/core/pyproxy.ts\", \"../src/js/pyproxy.gen.ts\")\n shutil.copy(\"../src/core/error_handling.ts\", \"../src/js/error_handling.gen.ts\")\n app.config.js_source_path = [str(x) for x in Path(\"../src/js\").glob(\"*.ts\")]\n\n def remove_pyproxy_gen_ts():\n Path(\"../src/js/pyproxy.gen.ts\").unlink(missing_ok=True)\n\n 
atexit.register(remove_pyproxy_gen_ts)\n\n\ndef prune_webloop_docs():\n # Prevent API docs for webloop methods: they are the same as for base event loop\n # and it clutters api docs too much\n from sphinx_pyodide.util import delete_attrs\n\n import pyodide.console\n import pyodide.webloop\n\n delete_attrs(pyodide.webloop.WebLoop)\n delete_attrs(pyodide.webloop.WebLoopPolicy)\n delete_attrs(pyodide.console.PyodideConsole)\n\n for module in mock_modules:\n sys.modules[module] = mock.Mock()\n\n\ndef prune_jsproxy_constructor_docs():\n from pyodide.ffi import JsProxy\n\n del JsProxy.__new__\n\n\ndef prune_docs():\n prune_webloop_docs()\n prune_jsproxy_constructor_docs()\n\n\n# https://github.com/sphinx-doc/sphinx/issues/4054\ndef global_replace(app, docname, source):\n result = source[0]\n for key in app.config.global_replacements:\n result = result.replace(key, app.config.global_replacements[key])\n source[0] = result\n\n\nalways_document_param_types = True\n\n\ndef typehints_formatter(annotation, config):\n \"\"\"Adjust the rendering of various types that sphinx_autodoc_typehints mishandles\"\"\"\n from sphinx_autodoc_typehints import (\n get_annotation_class_name,\n get_annotation_module,\n )\n\n try:\n module = get_annotation_module(annotation)\n class_name = get_annotation_class_name(annotation, module)\n except ValueError:\n return None\n full_name = f\"{module}.{class_name}\"\n if full_name == \"typing.TypeVar\":\n # The way sphinx-autodoc-typehints renders TypeVar is too noisy for my\n # taste\n return f\"``{annotation.__name__}``\"\n if full_name == \"ast.Module\":\n return \"`Module <https://docs.python.org/3/library/ast.html#module-ast>`_\"\n return None\n\n\ndef setup(app):\n sys.path = extra_sys_path_dirs + sys.path\n app.add_config_value(\"global_replacements\", {}, True)\n app.add_config_value(\"CDN_URL\", \"\", True)\n app.connect(\"source-read\", global_replace)\n\n set_announcement_message()\n apply_patches()\n calculate_pyodide_version(app)\n ensure_typedoc_on_path()\n create_generated_typescript_files(app)\n write_console_html(app)\n prune_docs()\n", "path": "docs/conf.py"}]}
| 3,677 | 670 |
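The patch captured in the row above replaces the retired `versionwarning` Sphinx extension with the book theme's `announcement` option, switched on only for the Read the Docs "latest" build. As a rough sketch of that pattern only, reusing the names from the diff (`READTHEDOCS_VERSION`, `html_theme_options["announcement"]`, `versionwarning_message`) but with an abbreviated message, and not the repository's actual `conf.py`:

```python
import os

# Show a warning banner only on the Read the Docs "latest" (development) build.
IN_READTHEDOCS = "READTHEDOCS" in os.environ
IN_READTHEDOCS_LATEST = (
    IN_READTHEDOCS and os.environ.get("READTHEDOCS_VERSION") == "latest"
)

versionwarning_message = "This is the development version of the documentation."

# sphinx_book_theme renders a non-empty "announcement" string as a top banner;
# an empty string disables it.
html_theme_options = {
    "announcement": versionwarning_message if IN_READTHEDOCS_LATEST else "",
}
```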
gh_patches_debug_5105
|
rasdani/github-patches
|
git_diff
|
encode__httpx-2442
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support httpcore>=0.16.0
Hi,
I have a project that uses `fastapi` and `uvicorn`, which demands `h11>=0.8`. The latest version of `fastapi` changed its test suite from `requests` to `httpx`, so I had to add `httpx` to my test requirements.
Now the problem is that `httpx` requires `httpcore`: since version `0.16.0`, `httpcore` allows `h11==0.14.0`, but the `httpcore` version currently required by `httpx` does not, so I end up having to downgrade `h11` from `0.14.0` to `0.12.0`.
Can the constraint for `httpcore` be updated to `>=0.16.0` here?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import sys
2
3 from setuptools import setup
4
5 sys.stderr.write(
6 """
7 ===============================
8 Unsupported installation method
9 ===============================
10 httpx no longer supports installation with `python setup.py install`.
11 Please use `python -m pip install .` instead.
12 """
13 )
14 sys.exit(1)
15
16
17 # The below code will never execute, however GitHub is particularly
18 # picky about where it finds Python packaging metadata.
19 # See: https://github.com/github/feedback/discussions/6456
20 #
21 # To be removed once GitHub catches up.
22
23 setup(
24 name="httpx",
25 install_requires=[
26 "certifi",
27 "sniffio",
28 "rfc3986[idna2008]>=1.3,<2",
29 "httpcore>=0.15.0,<0.16.0",
30 ],
31 )
32
```
Path: `httpx/__version__.py`
Content:
```
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.23.0"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.23.0"
+__version__ = "0.23.1"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,6 +26,6 @@
"certifi",
"sniffio",
"rfc3986[idna2008]>=1.3,<2",
- "httpcore>=0.15.0,<0.16.0",
+ "httpcore>=0.15.0,<0.17.0",
],
)
|
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.23.0\"\n+__version__ = \"0.23.1\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,6 +26,6 @@\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n- \"httpcore>=0.15.0,<0.16.0\",\n+ \"httpcore>=0.15.0,<0.17.0\",\n ],\n )\n", "issue": "Support httpcore>=0.16.0\nHi,\r\n\r\nI have a project that uses `fastapi` and `uvicorn` which demands `h11>=0.8`. Latest version of `fastapi` changed their test suite from `requests` to `httpx`. So I had to add `httpx` to my tests requirements.\r\n\r\nNow the problem is, that `httpx` requires `httpcore`, which since version `0.16.0` allows for `h11==0.14.0`, but the current version of `httpcore` required by `httpx` doesn't, so I end up having to downgrade the `h11==0.14.0` which I have to `0.12.0`.\r\n\r\nCan the constraint for `httpcore` be updated to `>=0.16.0` here?\n", "before_files": [{"content": "import sys\n\nfrom setuptools import setup\n\nsys.stderr.write(\n \"\"\"\n===============================\nUnsupported installation method\n===============================\nhttpx no longer supports installation with `python setup.py install`.\nPlease use `python -m pip install .` instead.\n\"\"\"\n)\nsys.exit(1)\n\n\n# The below code will never execute, however GitHub is particularly\n# picky about where it finds Python packaging metadata.\n# See: https://github.com/github/feedback/discussions/6456\n#\n# To be removed once GitHub catches up.\n\nsetup(\n name=\"httpx\",\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.15.0,<0.16.0\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.23.0\"\n", "path": "httpx/__version__.py"}], "after_files": [{"content": "import sys\n\nfrom setuptools import setup\n\nsys.stderr.write(\n \"\"\"\n===============================\nUnsupported installation method\n===============================\nhttpx no longer supports installation with `python setup.py install`.\nPlease use `python -m pip install .` instead.\n\"\"\"\n)\nsys.exit(1)\n\n\n# The below code will never execute, however GitHub is particularly\n# picky about where it finds Python packaging metadata.\n# See: https://github.com/github/feedback/discussions/6456\n#\n# To be removed once GitHub catches up.\n\nsetup(\n name=\"httpx\",\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.15.0,<0.17.0\",\n ],\n)\n", "path": "setup.py"}, {"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.23.1\"\n", "path": "httpx/__version__.py"}]}
| 731 | 203 |
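The change recorded above simply widens the `httpcore` pin from `<0.16.0` to `<0.17.0` (and bumps the version string). As a small side illustration, assumed rather than taken from the repository, the effect of the wider specifier can be checked with the `packaging` library, using the `0.16.0` release mentioned in the issue:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet(">=0.15.0,<0.16.0")   # original constraint in setup.py
new_pin = SpecifierSet(">=0.15.0,<0.17.0")   # relaxed constraint from the patch

candidate = Version("0.16.0")  # the httpcore release that permits h11==0.14.0

print(candidate in old_pin)  # False: the old pin forces an older httpcore/h11
print(candidate in new_pin)  # True: the relaxed pin accepts it
```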
gh_patches_debug_18193
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3626
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type hints to toilDebugJob.py
Add type hints to src/toil/utils/toilDebugJob.py so it can be checked by mypy during linting.
Refers to #3568.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-906)
┆Issue Number: TOIL-906
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/utils/toilDebugJob.py`
Content:
```
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Debug tool for running a toil job locally."""
15 import logging
16
17 from toil.common import Config, Toil, parser_with_common_options
18 from toil.statsAndLogging import set_logging_from_options
19 from toil.utils.toilDebugFile import printContentsOfJobStore
20 from toil.worker import workerScript
21
22 logger = logging.getLogger(__name__)
23
24
25 def main():
26 parser = parser_with_common_options(jobstore_option=True)
27 parser.add_argument("jobID", nargs=1,
28 help="The job store id of a job within the provided jobstore to run by itself.")
29 parser.add_argument("--printJobInfo", nargs=1,
30 help="Return information about this job to the user including preceding jobs, "
31 "inputs, outputs, and runtime from the last known run.")
32
33 options = parser.parse_args()
34 set_logging_from_options(options)
35 config = Config()
36 config.setOptions(options)
37
38 jobStore = Toil.resumeJobStore(config.jobStore)
39
40 if options.printJobInfo:
41 printContentsOfJobStore(jobStorePath=config.jobStore, nameOfJob=options.printJobInfo)
42
43 # TODO: Option to print list of successor jobs
44 # TODO: Option to run job within python debugger, allowing step through of arguments
45 # idea would be to have option to import pdb and set breakpoint at the start of the user's code
46
47 jobID = options.jobID[0]
48 logger.debug(f"Running the following job locally: {jobID}")
49 workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)
50 logger.debug(f"Finished running: {jobID}")
51
```
Path: `contrib/admin/mypy-with-ignore.py`
Content:
```
1 #!/usr/bin/env python3
2 """
3 Runs mypy and ignores files that do not yet have passing type hints.
4
5 Does not type check test files (any path including "src/toil/test").
6 """
7 import os
8 import subprocess
9 import sys
10
11 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa
12 sys.path.insert(0, pkg_root) # noqa
13
14 from src.toil.lib.resources import glob # type: ignore
15
16
17 def main():
18 all_files_to_check = []
19 for d in ['dashboard', 'docker', 'docs', 'src']:
20 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))
21
22 # TODO: Remove these paths as typing is added and mypy conflicts are addressed
23 ignore_paths = [os.path.abspath(f) for f in [
24 'docker/Dockerfile.py',
25 'docs/conf.py',
26 'docs/vendor/sphinxcontrib/fulltoc.py',
27 'docs/vendor/sphinxcontrib/__init__.py',
28 'src/toil/job.py',
29 'src/toil/leader.py',
30 'src/toil/statsAndLogging.py',
31 'src/toil/common.py',
32 'src/toil/realtimeLogger.py',
33 'src/toil/worker.py',
34 'src/toil/serviceManager.py',
35 'src/toil/toilState.py',
36 'src/toil/__init__.py',
37 'src/toil/resource.py',
38 'src/toil/deferred.py',
39 'src/toil/version.py',
40 'src/toil/wdl/utils.py',
41 'src/toil/wdl/wdl_types.py',
42 'src/toil/wdl/wdl_synthesis.py',
43 'src/toil/wdl/wdl_analysis.py',
44 'src/toil/wdl/wdl_functions.py',
45 'src/toil/wdl/toilwdl.py',
46 'src/toil/wdl/versions/draft2.py',
47 'src/toil/wdl/versions/v1.py',
48 'src/toil/wdl/versions/dev.py',
49 'src/toil/provisioners/clusterScaler.py',
50 'src/toil/provisioners/abstractProvisioner.py',
51 'src/toil/provisioners/gceProvisioner.py',
52 'src/toil/provisioners/__init__.py',
53 'src/toil/provisioners/node.py',
54 'src/toil/provisioners/aws/boto2Context.py',
55 'src/toil/provisioners/aws/awsProvisioner.py',
56 'src/toil/provisioners/aws/__init__.py',
57 'src/toil/batchSystems/slurm.py',
58 'src/toil/batchSystems/gridengine.py',
59 'src/toil/batchSystems/singleMachine.py',
60 'src/toil/batchSystems/abstractBatchSystem.py',
61 'src/toil/batchSystems/parasol.py',
62 'src/toil/batchSystems/kubernetes.py',
63 'src/toil/batchSystems/torque.py',
64 'src/toil/batchSystems/options.py',
65 'src/toil/batchSystems/registry.py',
66 'src/toil/batchSystems/lsf.py',
67 'src/toil/batchSystems/__init__.py',
68 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',
69 'src/toil/batchSystems/lsfHelper.py',
70 'src/toil/batchSystems/htcondor.py',
71 'src/toil/batchSystems/mesos/batchSystem.py',
72 'src/toil/batchSystems/mesos/executor.py',
73 'src/toil/batchSystems/mesos/conftest.py',
74 'src/toil/batchSystems/mesos/__init__.py',
75 'src/toil/batchSystems/mesos/test/__init__.py',
76 'src/toil/cwl/conftest.py',
77 'src/toil/cwl/__init__.py',
78 'src/toil/cwl/cwltoil.py',
79 'src/toil/fileStores/cachingFileStore.py',
80 'src/toil/fileStores/abstractFileStore.py',
81 'src/toil/fileStores/nonCachingFileStore.py',
82 'src/toil/fileStores/__init__.py',
83 'src/toil/jobStores/utils.py',
84 'src/toil/jobStores/abstractJobStore.py',
85 'src/toil/jobStores/conftest.py',
86 'src/toil/jobStores/fileJobStore.py',
87 'src/toil/jobStores/__init__.py',
88 'src/toil/jobStores/googleJobStore.py',
89 'src/toil/jobStores/aws/utils.py',
90 'src/toil/jobStores/aws/jobStore.py',
91 'src/toil/jobStores/aws/__init__.py',
92 'src/toil/utils/toilDebugFile.py',
93 'src/toil/utils/toilUpdateEC2Instances.py',
94 'src/toil/utils/toilStatus.py',
95 'src/toil/utils/toilStats.py',
96 'src/toil/utils/toilSshCluster.py',
97 'src/toil/utils/toilMain.py',
98 'src/toil/utils/__init__.py',
99 'src/toil/utils/toilDebugJob.py',
100 'src/toil/utils/toilLaunchCluster.py',
101 'src/toil/lib/memoize.py',
102 'src/toil/lib/throttle.py',
103 'src/toil/lib/humanize.py',
104 'src/toil/lib/compatibility.py',
105 'src/toil/lib/iterables.py',
106 'src/toil/lib/bioio.py',
107 'src/toil/lib/ec2.py',
108 # 'src/toil/lib/conversions.py',
109 'src/toil/lib/ec2nodes.py',
110 # 'src/toil/lib/misc.py',
111 'src/toil/lib/expando.py',
112 'src/toil/lib/threading.py',
113 'src/toil/lib/exceptions.py',
114 'src/toil/lib/__init__.py',
115 'src/toil/lib/generatedEC2Lists.py',
116 'src/toil/lib/retry.py',
117 'src/toil/lib/objects.py',
118 'src/toil/lib/io.py',
119 'src/toil/lib/docker.py',
120 'src/toil/lib/encryption/_nacl.py',
121 'src/toil/lib/encryption/_dummy.py',
122 'src/toil/lib/encryption/conftest.py',
123 'src/toil/lib/encryption/__init__.py',
124 'src/toil/lib/aws/utils.py',
125 'src/toil/lib/aws/__init__.py'
126 ]]
127
128 filtered_files_to_check = []
129 for file_path in all_files_to_check:
130 if file_path not in ignore_paths and 'src/toil/test' not in file_path:
131 filtered_files_to_check.append(file_path)
132 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?
133 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check
134 p = subprocess.run(args=args, stdout=subprocess.PIPE)
135 result = p.stdout.decode()
136 print(result)
137 if 'Success: no issues found' not in result:
138 exit(1)
139
140
141 if __name__ == '__main__':
142 main()
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py
--- a/contrib/admin/mypy-with-ignore.py
+++ b/contrib/admin/mypy-with-ignore.py
@@ -96,7 +96,6 @@
'src/toil/utils/toilSshCluster.py',
'src/toil/utils/toilMain.py',
'src/toil/utils/__init__.py',
- 'src/toil/utils/toilDebugJob.py',
'src/toil/utils/toilLaunchCluster.py',
'src/toil/lib/memoize.py',
'src/toil/lib/throttle.py',
diff --git a/src/toil/utils/toilDebugJob.py b/src/toil/utils/toilDebugJob.py
--- a/src/toil/utils/toilDebugJob.py
+++ b/src/toil/utils/toilDebugJob.py
@@ -22,7 +22,7 @@
logger = logging.getLogger(__name__)
-def main():
+def main() -> None:
parser = parser_with_common_options(jobstore_option=True)
parser.add_argument("jobID", nargs=1,
help="The job store id of a job within the provided jobstore to run by itself.")
|
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -96,7 +96,6 @@\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n- 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\ndiff --git a/src/toil/utils/toilDebugJob.py b/src/toil/utils/toilDebugJob.py\n--- a/src/toil/utils/toilDebugJob.py\n+++ b/src/toil/utils/toilDebugJob.py\n@@ -22,7 +22,7 @@\n logger = logging.getLogger(__name__)\n \n \n-def main():\n+def main() -> None:\n parser = parser_with_common_options(jobstore_option=True)\n parser.add_argument(\"jobID\", nargs=1,\n help=\"The job store id of a job within the provided jobstore to run by itself.\")\n", "issue": "Add type hints to toilDebugJob.py\nAdd type hints to src/toil/utils/toilDebugJob.py so it can be checked by mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-906)\n\u2506Issue Number: TOIL-906\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Debug tool for running a toil job locally.\"\"\"\nimport logging\n\nfrom toil.common import Config, Toil, parser_with_common_options\nfrom toil.statsAndLogging import set_logging_from_options\nfrom toil.utils.toilDebugFile import printContentsOfJobStore\nfrom toil.worker import workerScript\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n parser = parser_with_common_options(jobstore_option=True)\n parser.add_argument(\"jobID\", nargs=1,\n help=\"The job store id of a job within the provided jobstore to run by itself.\")\n parser.add_argument(\"--printJobInfo\", nargs=1,\n help=\"Return information about this job to the user including preceding jobs, \"\n \"inputs, outputs, and runtime from the last known run.\")\n\n options = parser.parse_args()\n set_logging_from_options(options)\n config = Config()\n config.setOptions(options)\n\n jobStore = Toil.resumeJobStore(config.jobStore)\n\n if options.printJobInfo:\n printContentsOfJobStore(jobStorePath=config.jobStore, nameOfJob=options.printJobInfo)\n\n # TODO: Option to print list of successor jobs\n # TODO: Option to run job within python debugger, allowing step through of arguments\n # idea would be to have option to import pdb and set breakpoint at the start of the user's code\n\n jobID = options.jobID[0]\n logger.debug(f\"Running the following job locally: {jobID}\")\n workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)\n logger.debug(f\"Finished running: {jobID}\")\n", "path": "src/toil/utils/toilDebugJob.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including 
\"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 
'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n # 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}], "after_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Debug tool for running a toil job locally.\"\"\"\nimport logging\n\nfrom toil.common import Config, Toil, parser_with_common_options\nfrom toil.statsAndLogging import set_logging_from_options\nfrom toil.utils.toilDebugFile import printContentsOfJobStore\nfrom toil.worker import workerScript\n\nlogger = logging.getLogger(__name__)\n\n\ndef main() -> None:\n parser = parser_with_common_options(jobstore_option=True)\n parser.add_argument(\"jobID\", nargs=1,\n help=\"The job store id of a job within the provided jobstore to run by itself.\")\n parser.add_argument(\"--printJobInfo\", nargs=1,\n help=\"Return information about this job to the user including preceding jobs, \"\n \"inputs, outputs, and runtime from the last known run.\")\n\n options = parser.parse_args()\n set_logging_from_options(options)\n config = Config()\n config.setOptions(options)\n\n jobStore = Toil.resumeJobStore(config.jobStore)\n\n if options.printJobInfo:\n printContentsOfJobStore(jobStorePath=config.jobStore, nameOfJob=options.printJobInfo)\n\n # TODO: Option to print list of successor jobs\n # TODO: Option to run job within python debugger, allowing step through of arguments\n # idea would be to have option to import pdb and set breakpoint at the start of the user's code\n\n jobID = options.jobID[0]\n logger.debug(f\"Running the following job locally: {jobID}\")\n workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)\n logger.debug(f\"Finished running: {jobID}\")\n", "path": "src/toil/utils/toilDebugJob.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do 
not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 
'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n # 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
| 2,808 | 259 |
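The patch in this row only annotates `main()` with `-> None` and removes `toilDebugJob.py` from the mypy ignore list so the linter starts checking it. Below is a stand-alone, assumed illustration (hypothetical function names, not Toil code) of why the return annotation matters once a module is checked with `--disallow-untyped-defs`:

```python
def untyped_main():
    # With --disallow-untyped-defs, mypy reports roughly:
    #   error: Function is missing a return type annotation
    print("running")


def typed_main() -> None:
    # Annotated functions are fully type-checked, which is the point of
    # removing the module from the mypy ignore list.
    print("running")


if __name__ == "__main__":
    typed_main()
```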
gh_patches_debug_33100
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5406
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix SMTP STARTTLS for Twisted >= 21.2.0
## Summary
The [Mail settings](https://docs.scrapy.org/en/latest/topics/email.html#topics-email-settings) don't have an option to choose a TLS version. Only to enforce upgrading connections to use SSL/TLS.
Mail servers like smtp.office365.com dropped support for TLS1.0 and TLS1.1 and now require TLS1.2: https://techcommunity.microsoft.com/t5/exchange-team-blog/new-opt-in-endpoint-available-for-smtp-auth-clients-still/ba-p/2659652
It seems that scrapy mail doesn't support TLS1.2. The error message (with `MAIL_TLS = True`):
`[scrapy.mail] Unable to send mail: To=['[email protected]'] Cc=[] Subject="Test" Attachs=0- 421 b'4.7.66 TLS 1.0 and 1.1 are not supported. Please upgrade/update your client to support TLS 1.2. Visit https://aka.ms/smtp_auth_tls. [AM6P194CA0047.EURP194.PROD.OUTLOOK.COM]'`
## Motivation
Without TLS1.2 it is no longer possible to send mail via smtp.office365.com. An option to use TLS1.2 would fix this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/mail.py`
Content:
```
1 """
2 Mail sending helpers
3
4 See documentation in docs/topics/email.rst
5 """
6 import logging
7 from email import encoders as Encoders
8 from email.mime.base import MIMEBase
9 from email.mime.multipart import MIMEMultipart
10 from email.mime.nonmultipart import MIMENonMultipart
11 from email.mime.text import MIMEText
12 from email.utils import formatdate
13 from io import BytesIO
14
15 from twisted.internet import defer, ssl
16
17 from scrapy.utils.misc import arg_to_iter
18 from scrapy.utils.python import to_bytes
19
20
21 logger = logging.getLogger(__name__)
22
23
24 # Defined in the email.utils module, but undocumented:
25 # https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42
26 COMMASPACE = ", "
27
28
29 def _to_bytes_or_none(text):
30 if text is None:
31 return None
32 return to_bytes(text)
33
34
35 class MailSender:
36 def __init__(
37 self, smtphost='localhost', mailfrom='scrapy@localhost', smtpuser=None,
38 smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False
39 ):
40 self.smtphost = smtphost
41 self.smtpport = smtpport
42 self.smtpuser = _to_bytes_or_none(smtpuser)
43 self.smtppass = _to_bytes_or_none(smtppass)
44 self.smtptls = smtptls
45 self.smtpssl = smtpssl
46 self.mailfrom = mailfrom
47 self.debug = debug
48
49 @classmethod
50 def from_settings(cls, settings):
51 return cls(
52 smtphost=settings['MAIL_HOST'],
53 mailfrom=settings['MAIL_FROM'],
54 smtpuser=settings['MAIL_USER'],
55 smtppass=settings['MAIL_PASS'],
56 smtpport=settings.getint('MAIL_PORT'),
57 smtptls=settings.getbool('MAIL_TLS'),
58 smtpssl=settings.getbool('MAIL_SSL'),
59 )
60
61 def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):
62 from twisted.internet import reactor
63 if attachs:
64 msg = MIMEMultipart()
65 else:
66 msg = MIMENonMultipart(*mimetype.split('/', 1))
67
68 to = list(arg_to_iter(to))
69 cc = list(arg_to_iter(cc))
70
71 msg['From'] = self.mailfrom
72 msg['To'] = COMMASPACE.join(to)
73 msg['Date'] = formatdate(localtime=True)
74 msg['Subject'] = subject
75 rcpts = to[:]
76 if cc:
77 rcpts.extend(cc)
78 msg['Cc'] = COMMASPACE.join(cc)
79
80 if charset:
81 msg.set_charset(charset)
82
83 if attachs:
84 msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))
85 for attach_name, mimetype, f in attachs:
86 part = MIMEBase(*mimetype.split('/'))
87 part.set_payload(f.read())
88 Encoders.encode_base64(part)
89 part.add_header('Content-Disposition', 'attachment', filename=attach_name)
90 msg.attach(part)
91 else:
92 msg.set_payload(body)
93
94 if _callback:
95 _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)
96
97 if self.debug:
98 logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
99 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
100 {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
101 'mailattachs': len(attachs)})
102 return
103
104 dfd = self._sendmail(rcpts, msg.as_string().encode(charset or 'utf-8'))
105 dfd.addCallbacks(
106 callback=self._sent_ok,
107 errback=self._sent_failed,
108 callbackArgs=[to, cc, subject, len(attachs)],
109 errbackArgs=[to, cc, subject, len(attachs)],
110 )
111 reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)
112 return dfd
113
114 def _sent_ok(self, result, to, cc, subject, nattachs):
115 logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '
116 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d',
117 {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
118 'mailattachs': nattachs})
119
120 def _sent_failed(self, failure, to, cc, subject, nattachs):
121 errstr = str(failure.value)
122 logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '
123 'Subject="%(mailsubject)s" Attachs=%(mailattachs)d'
124 '- %(mailerr)s',
125 {'mailto': to, 'mailcc': cc, 'mailsubject': subject,
126 'mailattachs': nattachs, 'mailerr': errstr})
127
128 def _sendmail(self, to_addrs, msg):
129 # Import twisted.mail here because it is not available in python3
130 from twisted.internet import reactor
131 from twisted.mail.smtp import ESMTPSenderFactory
132 msg = BytesIO(msg)
133 d = defer.Deferred()
134 factory = ESMTPSenderFactory(
135 self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d,
136 heloFallback=True, requireAuthentication=False, requireTransportSecurity=self.smtptls,
137 )
138 factory.noisy = False
139
140 if self.smtpssl:
141 reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
142 else:
143 reactor.connectTCP(self.smtphost, self.smtpport, factory)
144
145 return d
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/mail.py b/scrapy/mail.py
--- a/scrapy/mail.py
+++ b/scrapy/mail.py
@@ -12,7 +12,9 @@
from email.utils import formatdate
from io import BytesIO
+from twisted.python.versions import Version
from twisted.internet import defer, ssl
+from twisted import version as twisted_version
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.python import to_bytes
@@ -126,16 +128,11 @@
'mailattachs': nattachs, 'mailerr': errstr})
def _sendmail(self, to_addrs, msg):
- # Import twisted.mail here because it is not available in python3
from twisted.internet import reactor
- from twisted.mail.smtp import ESMTPSenderFactory
msg = BytesIO(msg)
d = defer.Deferred()
- factory = ESMTPSenderFactory(
- self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d,
- heloFallback=True, requireAuthentication=False, requireTransportSecurity=self.smtptls,
- )
- factory.noisy = False
+
+ factory = self._create_sender_factory(to_addrs, msg, d)
if self.smtpssl:
reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())
@@ -143,3 +140,20 @@
reactor.connectTCP(self.smtphost, self.smtpport, factory)
return d
+
+ def _create_sender_factory(self, to_addrs, msg, d):
+ from twisted.mail.smtp import ESMTPSenderFactory
+
+ factory_keywords = {
+ 'heloFallback': True,
+ 'requireAuthentication': False,
+ 'requireTransportSecurity': self.smtptls
+ }
+
+ # Newer versions of twisted require the hostname to use STARTTLS
+ if twisted_version >= Version('twisted', 21, 2, 0):
+ factory_keywords['hostname'] = self.smtphost
+
+ factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d, **factory_keywords)
+ factory.noisy = False
+ return factory
|
{"golden_diff": "diff --git a/scrapy/mail.py b/scrapy/mail.py\n--- a/scrapy/mail.py\n+++ b/scrapy/mail.py\n@@ -12,7 +12,9 @@\n from email.utils import formatdate\n from io import BytesIO\n \n+from twisted.python.versions import Version\n from twisted.internet import defer, ssl\n+from twisted import version as twisted_version\n \n from scrapy.utils.misc import arg_to_iter\n from scrapy.utils.python import to_bytes\n@@ -126,16 +128,11 @@\n 'mailattachs': nattachs, 'mailerr': errstr})\n \n def _sendmail(self, to_addrs, msg):\n- # Import twisted.mail here because it is not available in python3\n from twisted.internet import reactor\n- from twisted.mail.smtp import ESMTPSenderFactory\n msg = BytesIO(msg)\n d = defer.Deferred()\n- factory = ESMTPSenderFactory(\n- self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d,\n- heloFallback=True, requireAuthentication=False, requireTransportSecurity=self.smtptls,\n- )\n- factory.noisy = False\n+\n+ factory = self._create_sender_factory(to_addrs, msg, d)\n \n if self.smtpssl:\n reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())\n@@ -143,3 +140,20 @@\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n \n return d\n+\n+ def _create_sender_factory(self, to_addrs, msg, d):\n+ from twisted.mail.smtp import ESMTPSenderFactory\n+\n+ factory_keywords = {\n+ 'heloFallback': True,\n+ 'requireAuthentication': False,\n+ 'requireTransportSecurity': self.smtptls\n+ }\n+\n+ # Newer versions of twisted require the hostname to use STARTTLS\n+ if twisted_version >= Version('twisted', 21, 2, 0):\n+ factory_keywords['hostname'] = self.smtphost\n+\n+ factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d, **factory_keywords)\n+ factory.noisy = False\n+ return factory\n", "issue": "Fix SMTP STARTTLS for Twisted >= 21.2.0\n## Summary\r\n\r\nThe [Mail settings](https://docs.scrapy.org/en/latest/topics/email.html#topics-email-settings) don't have an option to choose a TLS version. Only to enforce upgrading connections to use SSL/TLS.\r\nMail servers like smtp.office365.com dropped support for TLS1.0 and TLS1.1 and now require TLS1.2: https://techcommunity.microsoft.com/t5/exchange-team-blog/new-opt-in-endpoint-available-for-smtp-auth-clients-still/ba-p/2659652 \r\n\r\nIt seems that scrapy mail doesn't support TLS1.2. The error message (with `MAIL_TLS = True`):\r\n\r\n`[scrapy.mail] Unable to send mail: To=['[email protected]'] Cc=[] Subject=\"Test\" Attachs=0- 421 b'4.7.66 TLS 1.0 and 1.1 are not supported. Please upgrade/update your client to support TLS 1.2. Visit https://aka.ms/smtp_auth_tls. [AM6P194CA0047.EURP194.PROD.OUTLOOK.COM]'` \r\n\r\n## Motivation\r\n\r\nWithout TLS1.2 it's not possible anymore to send mails via smtp.office365.com. 
An option to use TLS1.2 would fix this issue\r\n\n", "before_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nimport logging\nfrom email import encoders as Encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom io import BytesIO\n\nfrom twisted.internet import defer, ssl\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import to_bytes\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Defined in the email.utils module, but undocumented:\n# https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42\nCOMMASPACE = \", \"\n\n\ndef _to_bytes_or_none(text):\n if text is None:\n return None\n return to_bytes(text)\n\n\nclass MailSender:\n def __init__(\n self, smtphost='localhost', mailfrom='scrapy@localhost', smtpuser=None,\n smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False\n ):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = _to_bytes_or_none(smtpuser)\n self.smtppass = _to_bytes_or_none(smtppass)\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(\n smtphost=settings['MAIL_HOST'],\n mailfrom=settings['MAIL_FROM'],\n smtpuser=settings['MAIL_USER'],\n smtppass=settings['MAIL_PASS'],\n smtpport=settings.getint('MAIL_PORT'),\n smtptls=settings.getbool('MAIL_TLS'),\n smtpssl=settings.getbool('MAIL_SSL'),\n )\n\n def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):\n from twisted.internet import reactor\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mimetype.split('/', 1))\n\n to = list(arg_to_iter(to))\n cc = list(arg_to_iter(cc))\n\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg['Cc'] = COMMASPACE.join(cc)\n\n if charset:\n msg.set_charset(charset)\n\n if attachs:\n msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split('/'))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment', filename=attach_name)\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n _callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {'mailto': to, 'mailcc': cc, 'mailsubject': subject,\n 'mailattachs': len(attachs)})\n return\n\n dfd = self._sendmail(rcpts, msg.as_string().encode(charset or 'utf-8'))\n dfd.addCallbacks(\n callback=self._sent_ok,\n errback=self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, cc, subject, len(attachs)],\n )\n reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {'mailto': to, 'mailcc': cc, 'mailsubject': subject,\n 'mailattachs': nattachs})\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = 
str(failure.value)\n logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n '- %(mailerr)s',\n {'mailto': to, 'mailcc': cc, 'mailsubject': subject,\n 'mailattachs': nattachs, 'mailerr': errstr})\n\n def _sendmail(self, to_addrs, msg):\n # Import twisted.mail here because it is not available in python3\n from twisted.internet import reactor\n from twisted.mail.smtp import ESMTPSenderFactory\n msg = BytesIO(msg)\n d = defer.Deferred()\n factory = ESMTPSenderFactory(\n self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d,\n heloFallback=True, requireAuthentication=False, requireTransportSecurity=self.smtptls,\n )\n factory.noisy = False\n\n if self.smtpssl:\n reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n", "path": "scrapy/mail.py"}], "after_files": [{"content": "\"\"\"\nMail sending helpers\n\nSee documentation in docs/topics/email.rst\n\"\"\"\nimport logging\nfrom email import encoders as Encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.nonmultipart import MIMENonMultipart\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom io import BytesIO\n\nfrom twisted.python.versions import Version\nfrom twisted.internet import defer, ssl\nfrom twisted import version as twisted_version\n\nfrom scrapy.utils.misc import arg_to_iter\nfrom scrapy.utils.python import to_bytes\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Defined in the email.utils module, but undocumented:\n# https://github.com/python/cpython/blob/v3.9.0/Lib/email/utils.py#L42\nCOMMASPACE = \", \"\n\n\ndef _to_bytes_or_none(text):\n if text is None:\n return None\n return to_bytes(text)\n\n\nclass MailSender:\n def __init__(\n self, smtphost='localhost', mailfrom='scrapy@localhost', smtpuser=None,\n smtppass=None, smtpport=25, smtptls=False, smtpssl=False, debug=False\n ):\n self.smtphost = smtphost\n self.smtpport = smtpport\n self.smtpuser = _to_bytes_or_none(smtpuser)\n self.smtppass = _to_bytes_or_none(smtppass)\n self.smtptls = smtptls\n self.smtpssl = smtpssl\n self.mailfrom = mailfrom\n self.debug = debug\n\n @classmethod\n def from_settings(cls, settings):\n return cls(\n smtphost=settings['MAIL_HOST'],\n mailfrom=settings['MAIL_FROM'],\n smtpuser=settings['MAIL_USER'],\n smtppass=settings['MAIL_PASS'],\n smtpport=settings.getint('MAIL_PORT'),\n smtptls=settings.getbool('MAIL_TLS'),\n smtpssl=settings.getbool('MAIL_SSL'),\n )\n\n def send(self, to, subject, body, cc=None, attachs=(), mimetype='text/plain', charset=None, _callback=None):\n from twisted.internet import reactor\n if attachs:\n msg = MIMEMultipart()\n else:\n msg = MIMENonMultipart(*mimetype.split('/', 1))\n\n to = list(arg_to_iter(to))\n cc = list(arg_to_iter(cc))\n\n msg['From'] = self.mailfrom\n msg['To'] = COMMASPACE.join(to)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n rcpts = to[:]\n if cc:\n rcpts.extend(cc)\n msg['Cc'] = COMMASPACE.join(cc)\n\n if charset:\n msg.set_charset(charset)\n\n if attachs:\n msg.attach(MIMEText(body, 'plain', charset or 'us-ascii'))\n for attach_name, mimetype, f in attachs:\n part = MIMEBase(*mimetype.split('/'))\n part.set_payload(f.read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment', filename=attach_name)\n msg.attach(part)\n else:\n msg.set_payload(body)\n\n if _callback:\n 
_callback(to=to, subject=subject, body=body, cc=cc, attach=attachs, msg=msg)\n\n if self.debug:\n logger.debug('Debug mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {'mailto': to, 'mailcc': cc, 'mailsubject': subject,\n 'mailattachs': len(attachs)})\n return\n\n dfd = self._sendmail(rcpts, msg.as_string().encode(charset or 'utf-8'))\n dfd.addCallbacks(\n callback=self._sent_ok,\n errback=self._sent_failed,\n callbackArgs=[to, cc, subject, len(attachs)],\n errbackArgs=[to, cc, subject, len(attachs)],\n )\n reactor.addSystemEventTrigger('before', 'shutdown', lambda: dfd)\n return dfd\n\n def _sent_ok(self, result, to, cc, subject, nattachs):\n logger.info('Mail sent OK: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d',\n {'mailto': to, 'mailcc': cc, 'mailsubject': subject,\n 'mailattachs': nattachs})\n\n def _sent_failed(self, failure, to, cc, subject, nattachs):\n errstr = str(failure.value)\n logger.error('Unable to send mail: To=%(mailto)s Cc=%(mailcc)s '\n 'Subject=\"%(mailsubject)s\" Attachs=%(mailattachs)d'\n '- %(mailerr)s',\n {'mailto': to, 'mailcc': cc, 'mailsubject': subject,\n 'mailattachs': nattachs, 'mailerr': errstr})\n\n def _sendmail(self, to_addrs, msg):\n from twisted.internet import reactor\n msg = BytesIO(msg)\n d = defer.Deferred()\n\n factory = self._create_sender_factory(to_addrs, msg, d)\n\n if self.smtpssl:\n reactor.connectSSL(self.smtphost, self.smtpport, factory, ssl.ClientContextFactory())\n else:\n reactor.connectTCP(self.smtphost, self.smtpport, factory)\n\n return d\n\n def _create_sender_factory(self, to_addrs, msg, d):\n from twisted.mail.smtp import ESMTPSenderFactory\n\n factory_keywords = {\n 'heloFallback': True,\n 'requireAuthentication': False,\n 'requireTransportSecurity': self.smtptls\n }\n\n # Newer versions of twisted require the hostname to use STARTTLS\n if twisted_version >= Version('twisted', 21, 2, 0):\n factory_keywords['hostname'] = self.smtphost\n\n factory = ESMTPSenderFactory(self.smtpuser, self.smtppass, self.mailfrom, to_addrs, msg, d, **factory_keywords)\n factory.noisy = False\n return factory\n", "path": "scrapy/mail.py"}]}
| 2,152 | 508 |
gh_patches_debug_9050
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-3910
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[runtimeconfig] typo: "config.get_variable(...)"
https://github.com/GoogleCloudPlatform/google-cloud-python/blob/bee32c0ae68b1e6cf224f9017c758a8e31e8be83/runtimeconfig/google/cloud/runtimeconfig/config.py#L190
[runtimeconfig] Should be "client.config(...)" not "client.get_config(...)
https://github.com/GoogleCloudPlatform/google-cloud-python/blob/bee32c0ae68b1e6cf224f9017c758a8e31e8be83/runtimeconfig/google/cloud/runtimeconfig/config.py#L189
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `runtimeconfig/google/cloud/runtimeconfig/config.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Create / interact with Google Cloud RuntimeConfig configs."""
16
17 from google.api.core import page_iterator
18 from google.cloud.exceptions import NotFound
19 from google.cloud.runtimeconfig._helpers import config_name_from_full_name
20 from google.cloud.runtimeconfig.variable import Variable
21
22
23 class Config(object):
24 """A Config resource in the Cloud RuntimeConfig service.
25
26 This consists of metadata and a hierarchy of variables.
27
28 See
29 https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs
30
31 :type client: :class:`google.cloud.runtimeconfig.client.Client`
32 :param client: A client which holds credentials and project configuration
33 for the config (which requires a project).
34
35 :type name: str
36 :param name: The name of the config.
37 """
38
39 def __init__(self, client, name):
40 self._client = client
41 self.name = name
42 self._properties = {}
43
44 def __repr__(self):
45 return '<Config: %s>' % (self.name,)
46
47 @property
48 def client(self):
49 """The client bound to this config."""
50 return self._client
51
52 @property
53 def description(self):
54 """Description of the config object.
55
56 See
57 https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs#resource-runtimeconfig
58
59 :rtype: str, or ``NoneType``
60 :returns: the description (None until set from the server).
61 """
62 return self._properties.get('description')
63
64 @property
65 def project(self):
66 """Project bound to the config.
67
68 :rtype: str
69 :returns: the project (derived from the client).
70 """
71 return self._client.project
72
73 @property
74 def full_name(self):
75 """Fully-qualified name of this variable.
76
77 Example:
78 ``projects/my-project/configs/my-config``
79
80 :rtype: str
81 :returns: The full name based on project and config names.
82
83 :raises: :class:`ValueError` if the config is missing a name.
84 """
85 if not self.name:
86 raise ValueError('Missing config name.')
87 return 'projects/%s/configs/%s' % (self._client.project, self.name)
88
89 @property
90 def path(self):
91 """URL path for the config's APIs.
92
93 :rtype: str
94 :returns: The URL path based on project and config names.
95 """
96 return '/%s' % (self.full_name,)
97
98 def variable(self, variable_name):
99 """Factory constructor for variable object.
100
101 .. note::
102 This will not make an HTTP request; it simply instantiates
103 a variable object owned by this config.
104
105 :type variable_name: str
106 :param variable_name: The name of the variable to be instantiated.
107
108 :rtype: :class:`google.cloud.runtimeconfig.variable.Variable`
109 :returns: The variable object created.
110 """
111 return Variable(name=variable_name, config=self)
112
113 def _require_client(self, client):
114 """Check client or verify over-ride.
115
116 :type client: :class:`google.cloud.runtimconfig.client.Client`
117 :param client:
118 (Optional) The client to use. If not passed, falls back to the
119 ``client`` stored on the current zone.
120
121 :rtype: :class:`google.cloud.runtimeconfig.client.Client`
122 :returns: The client passed in or the currently bound client.
123 """
124 if client is None:
125 client = self._client
126 return client
127
128 def _set_properties(self, api_response):
129 """Update properties from resource in body of ``api_response``
130
131 :type api_response: dict
132 :param api_response: response returned from an API call
133 """
134 self._properties.clear()
135 cleaned = api_response.copy()
136 if 'name' in cleaned:
137 self.name = config_name_from_full_name(cleaned.pop('name'))
138 self._properties.update(cleaned)
139
140 def exists(self, client=None):
141 """Determines whether or not this config exists.
142
143 :type client: :class:`~google.cloud.runtimeconfig.client.Client`
144 :param client:
145 (Optional) The client to use. If not passed, falls back to the
146 ``client`` stored on the current config.
147
148 :rtype: bool
149 :returns: True if the config exists in Cloud Runtime Configurator.
150 """
151 client = self._require_client(client)
152 try:
153 # We only need the status code (200 or not) so we seek to
154 # minimize the returned payload.
155 query_params = {'fields': 'name'}
156 client._connection.api_request(
157 method='GET', path=self.path, query_params=query_params)
158 return True
159 except NotFound:
160 return False
161
162 def reload(self, client=None):
163 """API call: reload the config via a ``GET`` request.
164
165 This method will reload the newest data for the config.
166
167 See
168 https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs/get
169
170 :type client: :class:`google.cloud.runtimeconfig.client.Client`
171 :param client:
172 (Optional) The client to use. If not passed, falls back to the
173 client stored on the current config.
174 """
175 client = self._require_client(client)
176
177 # We assume the config exists. If it doesn't it will raise a NotFound
178 # exception.
179 resp = client._connection.api_request(method='GET', path=self.path)
180 self._set_properties(api_response=resp)
181
182 def get_variable(self, variable_name, client=None):
183 """API call: get a variable via a ``GET`` request.
184
185 This will return None if the variable doesn't exist::
186
187 >>> from google.cloud import runtimeconfig
188 >>> client = runtimeconfig.Client()
189 >>> config = client.get_config('my-config')
190 >>> print(config.get_varialbe('variable-name'))
191 <Variable: my-config, variable-name>
192 >>> print(config.get_variable('does-not-exist'))
193 None
194
195 :type variable_name: str
196 :param variable_name: The name of the variable to retrieve.
197
198 :type client: :class:`~google.cloud.runtimeconfig.client.Client`
199 :param client:
200 (Optional) The client to use. If not passed, falls back to the
201 ``client`` stored on the current config.
202
203 :rtype: :class:`google.cloud.runtimeconfig.variable.Variable` or None
204 :returns: The variable object if it exists, otherwise None.
205 """
206 client = self._require_client(client)
207 variable = Variable(config=self, name=variable_name)
208 try:
209 variable.reload(client=client)
210 return variable
211 except NotFound:
212 return None
213
214 def list_variables(self, page_size=None, page_token=None, client=None):
215 """API call: list variables for this config.
216
217 This only lists variable names, not the values.
218
219 See
220 https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables/list
221
222 :type page_size: int
223 :param page_size:
224 (Optional) Maximum number of variables to return per page.
225
226 :type page_token: str
227 :param page_token: opaque marker for the next "page" of variables. If
228 not passed, will return the first page of variables.
229
230 :type client: :class:`~google.cloud.runtimeconfig.client.Client`
231 :param client:
232 (Optional) The client to use. If not passed, falls back to the
233 ``client`` stored on the current config.
234
235 :rtype: :class:`~google.api.core.page_iterator.Iterator`
236 :returns:
237 Iterator of :class:`~google.cloud.runtimeconfig.variable.Variable`
238 belonging to this project.
239 """
240 path = '%s/variables' % (self.path,)
241 client = self._require_client(client)
242 iterator = page_iterator.HTTPIterator(
243 client=client,
244 api_request=client._connection.api_request,
245 path=path,
246 item_to_value=_item_to_variable,
247 items_key='variables',
248 page_token=page_token,
249 max_results=page_size)
250 iterator._MAX_RESULTS = 'pageSize'
251 iterator.config = self
252 return iterator
253
254
255 def _item_to_variable(iterator, resource):
256 """Convert a JSON variable to the native object.
257
258 :type iterator: :class:`~google.api.core.page_iterator.Iterator`
259 :param iterator: The iterator that has retrieved the item.
260
261 :type resource: dict
262 :param resource: An item to be converted to a variable.
263
264 :rtype: :class:`.Variable`
265 :returns: The next variable in the page.
266 """
267 return Variable.from_api_repr(resource, iterator.config)
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/runtimeconfig/google/cloud/runtimeconfig/config.py b/runtimeconfig/google/cloud/runtimeconfig/config.py
--- a/runtimeconfig/google/cloud/runtimeconfig/config.py
+++ b/runtimeconfig/google/cloud/runtimeconfig/config.py
@@ -186,8 +186,8 @@
>>> from google.cloud import runtimeconfig
>>> client = runtimeconfig.Client()
- >>> config = client.get_config('my-config')
- >>> print(config.get_varialbe('variable-name'))
+ >>> config = client.config('my-config')
+ >>> print(config.get_variable('variable-name'))
<Variable: my-config, variable-name>
>>> print(config.get_variable('does-not-exist'))
None
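With the docstring corrected as above, a minimal usage sketch of the intended call sequence (assumes application-default credentials and an existing Runtime Config named `my-config`; the names are illustrative):

```python
from google.cloud import runtimeconfig

client = runtimeconfig.Client()
config = client.config("my-config")  # factory call from the corrected docstring

# get_variable() issues the GET request and returns None for a missing variable.
variable = config.get_variable("variable-name")
if variable is None:
    print("variable-name does not exist")
else:
    print(variable)

# list_variables() returns variable names only, not their values.
for var in config.list_variables():
    print(var)
```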
|
{"golden_diff": "diff --git a/runtimeconfig/google/cloud/runtimeconfig/config.py b/runtimeconfig/google/cloud/runtimeconfig/config.py\n--- a/runtimeconfig/google/cloud/runtimeconfig/config.py\n+++ b/runtimeconfig/google/cloud/runtimeconfig/config.py\n@@ -186,8 +186,8 @@\n \n >>> from google.cloud import runtimeconfig\n >>> client = runtimeconfig.Client()\n- >>> config = client.get_config('my-config')\n- >>> print(config.get_varialbe('variable-name'))\n+ >>> config = client.config('my-config')\n+ >>> print(config.get_variable('variable-name'))\n <Variable: my-config, variable-name>\n >>> print(config.get_variable('does-not-exist'))\n None\n", "issue": "[runtimeconfig] typo: \"config.get_variable(...)\"\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/bee32c0ae68b1e6cf224f9017c758a8e31e8be83/runtimeconfig/google/cloud/runtimeconfig/config.py#L190\n[runtimeconfig] Should be \"client.config(...)\" not \"client.get_config(...)\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/bee32c0ae68b1e6cf224f9017c758a8e31e8be83/runtimeconfig/google/cloud/runtimeconfig/config.py#L189\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Create / interact with Google Cloud RuntimeConfig configs.\"\"\"\n\nfrom google.api.core import page_iterator\nfrom google.cloud.exceptions import NotFound\nfrom google.cloud.runtimeconfig._helpers import config_name_from_full_name\nfrom google.cloud.runtimeconfig.variable import Variable\n\n\nclass Config(object):\n \"\"\"A Config resource in the Cloud RuntimeConfig service.\n\n This consists of metadata and a hierarchy of variables.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs\n\n :type client: :class:`google.cloud.runtimeconfig.client.Client`\n :param client: A client which holds credentials and project configuration\n for the config (which requires a project).\n\n :type name: str\n :param name: The name of the config.\n \"\"\"\n\n def __init__(self, client, name):\n self._client = client\n self.name = name\n self._properties = {}\n\n def __repr__(self):\n return '<Config: %s>' % (self.name,)\n\n @property\n def client(self):\n \"\"\"The client bound to this config.\"\"\"\n return self._client\n\n @property\n def description(self):\n \"\"\"Description of the config object.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs#resource-runtimeconfig\n\n :rtype: str, or ``NoneType``\n :returns: the description (None until set from the server).\n \"\"\"\n return self._properties.get('description')\n\n @property\n def project(self):\n \"\"\"Project bound to the config.\n\n :rtype: str\n :returns: the project (derived from the client).\n \"\"\"\n return self._client.project\n\n @property\n def full_name(self):\n \"\"\"Fully-qualified name of this variable.\n\n Example:\n ``projects/my-project/configs/my-config``\n\n :rtype: str\n :returns: The full name 
based on project and config names.\n\n :raises: :class:`ValueError` if the config is missing a name.\n \"\"\"\n if not self.name:\n raise ValueError('Missing config name.')\n return 'projects/%s/configs/%s' % (self._client.project, self.name)\n\n @property\n def path(self):\n \"\"\"URL path for the config's APIs.\n\n :rtype: str\n :returns: The URL path based on project and config names.\n \"\"\"\n return '/%s' % (self.full_name,)\n\n def variable(self, variable_name):\n \"\"\"Factory constructor for variable object.\n\n .. note::\n This will not make an HTTP request; it simply instantiates\n a variable object owned by this config.\n\n :type variable_name: str\n :param variable_name: The name of the variable to be instantiated.\n\n :rtype: :class:`google.cloud.runtimeconfig.variable.Variable`\n :returns: The variable object created.\n \"\"\"\n return Variable(name=variable_name, config=self)\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`google.cloud.runtimconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current zone.\n\n :rtype: :class:`google.cloud.runtimeconfig.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n def _set_properties(self, api_response):\n \"\"\"Update properties from resource in body of ``api_response``\n\n :type api_response: dict\n :param api_response: response returned from an API call\n \"\"\"\n self._properties.clear()\n cleaned = api_response.copy()\n if 'name' in cleaned:\n self.name = config_name_from_full_name(cleaned.pop('name'))\n self._properties.update(cleaned)\n\n def exists(self, client=None):\n \"\"\"Determines whether or not this config exists.\n\n :type client: :class:`~google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current config.\n\n :rtype: bool\n :returns: True if the config exists in Cloud Runtime Configurator.\n \"\"\"\n client = self._require_client(client)\n try:\n # We only need the status code (200 or not) so we seek to\n # minimize the returned payload.\n query_params = {'fields': 'name'}\n client._connection.api_request(\n method='GET', path=self.path, query_params=query_params)\n return True\n except NotFound:\n return False\n\n def reload(self, client=None):\n \"\"\"API call: reload the config via a ``GET`` request.\n\n This method will reload the newest data for the config.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs/get\n\n :type client: :class:`google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n client stored on the current config.\n \"\"\"\n client = self._require_client(client)\n\n # We assume the config exists. 
If it doesn't it will raise a NotFound\n # exception.\n resp = client._connection.api_request(method='GET', path=self.path)\n self._set_properties(api_response=resp)\n\n def get_variable(self, variable_name, client=None):\n \"\"\"API call: get a variable via a ``GET`` request.\n\n This will return None if the variable doesn't exist::\n\n >>> from google.cloud import runtimeconfig\n >>> client = runtimeconfig.Client()\n >>> config = client.get_config('my-config')\n >>> print(config.get_varialbe('variable-name'))\n <Variable: my-config, variable-name>\n >>> print(config.get_variable('does-not-exist'))\n None\n\n :type variable_name: str\n :param variable_name: The name of the variable to retrieve.\n\n :type client: :class:`~google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current config.\n\n :rtype: :class:`google.cloud.runtimeconfig.variable.Variable` or None\n :returns: The variable object if it exists, otherwise None.\n \"\"\"\n client = self._require_client(client)\n variable = Variable(config=self, name=variable_name)\n try:\n variable.reload(client=client)\n return variable\n except NotFound:\n return None\n\n def list_variables(self, page_size=None, page_token=None, client=None):\n \"\"\"API call: list variables for this config.\n\n This only lists variable names, not the values.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables/list\n\n :type page_size: int\n :param page_size:\n (Optional) Maximum number of variables to return per page.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of variables. If\n not passed, will return the first page of variables.\n\n :type client: :class:`~google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. 
If not passed, falls back to the\n ``client`` stored on the current config.\n\n :rtype: :class:`~google.api.core.page_iterator.Iterator`\n :returns:\n Iterator of :class:`~google.cloud.runtimeconfig.variable.Variable`\n belonging to this project.\n \"\"\"\n path = '%s/variables' % (self.path,)\n client = self._require_client(client)\n iterator = page_iterator.HTTPIterator(\n client=client,\n api_request=client._connection.api_request,\n path=path,\n item_to_value=_item_to_variable,\n items_key='variables',\n page_token=page_token,\n max_results=page_size)\n iterator._MAX_RESULTS = 'pageSize'\n iterator.config = self\n return iterator\n\n\ndef _item_to_variable(iterator, resource):\n \"\"\"Convert a JSON variable to the native object.\n\n :type iterator: :class:`~google.api.core.page_iterator.Iterator`\n :param iterator: The iterator that has retrieved the item.\n\n :type resource: dict\n :param resource: An item to be converted to a variable.\n\n :rtype: :class:`.Variable`\n :returns: The next variable in the page.\n \"\"\"\n return Variable.from_api_repr(resource, iterator.config)\n", "path": "runtimeconfig/google/cloud/runtimeconfig/config.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Create / interact with Google Cloud RuntimeConfig configs.\"\"\"\n\nfrom google.api.core import page_iterator\nfrom google.cloud.exceptions import NotFound\nfrom google.cloud.runtimeconfig._helpers import config_name_from_full_name\nfrom google.cloud.runtimeconfig.variable import Variable\n\n\nclass Config(object):\n \"\"\"A Config resource in the Cloud RuntimeConfig service.\n\n This consists of metadata and a hierarchy of variables.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs\n\n :type client: :class:`google.cloud.runtimeconfig.client.Client`\n :param client: A client which holds credentials and project configuration\n for the config (which requires a project).\n\n :type name: str\n :param name: The name of the config.\n \"\"\"\n\n def __init__(self, client, name):\n self._client = client\n self.name = name\n self._properties = {}\n\n def __repr__(self):\n return '<Config: %s>' % (self.name,)\n\n @property\n def client(self):\n \"\"\"The client bound to this config.\"\"\"\n return self._client\n\n @property\n def description(self):\n \"\"\"Description of the config object.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs#resource-runtimeconfig\n\n :rtype: str, or ``NoneType``\n :returns: the description (None until set from the server).\n \"\"\"\n return self._properties.get('description')\n\n @property\n def project(self):\n \"\"\"Project bound to the config.\n\n :rtype: str\n :returns: the project (derived from the client).\n \"\"\"\n return self._client.project\n\n @property\n def full_name(self):\n \"\"\"Fully-qualified name of this variable.\n\n Example:\n 
``projects/my-project/configs/my-config``\n\n :rtype: str\n :returns: The full name based on project and config names.\n\n :raises: :class:`ValueError` if the config is missing a name.\n \"\"\"\n if not self.name:\n raise ValueError('Missing config name.')\n return 'projects/%s/configs/%s' % (self._client.project, self.name)\n\n @property\n def path(self):\n \"\"\"URL path for the config's APIs.\n\n :rtype: str\n :returns: The URL path based on project and config names.\n \"\"\"\n return '/%s' % (self.full_name,)\n\n def variable(self, variable_name):\n \"\"\"Factory constructor for variable object.\n\n .. note::\n This will not make an HTTP request; it simply instantiates\n a variable object owned by this config.\n\n :type variable_name: str\n :param variable_name: The name of the variable to be instantiated.\n\n :rtype: :class:`google.cloud.runtimeconfig.variable.Variable`\n :returns: The variable object created.\n \"\"\"\n return Variable(name=variable_name, config=self)\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`google.cloud.runtimconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current zone.\n\n :rtype: :class:`google.cloud.runtimeconfig.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n def _set_properties(self, api_response):\n \"\"\"Update properties from resource in body of ``api_response``\n\n :type api_response: dict\n :param api_response: response returned from an API call\n \"\"\"\n self._properties.clear()\n cleaned = api_response.copy()\n if 'name' in cleaned:\n self.name = config_name_from_full_name(cleaned.pop('name'))\n self._properties.update(cleaned)\n\n def exists(self, client=None):\n \"\"\"Determines whether or not this config exists.\n\n :type client: :class:`~google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current config.\n\n :rtype: bool\n :returns: True if the config exists in Cloud Runtime Configurator.\n \"\"\"\n client = self._require_client(client)\n try:\n # We only need the status code (200 or not) so we seek to\n # minimize the returned payload.\n query_params = {'fields': 'name'}\n client._connection.api_request(\n method='GET', path=self.path, query_params=query_params)\n return True\n except NotFound:\n return False\n\n def reload(self, client=None):\n \"\"\"API call: reload the config via a ``GET`` request.\n\n This method will reload the newest data for the config.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs/get\n\n :type client: :class:`google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n client stored on the current config.\n \"\"\"\n client = self._require_client(client)\n\n # We assume the config exists. 
If it doesn't it will raise a NotFound\n # exception.\n resp = client._connection.api_request(method='GET', path=self.path)\n self._set_properties(api_response=resp)\n\n def get_variable(self, variable_name, client=None):\n \"\"\"API call: get a variable via a ``GET`` request.\n\n This will return None if the variable doesn't exist::\n\n >>> from google.cloud import runtimeconfig\n >>> client = runtimeconfig.Client()\n >>> config = client.config('my-config')\n >>> print(config.get_variable('variable-name'))\n <Variable: my-config, variable-name>\n >>> print(config.get_variable('does-not-exist'))\n None\n\n :type variable_name: str\n :param variable_name: The name of the variable to retrieve.\n\n :type client: :class:`~google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current config.\n\n :rtype: :class:`google.cloud.runtimeconfig.variable.Variable` or None\n :returns: The variable object if it exists, otherwise None.\n \"\"\"\n client = self._require_client(client)\n variable = Variable(config=self, name=variable_name)\n try:\n variable.reload(client=client)\n return variable\n except NotFound:\n return None\n\n def list_variables(self, page_size=None, page_token=None, client=None):\n \"\"\"API call: list variables for this config.\n\n This only lists variable names, not the values.\n\n See\n https://cloud.google.com/deployment-manager/runtime-configurator/reference/rest/v1beta1/projects.configs.variables/list\n\n :type page_size: int\n :param page_size:\n (Optional) Maximum number of variables to return per page.\n\n :type page_token: str\n :param page_token: opaque marker for the next \"page\" of variables. If\n not passed, will return the first page of variables.\n\n :type client: :class:`~google.cloud.runtimeconfig.client.Client`\n :param client:\n (Optional) The client to use. If not passed, falls back to the\n ``client`` stored on the current config.\n\n :rtype: :class:`~google.api.core.page_iterator.Iterator`\n :returns:\n Iterator of :class:`~google.cloud.runtimeconfig.variable.Variable`\n belonging to this project.\n \"\"\"\n path = '%s/variables' % (self.path,)\n client = self._require_client(client)\n iterator = page_iterator.HTTPIterator(\n client=client,\n api_request=client._connection.api_request,\n path=path,\n item_to_value=_item_to_variable,\n items_key='variables',\n page_token=page_token,\n max_results=page_size)\n iterator._MAX_RESULTS = 'pageSize'\n iterator.config = self\n return iterator\n\n\ndef _item_to_variable(iterator, resource):\n \"\"\"Convert a JSON variable to the native object.\n\n :type iterator: :class:`~google.api.core.page_iterator.Iterator`\n :param iterator: The iterator that has retrieved the item.\n\n :type resource: dict\n :param resource: An item to be converted to a variable.\n\n :rtype: :class:`.Variable`\n :returns: The next variable in the page.\n \"\"\"\n return Variable.from_api_repr(resource, iterator.config)\n", "path": "runtimeconfig/google/cloud/runtimeconfig/config.py"}]}
| 3,146 | 148 |
gh_patches_debug_210
|
rasdani/github-patches
|
git_diff
|
xonsh__xonsh-3049
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exception on startup (pygments_cache)
<!--- Provide a general summary of the issue in the Title above -->
<!--- If you have a question along the lines of "How do I do this Bash command in xonsh"
please first look over the Bash to Xonsh translation guide: http://xon.sh/bash_to_xsh.html
If you don't find an answer there, please do open an issue! -->
## xonfig
<!--- Please post the output of the `xonfig` command (run from inside xonsh) so we know more about your current setup -->
## Expected Behavior
<!--- Tell us what should happen -->
## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error
To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.
On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->
## Steps to Reproduce
<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/ptk2/__init__.py`
Content:
```
1 # must come before ptk / pygments imports
2 from xonsh.lazyasd import load_module_in_background
3
4 load_module_in_background(
5 "pkg_resources",
6 debug="XONSH_DEBUG",
7 replacements={"pygments.plugin": "pkg_resources"},
8 )
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py
--- a/xonsh/ptk2/__init__.py
+++ b/xonsh/ptk2/__init__.py
@@ -1,8 +0,0 @@
-# must come before ptk / pygments imports
-from xonsh.lazyasd import load_module_in_background
-
-load_module_in_background(
- "pkg_resources",
- debug="XONSH_DEBUG",
- replacements={"pygments.plugin": "pkg_resources"},
-)
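The patch simply empties `xonsh/ptk2/__init__.py`, so importing the subpackage no longer schedules the background `pkg_resources` import that was being substituted for `pygments.plugin`. A hypothetical smoke test after applying it (assumes a patched xonsh checkout on the import path):

```python
import importlib

# The now-empty package should import cleanly, with no import-time side effects.
mod = importlib.import_module("xonsh.ptk2")
print(mod.__name__, "imported OK; public names:",
      [n for n in dir(mod) if not n.startswith("_")])
```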
|
{"golden_diff": "diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py\n--- a/xonsh/ptk2/__init__.py\n+++ b/xonsh/ptk2/__init__.py\n@@ -1,8 +0,0 @@\n-# must come before ptk / pygments imports\n-from xonsh.lazyasd import load_module_in_background\n-\n-load_module_in_background(\n- \"pkg_resources\",\n- debug=\"XONSH_DEBUG\",\n- replacements={\"pygments.plugin\": \"pkg_resources\"},\n-)\n", "issue": "Exception on startup (pygments_cache)\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: http://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n<!--- Please post the output of the `xonfig` command (run from inside xonsh) so we know more about your current setup -->\r\n\r\n## Expected Behavior\r\n<!--- Tell us what should happen -->\r\n\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`. \r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\n\r\n## Steps to Reproduce\r\n<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->\r\n\n", "before_files": [{"content": "# must come before ptk / pygments imports\nfrom xonsh.lazyasd import load_module_in_background\n\nload_module_in_background(\n \"pkg_resources\",\n debug=\"XONSH_DEBUG\",\n replacements={\"pygments.plugin\": \"pkg_resources\"},\n)\n", "path": "xonsh/ptk2/__init__.py"}], "after_files": [{"content": "", "path": "xonsh/ptk2/__init__.py"}]}
| 584 | 125 |
gh_patches_debug_19926
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-4432
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Running into TypeError exception with `batch_predict` component
### What steps did you take:
I am using batch_predict component to schedule prediction job on AI platform. I was referring to steps here. https://github.com/kubeflow/pipelines/tree/master/components/gcp/ml_engine/batch_predict
- I downloaded the component from https://raw.githubusercontent.com/kubeflow/pipelines/1.0.0/components/gcp/ml_engine/batch_predict/component.yaml.
- loaded the component with `ml_op = load_component_from_file(<local-path>)`
- created pipeline component with `ml_op(<attributes-mentioned-below>)`
- scheduled the pipeline on kubeflow
Job Parameters:
```
prediction_input: {'runtimeVersion': '2.1'}
project_id: <gcs-project>
region: <region-name>
model_path: <gcs-path>
input_paths: [<list-of-input-paths>]
input_data_format: TF_RECORD_GZIP
output_path: <gcs-path>
output_data_format: JSON
```
### What happened:
Running into type error while generating the job_id.
Issue:
It is raising an exception while generating id to schedule the job. This line is failing https://github.com/kubeflow/pipelines/blob/1.0.0/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py#L44 with this error `TypeError: expected string or buffer`. I am not passing any job_id and job_id_prefix. It is using the default values for generating the name.
Error trace:
```
INFO:root:Start KFP context with ID: 858b3ff01cdeed5c0b0b7fd9d2655641.
Traceback (most recent call last):
File "/usr/local/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/local/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/ml/kfp_component/launcher/__main__.py", line 45, in <module>
main()
File "/ml/kfp_component/launcher/__main__.py", line 42, in main
launch(args.file_or_module, args.args)
File "kfp_component/launcher/launcher.py", line 45, in launch
return fire.Fire(module, command=args, name=module.__name__)
File "/usr/local/lib/python2.7/site-packages/fire/core.py", line 127, in Fire
component_trace = _Fire(component, args, context, name)
File "/usr/local/lib/python2.7/site-packages/fire/core.py", line 366, in _Fire
component, remaining_args)
File "/usr/local/lib/python2.7/site-packages/fire/core.py", line 542, in _CallCallable
result = fn(*varargs, **kwargs)
File "kfp_component/google/ml_engine/_batch_predict.py", line 70, in batch_predict
create_job(project_id, job, job_id_prefix, wait_interval)
File "kfp_component/google/ml_engine/_create_job.py", line 48, in create_job
return CreateJobOp(project_id, job, job_id_prefix, job_id, wait_interval
File "kfp_component/google/ml_engine/_create_job.py", line 63, in execute_and_wait
self._set_job_id(ctx.context_id())
File "kfp_component/google/ml_engine/_create_job.py", line 74, in _set_job_id
job_id = gcp_common.normalize_name(job_id)
File "kfp_component/google/common/_utils.py", line 45, in normalize_name
invalid_char_placeholder, name)
File "/usr/local/lib/python2.7/re.py", line 155, in sub
return _compile(pattern, flags).sub(repl, string, count)
TypeError: expected string or buffer
```
### What did you expect to happen:
I was expecting the component to schedule the job with auto generated job id.
### Environment:
<!-- Please fill in those that seem relevant. -->
How did you deploy Kubeflow Pipelines (KFP)?
- I used Kustomize to deploy standalone kubeflow pipeline manifests on GKE.
<!-- If you are not sure, here's [an introduction of all options](https://www.kubeflow.org/docs/pipelines/installation/overview/). -->
KFP version: <!-- If you are not sure, build commit shows on bottom of KFP UI left sidenav. -->
1.0.0
KFP SDK version: <!-- Please attach the output of this shell command: $pip list | grep kfp -->
kfp==0.5.1
### Anything else you would like to add:
[Miscellaneous information that will assist in solving the issue.]
/kind bug
<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->
<!--
// /area frontend
/area backend
// /area sdk
// /area testing
/area engprod
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from fire import decorators
16 from ._create_job import create_job
17
18 @decorators.SetParseFns(python_version=str, runtime_version=str)
19 def train(project_id, job_id_output_path, python_module=None, package_uris=None,
20 region=None, args=None, job_dir=None, python_version=None,
21 runtime_version=None, master_image_uri=None, worker_image_uri=None,
22 training_input=None, job_id_prefix=None, job_id=None, wait_interval=30):
23 """Creates a MLEngine training job.
24
25 Args:
26 project_id (str): Required. The ID of the parent project of the job.
27 python_module (str): Required. The Python module name to run after
28 installing the packages.
29 package_uris (list): Required. The Google Cloud Storage location of
30 the packages with the training program and any additional
31 dependencies. The maximum number of package URIs is 100.
32 region (str): Required. The Google Compute Engine region to run the
33 training job in
34 args (list): Command line arguments to pass to the program.
35 job_dir (str): A Google Cloud Storage path in which to store training
36 outputs and other data needed for training. This path is passed
37 to your TensorFlow program as the '--job-dir' command-line
38 argument. The benefit of specifying this field is that Cloud ML
39 validates the path for use in training.
40 python_version (str): Optional. The version of Python used in training.
41 If not set, the default version is '2.7'. Python '3.5' is
42 available when runtimeVersion is set to '1.4' and above.
43 Python '2.7' works with all supported runtime versions.
44 runtime_version (str): Optional. The Cloud ML Engine runtime version
45 to use for training. If not set, Cloud ML Engine uses the
46 default stable version, 1.0.
47 master_image_uri (str): The Docker image to run on the master replica.
48 This image must be in Container Registry.
49 worker_image_uri (str): The Docker image to run on the worker replica.
50 This image must be in Container Registry.
51 training_input (dict): Input parameters to create a training job.
52 job_id_prefix (str): the prefix of the generated job id.
53 job_id (str): the created job_id, takes precedence over generated job
54 id if set.
55 wait_interval (int): optional wait interval between calls
56 to get job status. Defaults to 30.
57 """
58 if not training_input:
59 training_input = {}
60 if python_module:
61 training_input['pythonModule'] = python_module
62 if package_uris:
63 training_input['packageUris'] = package_uris
64 if region:
65 training_input['region'] = region
66 if args:
67 training_input['args'] = args
68 if job_dir:
69 training_input['jobDir'] = job_dir
70 if python_version:
71 training_input['pythonVersion'] = python_version
72 if runtime_version:
73 training_input['runtimeVersion'] = runtime_version
74 if master_image_uri:
75 if 'masterConfig' not in training_input:
76 training_input['masterConfig'] = {}
77 training_input['masterConfig']['imageUri'] = master_image_uri
78 if worker_image_uri:
79 if 'workerConfig' not in training_input:
80 training_input['workerConfig'] = {}
81 training_input['workerConfig']['imageUri'] = worker_image_uri
82 job = {
83 'trainingInput': training_input
84 }
85 return create_job(project_id, job, job_id_prefix, job_id, wait_interval, job_id_output_path=job_id_output_path)
```
Path: `components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import re
16
17 from ._create_job import create_job
18
19 def batch_predict(project_id, model_path, input_paths, input_data_format,
20 output_path, region, job_id_output_path, output_data_format=None, prediction_input=None, job_id_prefix=None,
21 wait_interval=30):
22 """Creates a MLEngine batch prediction job.
23
24 Args:
25 project_id (str): Required. The ID of the parent project of the job.
26 model_path (str): Required. The path to the model. It can be either:
27 `projects/[PROJECT_ID]/models/[MODEL_ID]` or
28 `projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]`
29 or a GCS path of a model file.
30 input_paths (list): Required. The Google Cloud Storage location of
31 the input data files. May contain wildcards.
32 input_data_format (str): Required. The format of the input data files.
33 See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat.
34 output_path (str): Required. The output Google Cloud Storage location.
35 region (str): Required. The Google Compute Engine region to run the
36 prediction job in.
37 output_data_format (str): Optional. Format of the output data files,
38 defaults to JSON.
39 prediction_input (dict): Input parameters to create a prediction job.
40 job_id_prefix (str): the prefix of the generated job id.
41 wait_interval (int): optional wait interval between calls
42 to get job status. Defaults to 30.
43 """
44 if not prediction_input:
45 prediction_input = {}
46 if not model_path:
47 raise ValueError('model_path must be provided.')
48 if _is_model_name(model_path):
49 prediction_input['modelName'] = model_path
50 elif _is_model_version_name(model_path):
51 prediction_input['versionName'] = model_path
52 elif _is_gcs_path(model_path):
53 prediction_input['uri'] = model_path
54 else:
55 raise ValueError('model_path value is invalid.')
56
57 if input_paths:
58 prediction_input['inputPaths'] = input_paths
59 if input_data_format:
60 prediction_input['dataFormat'] = input_data_format
61 if output_path:
62 prediction_input['outputPath'] = output_path
63 if output_data_format:
64 prediction_input['outputDataFormat'] = output_data_format
65 if region:
66 prediction_input['region'] = region
67 job = {
68 'predictionInput': prediction_input
69 }
70 create_job(project_id, job, job_id_prefix, wait_interval, job_id_output_path=job_id_output_path)
71
72 def _is_model_name(name):
73 return re.match(r'/projects/[^/]+/models/[^/]+$', name)
74
75 def _is_model_version_name(name):
76 return re.match(r'/projects/[^/]+/models/[^/]+/versions/[^/]+$', name)
77
78 def _is_gcs_path(name):
79 return name.startswith('gs://')
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py
--- a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py
@@ -67,7 +67,13 @@
job = {
'predictionInput': prediction_input
}
- create_job(project_id, job, job_id_prefix, wait_interval, job_id_output_path=job_id_output_path)
+ create_job(
+ project_id=project_id,
+ job=job,
+ job_id_prefix=job_id_prefix,
+ wait_interval=wait_interval,
+ job_id_output_path=job_id_output_path,
+ )
def _is_model_name(name):
return re.match(r'/projects/[^/]+/models/[^/]+$', name)
diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py
--- a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py
@@ -82,4 +82,11 @@
job = {
'trainingInput': training_input
}
- return create_job(project_id, job, job_id_prefix, job_id, wait_interval, job_id_output_path=job_id_output_path)
\ No newline at end of file
+ return create_job(
+ project_id=project_id,
+ job=job,
+ job_id_prefix=job_id_prefix,
+ job_id=job_id,
+ wait_interval=wait_interval,
+ job_id_output_path=job_id_output_path,
+ )
\ No newline at end of file
|
{"golden_diff": "diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py\n@@ -67,7 +67,13 @@\n job = {\n 'predictionInput': prediction_input\n }\n- create_job(project_id, job, job_id_prefix, wait_interval, job_id_output_path=job_id_output_path)\n+ create_job(\n+ project_id=project_id,\n+ job=job,\n+ job_id_prefix=job_id_prefix,\n+ wait_interval=wait_interval,\n+ job_id_output_path=job_id_output_path,\n+ )\n \n def _is_model_name(name):\n return re.match(r'/projects/[^/]+/models/[^/]+$', name)\ndiff --git a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py\n@@ -82,4 +82,11 @@\n job = {\n 'trainingInput': training_input\n }\n- return create_job(project_id, job, job_id_prefix, job_id, wait_interval, job_id_output_path=job_id_output_path)\n\\ No newline at end of file\n+ return create_job(\n+ project_id=project_id,\n+ job=job,\n+ job_id_prefix=job_id_prefix,\n+ job_id=job_id,\n+ wait_interval=wait_interval,\n+ job_id_output_path=job_id_output_path,\n+ )\n\\ No newline at end of file\n", "issue": "Running into TypeError exception with `batch_predict` component\n### What steps did you take:\r\nI am using batch_predict component to schedule prediction job on AI platform. I was referring to steps here. https://github.com/kubeflow/pipelines/tree/master/components/gcp/ml_engine/batch_predict\r\n- I downloaded the component from https://raw.githubusercontent.com/kubeflow/pipelines/1.0.0/components/gcp/ml_engine/batch_predict/component.yaml. \r\n- loaded the component with `ml_op = load_component_from_file(<local-path>)`\r\n- created pipeline component with `ml_op(<attributes-mentioned-below>)`\r\n- scheduled the pipeline on kubeflow\r\n\r\nJob Parameters:\r\n\r\n```\r\nprediction_input: {'runtimeVersion': '2.1'}\r\nproject_id: <gcs-project>\r\nregion: <region-name>\r\nmodel_path: <gcs-path>\r\ninput_paths: [<list-of-input-paths>]\r\ninput_data_format: TF_RECORD_GZIP\r\noutput_path: <gcs-path>\r\noutput_data_format: JSON\r\n```\r\n\r\n### What happened:\r\n\r\nRunning into type error while generating the job_id.\r\n\r\nIssue:\r\n\r\nIt is raising an exception while generating id to schedule the job. This line is failing https://github.com/kubeflow/pipelines/blob/1.0.0/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py#L44 with this error `TypeError: expected string or buffer`. I am not passing any job_id and job_id_prefix. 
It is using the default values for generating the name.\r\n\r\nError trace:\r\n\r\n```\r\nINFO:root:Start KFP context with ID: 858b3ff01cdeed5c0b0b7fd9d2655641.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/runpy.py\", line 174, in _run_module_as_main\r\n \"__main__\", fname, loader, pkg_name)\r\n File \"/usr/local/lib/python2.7/runpy.py\", line 72, in _run_code\r\n exec code in run_globals\r\n File \"/ml/kfp_component/launcher/__main__.py\", line 45, in <module>\r\n main()\r\n File \"/ml/kfp_component/launcher/__main__.py\", line 42, in main\r\n launch(args.file_or_module, args.args)\r\n File \"kfp_component/launcher/launcher.py\", line 45, in launch\r\n return fire.Fire(module, command=args, name=module.__name__)\r\n File \"/usr/local/lib/python2.7/site-packages/fire/core.py\", line 127, in Fire\r\n component_trace = _Fire(component, args, context, name)\r\n File \"/usr/local/lib/python2.7/site-packages/fire/core.py\", line 366, in _Fire\r\n component, remaining_args)\r\n File \"/usr/local/lib/python2.7/site-packages/fire/core.py\", line 542, in _CallCallable\r\n result = fn(*varargs, **kwargs)\r\n File \"kfp_component/google/ml_engine/_batch_predict.py\", line 70, in batch_predict\r\n create_job(project_id, job, job_id_prefix, wait_interval)\r\n File \"kfp_component/google/ml_engine/_create_job.py\", line 48, in create_job\r\n return CreateJobOp(project_id, job, job_id_prefix, job_id, wait_interval\r\n File \"kfp_component/google/ml_engine/_create_job.py\", line 63, in execute_and_wait\r\n self._set_job_id(ctx.context_id())\r\n File \"kfp_component/google/ml_engine/_create_job.py\", line 74, in _set_job_id\r\n job_id = gcp_common.normalize_name(job_id)\r\n File \"kfp_component/google/common/_utils.py\", line 45, in normalize_name\r\n invalid_char_placeholder, name)\r\n File \"/usr/local/lib/python2.7/re.py\", line 155, in sub\r\n return _compile(pattern, flags).sub(repl, string, count)\r\nTypeError: expected string or buffer\r\n\r\n```\r\n\r\n### What did you expect to happen:\r\n\r\nI was expecting the component to schedule the job with auto generated job id.\r\n\r\n### Environment:\r\n<!-- Please fill in those that seem relevant. -->\r\n\r\nHow did you deploy Kubeflow Pipelines (KFP)?\r\n- I used Kustomize to deploy standalone kubeflow pipeline manifests on GKE.\r\n\r\n<!-- If you are not sure, here's [an introduction of all options](https://www.kubeflow.org/docs/pipelines/installation/overview/). -->\r\n\r\nKFP version: <!-- If you are not sure, build commit shows on bottom of KFP UI left sidenav. 
-->\r\n1.0.0\r\n\r\nKFP SDK version: <!-- Please attach the output of this shell command: $pip list | grep kfp -->\r\nkfp==0.5.1\r\n\r\n### Anything else you would like to add:\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n/kind bug\r\n<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->\r\n<!--\r\n// /area frontend\r\n /area backend\r\n// /area sdk\r\n// /area testing\r\n /area engprod\r\n-->\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom fire import decorators\nfrom ._create_job import create_job\n\[email protected](python_version=str, runtime_version=str)\ndef train(project_id, job_id_output_path, python_module=None, package_uris=None, \n region=None, args=None, job_dir=None, python_version=None, \n runtime_version=None, master_image_uri=None, worker_image_uri=None, \n training_input=None, job_id_prefix=None, job_id=None, wait_interval=30):\n \"\"\"Creates a MLEngine training job.\n\n Args:\n project_id (str): Required. The ID of the parent project of the job.\n python_module (str): Required. The Python module name to run after \n installing the packages.\n package_uris (list): Required. The Google Cloud Storage location of \n the packages with the training program and any additional \n dependencies. The maximum number of package URIs is 100.\n region (str): Required. The Google Compute Engine region to run the \n training job in\n args (list): Command line arguments to pass to the program.\n job_dir (str): A Google Cloud Storage path in which to store training \n outputs and other data needed for training. This path is passed \n to your TensorFlow program as the '--job-dir' command-line \n argument. The benefit of specifying this field is that Cloud ML \n validates the path for use in training.\n python_version (str): Optional. The version of Python used in training. \n If not set, the default version is '2.7'. Python '3.5' is \n available when runtimeVersion is set to '1.4' and above. \n Python '2.7' works with all supported runtime versions.\n runtime_version (str): Optional. The Cloud ML Engine runtime version \n to use for training. If not set, Cloud ML Engine uses the \n default stable version, 1.0. \n master_image_uri (str): The Docker image to run on the master replica. \n This image must be in Container Registry.\n worker_image_uri (str): The Docker image to run on the worker replica. \n This image must be in Container Registry.\n training_input (dict): Input parameters to create a training job.\n job_id_prefix (str): the prefix of the generated job id.\n job_id (str): the created job_id, takes precedence over generated job\n id if set.\n wait_interval (int): optional wait interval between calls\n to get job status. 
Defaults to 30.\n \"\"\"\n if not training_input:\n training_input = {}\n if python_module:\n training_input['pythonModule'] = python_module\n if package_uris:\n training_input['packageUris'] = package_uris\n if region:\n training_input['region'] = region\n if args:\n training_input['args'] = args\n if job_dir:\n training_input['jobDir'] = job_dir\n if python_version:\n training_input['pythonVersion'] = python_version\n if runtime_version:\n training_input['runtimeVersion'] = runtime_version\n if master_image_uri:\n if 'masterConfig' not in training_input:\n training_input['masterConfig'] = {}\n training_input['masterConfig']['imageUri'] = master_image_uri\n if worker_image_uri:\n if 'workerConfig' not in training_input:\n training_input['workerConfig'] = {}\n training_input['workerConfig']['imageUri'] = worker_image_uri\n job = {\n 'trainingInput': training_input\n }\n return create_job(project_id, job, job_id_prefix, job_id, wait_interval, job_id_output_path=job_id_output_path)", "path": "components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom ._create_job import create_job\n\ndef batch_predict(project_id, model_path, input_paths, input_data_format, \n output_path, region, job_id_output_path, output_data_format=None, prediction_input=None, job_id_prefix=None,\n wait_interval=30):\n \"\"\"Creates a MLEngine batch prediction job.\n\n Args:\n project_id (str): Required. The ID of the parent project of the job.\n model_path (str): Required. The path to the model. It can be either:\n `projects/[PROJECT_ID]/models/[MODEL_ID]` or \n `projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]`\n or a GCS path of a model file.\n input_paths (list): Required. The Google Cloud Storage location of \n the input data files. May contain wildcards.\n input_data_format (str): Required. The format of the input data files.\n See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat.\n output_path (str): Required. The output Google Cloud Storage location.\n region (str): Required. The Google Compute Engine region to run the \n prediction job in.\n output_data_format (str): Optional. Format of the output data files, \n defaults to JSON.\n prediction_input (dict): Input parameters to create a prediction job.\n job_id_prefix (str): the prefix of the generated job id.\n wait_interval (int): optional wait interval between calls\n to get job status. 
Defaults to 30.\n \"\"\"\n if not prediction_input:\n prediction_input = {}\n if not model_path:\n raise ValueError('model_path must be provided.')\n if _is_model_name(model_path):\n prediction_input['modelName'] = model_path\n elif _is_model_version_name(model_path):\n prediction_input['versionName'] = model_path\n elif _is_gcs_path(model_path):\n prediction_input['uri'] = model_path\n else:\n raise ValueError('model_path value is invalid.')\n \n if input_paths:\n prediction_input['inputPaths'] = input_paths\n if input_data_format:\n prediction_input['dataFormat'] = input_data_format\n if output_path:\n prediction_input['outputPath'] = output_path\n if output_data_format:\n prediction_input['outputDataFormat'] = output_data_format\n if region:\n prediction_input['region'] = region\n job = {\n 'predictionInput': prediction_input\n }\n create_job(project_id, job, job_id_prefix, wait_interval, job_id_output_path=job_id_output_path)\n \ndef _is_model_name(name):\n return re.match(r'/projects/[^/]+/models/[^/]+$', name)\n\ndef _is_model_version_name(name):\n return re.match(r'/projects/[^/]+/models/[^/]+/versions/[^/]+$', name)\n\ndef _is_gcs_path(name):\n return name.startswith('gs://')", "path": "components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom fire import decorators\nfrom ._create_job import create_job\n\[email protected](python_version=str, runtime_version=str)\ndef train(project_id, job_id_output_path, python_module=None, package_uris=None, \n region=None, args=None, job_dir=None, python_version=None, \n runtime_version=None, master_image_uri=None, worker_image_uri=None, \n training_input=None, job_id_prefix=None, job_id=None, wait_interval=30):\n \"\"\"Creates a MLEngine training job.\n\n Args:\n project_id (str): Required. The ID of the parent project of the job.\n python_module (str): Required. The Python module name to run after \n installing the packages.\n package_uris (list): Required. The Google Cloud Storage location of \n the packages with the training program and any additional \n dependencies. The maximum number of package URIs is 100.\n region (str): Required. The Google Compute Engine region to run the \n training job in\n args (list): Command line arguments to pass to the program.\n job_dir (str): A Google Cloud Storage path in which to store training \n outputs and other data needed for training. This path is passed \n to your TensorFlow program as the '--job-dir' command-line \n argument. The benefit of specifying this field is that Cloud ML \n validates the path for use in training.\n python_version (str): Optional. The version of Python used in training. \n If not set, the default version is '2.7'. Python '3.5' is \n available when runtimeVersion is set to '1.4' and above. \n Python '2.7' works with all supported runtime versions.\n runtime_version (str): Optional. 
The Cloud ML Engine runtime version \n to use for training. If not set, Cloud ML Engine uses the \n default stable version, 1.0. \n master_image_uri (str): The Docker image to run on the master replica. \n This image must be in Container Registry.\n worker_image_uri (str): The Docker image to run on the worker replica. \n This image must be in Container Registry.\n training_input (dict): Input parameters to create a training job.\n job_id_prefix (str): the prefix of the generated job id.\n job_id (str): the created job_id, takes precedence over generated job\n id if set.\n wait_interval (int): optional wait interval between calls\n to get job status. Defaults to 30.\n \"\"\"\n if not training_input:\n training_input = {}\n if python_module:\n training_input['pythonModule'] = python_module\n if package_uris:\n training_input['packageUris'] = package_uris\n if region:\n training_input['region'] = region\n if args:\n training_input['args'] = args\n if job_dir:\n training_input['jobDir'] = job_dir\n if python_version:\n training_input['pythonVersion'] = python_version\n if runtime_version:\n training_input['runtimeVersion'] = runtime_version\n if master_image_uri:\n if 'masterConfig' not in training_input:\n training_input['masterConfig'] = {}\n training_input['masterConfig']['imageUri'] = master_image_uri\n if worker_image_uri:\n if 'workerConfig' not in training_input:\n training_input['workerConfig'] = {}\n training_input['workerConfig']['imageUri'] = worker_image_uri\n job = {\n 'trainingInput': training_input\n }\n return create_job(\n project_id=project_id,\n job=job,\n job_id_prefix=job_id_prefix,\n job_id=job_id,\n wait_interval=wait_interval,\n job_id_output_path=job_id_output_path,\n )", "path": "components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_train.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom ._create_job import create_job\n\ndef batch_predict(project_id, model_path, input_paths, input_data_format, \n output_path, region, job_id_output_path, output_data_format=None, prediction_input=None, job_id_prefix=None,\n wait_interval=30):\n \"\"\"Creates a MLEngine batch prediction job.\n\n Args:\n project_id (str): Required. The ID of the parent project of the job.\n model_path (str): Required. The path to the model. It can be either:\n `projects/[PROJECT_ID]/models/[MODEL_ID]` or \n `projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]`\n or a GCS path of a model file.\n input_paths (list): Required. The Google Cloud Storage location of \n the input data files. May contain wildcards.\n input_data_format (str): Required. The format of the input data files.\n See https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#DataFormat.\n output_path (str): Required. The output Google Cloud Storage location.\n region (str): Required. The Google Compute Engine region to run the \n prediction job in.\n output_data_format (str): Optional. 
Format of the output data files, \n defaults to JSON.\n prediction_input (dict): Input parameters to create a prediction job.\n job_id_prefix (str): the prefix of the generated job id.\n wait_interval (int): optional wait interval between calls\n to get job status. Defaults to 30.\n \"\"\"\n if not prediction_input:\n prediction_input = {}\n if not model_path:\n raise ValueError('model_path must be provided.')\n if _is_model_name(model_path):\n prediction_input['modelName'] = model_path\n elif _is_model_version_name(model_path):\n prediction_input['versionName'] = model_path\n elif _is_gcs_path(model_path):\n prediction_input['uri'] = model_path\n else:\n raise ValueError('model_path value is invalid.')\n \n if input_paths:\n prediction_input['inputPaths'] = input_paths\n if input_data_format:\n prediction_input['dataFormat'] = input_data_format\n if output_path:\n prediction_input['outputPath'] = output_path\n if output_data_format:\n prediction_input['outputDataFormat'] = output_data_format\n if region:\n prediction_input['region'] = region\n job = {\n 'predictionInput': prediction_input\n }\n create_job(\n project_id=project_id,\n job=job,\n job_id_prefix=job_id_prefix,\n wait_interval=wait_interval,\n job_id_output_path=job_id_output_path,\n )\n \ndef _is_model_name(name):\n return re.match(r'/projects/[^/]+/models/[^/]+$', name)\n\ndef _is_model_version_name(name):\n return re.match(r'/projects/[^/]+/models/[^/]+/versions/[^/]+$', name)\n\ndef _is_gcs_path(name):\n return name.startswith('gs://')", "path": "components/gcp/container/component_sdk/python/kfp_component/google/ml_engine/_batch_predict.py"}]}
| 3,405 | 423 |
gh_patches_debug_33044
|
rasdani/github-patches
|
git_diff
|
vispy__vispy-2135
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove warning on import about missing networkx dependency
Currently, importing vispy (or some submodule) without networkx installed results in a warning about installing networkx, even if the user/downstream library has no intention of using the graph layout. The warning should be delayed and turned into an error, as noted by @djhoese [here](https://github.com/napari/napari/issues/2979#issuecomment-874159877).
xref napari/napari#2979
--- END ISSUE ---
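For context, the fix amounts to deferring the failure from import time to first use; a minimal sketch of that pattern (a hypothetical standalone helper, not vispy's actual class):
```python
# Sketch of the deferred-dependency pattern (hypothetical helper, not vispy's
# real code): importing the module stays silent, and the error is raised only
# when a networkx-backed layout is actually requested.
try:
    import networkx as nx
except ModuleNotFoundError:
    nx = None  # no warning emitted at import time


def networkx_positions(graph, layout="spring"):
    if nx is None:
        raise ValueError(
            "networkx not found, please install networkx to use its layouts")
    layout_function = getattr(nx, layout + "_layout")  # e.g. nx.spring_layout
    return dict(layout_function(graph))
```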
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/visuals/graphs/layouts/networkx_layout.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3
4 from ..util import _straight_line_vertices, issparse
5 import numpy as np
6 try:
7 import networkx as nx
8 except ModuleNotFoundError:
9 import warnings
10 warnings.warn(
11 "Networkx not found, please install network to use its layouts")
12 nx = None
13
14
15 class NetworkxCoordinates:
16 def __init__(self, graph=None, layout=None, **kwargs):
17 """
18 Converts :graph: into a layout. Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays.
19
20 Parameters
21 ----------
22 graph : a networkx graph.
23 layout : str or dict or iterable-object of float32, optional
24 - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts.
25 - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate).
26 - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2).
27 kwargs: dict, optional
28 when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx
29 """
30 if isinstance(graph, type(None)):
31 raise ValueError("Requires networkx input")
32 self.graph = graph
33 self.positions = np.zeros((len(graph), 2), dtype=np.float32)
34 # default random positions
35 if isinstance(layout, type(None)):
36 self.positions = np.random.rand(*self.positions.shape)
37
38 # check for networkx
39 elif isinstance(layout, str):
40 if nx:
41 if not layout.endswith("_layout"):
42 layout += "_layout" # append for nx
43 layout_function = getattr(nx, layout)
44 if layout_function:
45 self.positions = np.asarray(
46 [i for i in dict(layout_function(graph, **kwargs)).values()])
47 else:
48 raise ValueError("Check networkx for layouts")
49 else:
50 raise ValueError("networkx not found")
51 # assume dict from networkx; values are 2-array
52 elif isinstance(layout, dict):
53 self.positions = np.asarray([i for i in layout.values()])
54
55 # assume given values
56 elif isinstance(layout, np.ndarray):
57 assert layout.ndim == 2
58 assert layout.shape[0] == len(graph)
59 self.positions = layout
60 else:
61 raise ValueError("Input not understood")
62
63 # normalize coordinates
64 self.positions = (self.positions - self.positions.min()) / \
65 (self.positions.max() - self.positions.min())
66 self.positions = self.positions.astype(np.float32)
67
68 def __call__(self, adjacency_mat, directed=False):
69 """
70 Parameters
71 ----------
72 adjacency_mat : sparse adjacency matrix.
73 directed : bool, default False
74
75 Returns
76 ---------
77 (node_vertices, line_vertices, arrow_vertices) : tuple
78 Yields the node and line vertices in a tuple. This layout only yields a
79 single time, and has no builtin animation
80 """
81 if issparse(adjacency_mat):
82 adjacency_mat = adjacency_mat.tocoo()
83 line_vertices, arrows = _straight_line_vertices(
84 adjacency_mat, self.positions, directed)
85
86 yield self.positions, line_vertices, arrows
87
88 @property
89 def adj(self):
90 """Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function."""
91 return nx.adjacency_matrix(self.graph)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vispy/visuals/graphs/layouts/networkx_layout.py b/vispy/visuals/graphs/layouts/networkx_layout.py
--- a/vispy/visuals/graphs/layouts/networkx_layout.py
+++ b/vispy/visuals/graphs/layouts/networkx_layout.py
@@ -6,9 +6,6 @@
try:
import networkx as nx
except ModuleNotFoundError:
- import warnings
- warnings.warn(
- "Networkx not found, please install network to use its layouts")
nx = None
@@ -27,6 +24,8 @@
kwargs: dict, optional
when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx
"""
+ if nx is None:
+ raise ValueError("networkx not found, please install networkx to use its layouts")
if isinstance(graph, type(None)):
raise ValueError("Requires networkx input")
self.graph = graph
@@ -37,17 +36,14 @@
# check for networkx
elif isinstance(layout, str):
- if nx:
- if not layout.endswith("_layout"):
- layout += "_layout" # append for nx
- layout_function = getattr(nx, layout)
- if layout_function:
- self.positions = np.asarray(
- [i for i in dict(layout_function(graph, **kwargs)).values()])
- else:
- raise ValueError("Check networkx for layouts")
+ if not layout.endswith("_layout"):
+ layout += "_layout" # append for nx
+ layout_function = getattr(nx, layout)
+ if layout_function:
+ self.positions = np.asarray(
+ [i for i in dict(layout_function(graph, **kwargs)).values()])
else:
- raise ValueError("networkx not found")
+ raise ValueError("Check networkx for layouts")
# assume dict from networkx; values are 2-array
elif isinstance(layout, dict):
self.positions = np.asarray([i for i in layout.values()])
|
{"golden_diff": "diff --git a/vispy/visuals/graphs/layouts/networkx_layout.py b/vispy/visuals/graphs/layouts/networkx_layout.py\n--- a/vispy/visuals/graphs/layouts/networkx_layout.py\n+++ b/vispy/visuals/graphs/layouts/networkx_layout.py\n@@ -6,9 +6,6 @@\n try:\n import networkx as nx\n except ModuleNotFoundError:\n- import warnings\n- warnings.warn(\n- \"Networkx not found, please install network to use its layouts\")\n nx = None\n \n \n@@ -27,6 +24,8 @@\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n+ if nx is None:\n+ raise ValueError(\"networkx not found, please install networkx to use its layouts\")\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n@@ -37,17 +36,14 @@\n \n # check for networkx\n elif isinstance(layout, str):\n- if nx:\n- if not layout.endswith(\"_layout\"):\n- layout += \"_layout\" # append for nx\n- layout_function = getattr(nx, layout)\n- if layout_function:\n- self.positions = np.asarray(\n- [i for i in dict(layout_function(graph, **kwargs)).values()])\n- else:\n- raise ValueError(\"Check networkx for layouts\")\n+ if not layout.endswith(\"_layout\"):\n+ layout += \"_layout\" # append for nx\n+ layout_function = getattr(nx, layout)\n+ if layout_function:\n+ self.positions = np.asarray(\n+ [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n- raise ValueError(\"networkx not found\")\n+ raise ValueError(\"Check networkx for layouts\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n", "issue": "Remove warning on import about missing networkx dependency\nCurrently, importing vispy (or some submodule) without networkx installed results in a warning about installing networkx, even if the user/downstream library has no intention of using the graph layout. The warning should be delayed and turned into an error, as noted by @djhoese [here](https://github.com/napari/napari/issues/2979#issuecomment-874159877).\r\n\r\nxref napari/napari#2979\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3\nfrom ..util import _straight_line_vertices, issparse\nimport numpy as np\ntry:\n import networkx as nx\nexcept ModuleNotFoundError:\n import warnings\n warnings.warn(\n \"Networkx not found, please install network to use its layouts\")\n nx = None\n\n\nclass NetworkxCoordinates:\n def __init__(self, graph=None, layout=None, **kwargs):\n \"\"\"\n Converts :graph: into a layout. 
Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays.\n\n Parameters\n ----------\n graph : a networkx graph.\n layout : str or dict or iterable-object of float32, optional\n - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts.\n - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate).\n - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2).\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n self.positions = np.zeros((len(graph), 2), dtype=np.float32)\n # default random positions\n if isinstance(layout, type(None)):\n self.positions = np.random.rand(*self.positions.shape)\n\n # check for networkx\n elif isinstance(layout, str):\n if nx:\n if not layout.endswith(\"_layout\"):\n layout += \"_layout\" # append for nx\n layout_function = getattr(nx, layout)\n if layout_function:\n self.positions = np.asarray(\n [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n raise ValueError(\"Check networkx for layouts\")\n else:\n raise ValueError(\"networkx not found\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n\n # assume given values\n elif isinstance(layout, np.ndarray):\n assert layout.ndim == 2\n assert layout.shape[0] == len(graph)\n self.positions = layout\n else:\n raise ValueError(\"Input not understood\")\n\n # normalize coordinates\n self.positions = (self.positions - self.positions.min()) / \\\n (self.positions.max() - self.positions.min())\n self.positions = self.positions.astype(np.float32)\n\n def __call__(self, adjacency_mat, directed=False):\n \"\"\"\n Parameters\n ----------\n adjacency_mat : sparse adjacency matrix.\n directed : bool, default False\n\n Returns\n ---------\n (node_vertices, line_vertices, arrow_vertices) : tuple\n Yields the node and line vertices in a tuple. This layout only yields a\n single time, and has no builtin animation\n \"\"\"\n if issparse(adjacency_mat):\n adjacency_mat = adjacency_mat.tocoo()\n line_vertices, arrows = _straight_line_vertices(\n adjacency_mat, self.positions, directed)\n\n yield self.positions, line_vertices, arrows\n\n @property\n def adj(self):\n \"\"\"Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.\"\"\"\n return nx.adjacency_matrix(self.graph)\n", "path": "vispy/visuals/graphs/layouts/networkx_layout.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.#!/usr/bin/env python3\nfrom ..util import _straight_line_vertices, issparse\nimport numpy as np\ntry:\n import networkx as nx\nexcept ModuleNotFoundError:\n nx = None\n\n\nclass NetworkxCoordinates:\n def __init__(self, graph=None, layout=None, **kwargs):\n \"\"\"\n Converts :graph: into a layout. 
Can be used in conjunction with networkx layouts or using raw 2D-numpy arrays.\n\n Parameters\n ----------\n graph : a networkx graph.\n layout : str or dict or iterable-object of float32, optional\n - When :layout: is s string, a lookup will be performed in the networkx avaiable layouts.\n - When :layout: is a dict, it will be assumed that it takes the shape (key, value) = (node_id, 2D-coordinate).\n - When :layout: is numpy array it is assumed it takes the shape (number_of_nodes, 2).\n kwargs: dict, optional\n when layout is :str: :kwargs: will act as a setting dictionary for the layout function of networkx\n \"\"\"\n if nx is None:\n raise ValueError(\"networkx not found, please install networkx to use its layouts\")\n if isinstance(graph, type(None)):\n raise ValueError(\"Requires networkx input\")\n self.graph = graph\n self.positions = np.zeros((len(graph), 2), dtype=np.float32)\n # default random positions\n if isinstance(layout, type(None)):\n self.positions = np.random.rand(*self.positions.shape)\n\n # check for networkx\n elif isinstance(layout, str):\n if not layout.endswith(\"_layout\"):\n layout += \"_layout\" # append for nx\n layout_function = getattr(nx, layout)\n if layout_function:\n self.positions = np.asarray(\n [i for i in dict(layout_function(graph, **kwargs)).values()])\n else:\n raise ValueError(\"Check networkx for layouts\")\n # assume dict from networkx; values are 2-array\n elif isinstance(layout, dict):\n self.positions = np.asarray([i for i in layout.values()])\n\n # assume given values\n elif isinstance(layout, np.ndarray):\n assert layout.ndim == 2\n assert layout.shape[0] == len(graph)\n self.positions = layout\n else:\n raise ValueError(\"Input not understood\")\n\n # normalize coordinates\n self.positions = (self.positions - self.positions.min()) / \\\n (self.positions.max() - self.positions.min())\n self.positions = self.positions.astype(np.float32)\n\n def __call__(self, adjacency_mat, directed=False):\n \"\"\"\n Parameters\n ----------\n adjacency_mat : sparse adjacency matrix.\n directed : bool, default False\n\n Returns\n ---------\n (node_vertices, line_vertices, arrow_vertices) : tuple\n Yields the node and line vertices in a tuple. This layout only yields a\n single time, and has no builtin animation\n \"\"\"\n if issparse(adjacency_mat):\n adjacency_mat = adjacency_mat.tocoo()\n line_vertices, arrows = _straight_line_vertices(\n adjacency_mat, self.positions, directed)\n\n yield self.positions, line_vertices, arrows\n\n @property\n def adj(self):\n \"\"\"Convenient storage and holder of the adjacency matrix for the :scene.visuals.Graph: function.\"\"\"\n return nx.adjacency_matrix(self.graph)\n", "path": "vispy/visuals/graphs/layouts/networkx_layout.py"}]}
| 1,333 | 447 |
gh_patches_debug_59198
|
rasdani/github-patches
|
git_diff
|
Nitrate__Nitrate-319
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix filter calls
As with the calls to `map`, these calls must be fixed as well.
--- END ISSUE ---
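For context, on Python 3 `filter()` returns a lazy iterator, so callers that take `len()` of the result — as `email_plan_update()` below does — raise a `TypeError`; a tiny illustration with made-up values, not project code:
```python
# Why a bare filter() call is a problem on Python 3 (illustrative only):
recipients = {"[email protected]", "", None}

lazy = filter(lambda e: bool(e), recipients)
# len(lazy) would raise: TypeError: object of type 'filter' has no len()

usable = [r for r in recipients if r]  # the list the patch returns instead
assert len(usable) == 1
```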
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tcms/testplans/helpers/email.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from django.conf import settings
3
4 from tcms.core.utils.mailto import send_email_using_threading
5
6
7 def email_plan_update(plan):
8 recipients = get_plan_notification_recipients(plan)
9 if len(recipients) == 0:
10 return
11 subject = u'TestPlan %s has been updated.' % plan.pk
12 send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject,
13 recipients, {'plan': plan})
14
15
16 def email_plan_deletion(plan):
17 recipients = get_plan_notification_recipients(plan)
18 if len(recipients) == 0:
19 return
20 subject = u'TestPlan %s has been deleted.' % plan.pk
21 send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject,
22 recipients, {'plan': plan})
23
24
25 def get_plan_notification_recipients(plan):
26 recipients = set()
27 if plan.owner:
28 if plan.email_settings.auto_to_plan_owner:
29 recipients.add(plan.owner.email)
30 if plan.email_settings.auto_to_plan_author:
31 recipients.add(plan.author.email)
32 if plan.email_settings.auto_to_case_owner:
33 case_authors = plan.case.values_list('author__email', flat=True)
34 recipients.update(case_authors)
35 if plan.email_settings.auto_to_case_default_tester:
36 case_testers = plan.case.values_list('default_tester__email',
37 flat=True)
38 recipients.update(case_testers)
39 return filter(lambda e: bool(e), recipients)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tcms/testplans/helpers/email.py b/tcms/testplans/helpers/email.py
--- a/tcms/testplans/helpers/email.py
+++ b/tcms/testplans/helpers/email.py
@@ -36,4 +36,4 @@
case_testers = plan.case.values_list('default_tester__email',
flat=True)
recipients.update(case_testers)
- return filter(lambda e: bool(e), recipients)
+ return [r for r in recipients if r]
|
{"golden_diff": "diff --git a/tcms/testplans/helpers/email.py b/tcms/testplans/helpers/email.py\n--- a/tcms/testplans/helpers/email.py\n+++ b/tcms/testplans/helpers/email.py\n@@ -36,4 +36,4 @@\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n- return filter(lambda e: bool(e), recipients)\n+ return [r for r in recipients if r]\n", "issue": "Fix filter calls\nSimilar with the calls to `map`, these calls must be fixed as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\n\nfrom tcms.core.utils.mailto import send_email_using_threading\n\n\ndef email_plan_update(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been updated.' % plan.pk\n send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef email_plan_deletion(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been deleted.' % plan.pk\n send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef get_plan_notification_recipients(plan):\n recipients = set()\n if plan.owner:\n if plan.email_settings.auto_to_plan_owner:\n recipients.add(plan.owner.email)\n if plan.email_settings.auto_to_plan_author:\n recipients.add(plan.author.email)\n if plan.email_settings.auto_to_case_owner:\n case_authors = plan.case.values_list('author__email', flat=True)\n recipients.update(case_authors)\n if plan.email_settings.auto_to_case_default_tester:\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n return filter(lambda e: bool(e), recipients)\n", "path": "tcms/testplans/helpers/email.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.conf import settings\n\nfrom tcms.core.utils.mailto import send_email_using_threading\n\n\ndef email_plan_update(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been updated.' % plan.pk\n send_email_using_threading(settings.PLAN_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef email_plan_deletion(plan):\n recipients = get_plan_notification_recipients(plan)\n if len(recipients) == 0:\n return\n subject = u'TestPlan %s has been deleted.' % plan.pk\n send_email_using_threading(settings.PLAN_DELELE_EMAIL_TEMPLATE, subject,\n recipients, {'plan': plan})\n\n\ndef get_plan_notification_recipients(plan):\n recipients = set()\n if plan.owner:\n if plan.email_settings.auto_to_plan_owner:\n recipients.add(plan.owner.email)\n if plan.email_settings.auto_to_plan_author:\n recipients.add(plan.author.email)\n if plan.email_settings.auto_to_case_owner:\n case_authors = plan.case.values_list('author__email', flat=True)\n recipients.update(case_authors)\n if plan.email_settings.auto_to_case_default_tester:\n case_testers = plan.case.values_list('default_tester__email',\n flat=True)\n recipients.update(case_testers)\n return [r for r in recipients if r]\n", "path": "tcms/testplans/helpers/email.py"}]}
| 663 | 103 |
gh_patches_debug_33011
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-3176
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
System Package manager should not update package index if CONAN_SYSREQUIRES_MODE=verify
Currently the SystemPackageManager.update() performs an update (which needs sudo rights!) if CONAN_SYSREQUIRES_MODE is anything other than "disabled"; "verify" should also not update, I think.
--- END ISSUE ---
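For context, the requested behaviour is that both `verify` and `disabled` skip the privileged package-index refresh, leaving only `enabled` to run it; a rough standalone sketch (with hypothetical `tool`/`output` stand-ins, not the real Conan objects):
```python
# Hypothetical sketch of the requested update() behaviour; `tool` and
# `output` stand in for Conan's system package tool and output objects.
import os


def update(tool, output):
    mode = os.environ.get("CONAN_SYSREQUIRES_MODE", "enabled").lower()
    if mode in ("disabled", "verify"):
        output.info("Not updating system_requirements. "
                    "CONAN_SYSREQUIRES_MODE=%s" % mode)
        return
    tool.update()  # only "enabled" reaches the sudo-requiring refresh
```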
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/tools/system_pm.py`
Content:
```
1 import os
2 from conans.client.runner import ConanRunner
3 from conans.client.tools.oss import OSInfo
4 from conans.errors import ConanException
5 from conans.util.env_reader import get_env
6
7 _global_output = None
8
9
10 class SystemPackageTool(object):
11
12 def __init__(self, runner=None, os_info=None, tool=None, recommends=False):
13 os_info = os_info or OSInfo()
14 self._is_up_to_date = False
15 self._tool = tool or self._create_tool(os_info)
16 self._tool._sudo_str = "sudo " if self._is_sudo_enabled() else ""
17 self._tool._runner = runner or ConanRunner()
18 self._tool._recommends = recommends
19
20 @staticmethod
21 def _is_sudo_enabled():
22 if "CONAN_SYSREQUIRES_SUDO" not in os.environ:
23 if os.name == 'posix' and os.geteuid() == 0:
24 return False
25 if os.name == 'nt':
26 return False
27 return get_env("CONAN_SYSREQUIRES_SUDO", True)
28
29 @staticmethod
30 def _get_sysrequire_mode():
31 allowed_modes= ("enabled", "verify", "disabled")
32 mode = get_env("CONAN_SYSREQUIRES_MODE", "enabled")
33 mode_lower = mode.lower()
34 if mode_lower not in allowed_modes:
35 raise ConanException("CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r" % (mode, allowed_modes))
36 return mode_lower
37
38 @staticmethod
39 def _create_tool(os_info):
40 if os_info.with_apt:
41 return AptTool()
42 elif os_info.with_yum:
43 return YumTool()
44 elif os_info.with_pacman:
45 return PacManTool()
46 elif os_info.is_macos:
47 return BrewTool()
48 elif os_info.is_freebsd:
49 return PkgTool()
50 elif os_info.is_solaris:
51 return PkgUtilTool()
52 elif os_info.with_zypper:
53 return ZypperTool()
54 else:
55 return NullTool()
56
57 def update(self):
58 """
59 Get the system package tool update command
60 """
61 mode = self._get_sysrequire_mode()
62 if mode == "disabled":
63 _global_output.info("Not updating system_requirements. CONAN_SYSREQUIRES_MODE=Disabled")
64 return
65 self._is_up_to_date = True
66 self._tool.update()
67
68 def install(self, packages, update=True, force=False):
69 """
70 Get the system package tool install command.
71 '"""
72 packages = [packages] if isinstance(packages, str) else list(packages)
73
74 mode = self._get_sysrequire_mode()
75
76 if mode in ("verify", "disabled"):
77 # Report to output packages need to be installed
78 if mode == "disabled":
79 _global_output.info("The following packages need to be installed:\n %s" % "\n".join(packages))
80 return
81
82 if mode == "verify" and not self._installed(packages):
83 _global_output.error("The following packages need to be installed:\n %s" % "\n".join(packages))
84 raise ConanException(
85 "Aborted due to CONAN_SYSREQUIRES_MODE=%s. Some system packages need to be installed" % mode
86 )
87
88 if not force and self._installed(packages):
89 return
90
91 # From here system packages can be updated/modified
92 if update and not self._is_up_to_date:
93 self.update()
94 self._install_any(packages)
95
96 def _installed(self, packages):
97 if not packages:
98 return True
99
100 for pkg in packages:
101 if self._tool.installed(pkg):
102 _global_output.info("Package already installed: %s" % pkg)
103 return True
104 return False
105
106 def _install_any(self, packages):
107 if len(packages) == 1:
108 return self._tool.install(packages[0])
109 for pkg in packages:
110 try:
111 return self._tool.install(pkg)
112 except ConanException:
113 pass
114 raise ConanException("Could not install any of %s" % packages)
115
116
117 class NullTool(object):
118 def update(self):
119 pass
120
121 def install(self, package_name):
122 _global_output.warn("Only available for linux with apt-get, yum, or pacman or OSX with brew or "
123 "FreeBSD with pkg or Solaris with pkgutil")
124
125 def installed(self, package_name):
126 return False
127
128
129 class AptTool(object):
130 def update(self):
131 _run(self._runner, "%sapt-get update" % self._sudo_str)
132
133 def install(self, package_name):
134 recommends_str = '' if self._recommends else '--no-install-recommends '
135 _run(self._runner, "%sapt-get install -y %s%s" % (self._sudo_str, recommends_str, package_name))
136
137 def installed(self, package_name):
138 exit_code = self._runner("dpkg -s %s" % package_name, None)
139 return exit_code == 0
140
141
142 class YumTool(object):
143 def update(self):
144 _run(self._runner, "%syum check-update" % self._sudo_str, accepted_returns=[0, 100])
145
146 def install(self, package_name):
147 _run(self._runner, "%syum install -y %s" % (self._sudo_str, package_name))
148
149 def installed(self, package_name):
150 exit_code = self._runner("rpm -q %s" % package_name, None)
151 return exit_code == 0
152
153
154 class BrewTool(object):
155 def update(self):
156 _run(self._runner, "brew update")
157
158 def install(self, package_name):
159 _run(self._runner, "brew install %s" % package_name)
160
161 def installed(self, package_name):
162 exit_code = self._runner('test -n "$(brew ls --versions %s)"' % package_name, None)
163 return exit_code == 0
164
165
166 class PkgTool(object):
167 def update(self):
168 _run(self._runner, "%spkg update" % self._sudo_str)
169
170 def install(self, package_name):
171 _run(self._runner, "%spkg install -y %s" % (self._sudo_str, package_name))
172
173 def installed(self, package_name):
174 exit_code = self._runner("pkg info %s" % package_name, None)
175 return exit_code == 0
176
177
178 class PkgUtilTool(object):
179 def update(self):
180 _run(self._runner, "%spkgutil --catalog" % self._sudo_str)
181
182 def install(self, package_name):
183 _run(self._runner, "%spkgutil --install --yes %s" % (self._sudo_str, package_name))
184
185 def installed(self, package_name):
186 exit_code = self._runner('test -n "`pkgutil --list %s`"' % package_name, None)
187 return exit_code == 0
188
189
190 class ChocolateyTool(object):
191 def update(self):
192 _run(self._runner, "choco outdated")
193
194 def install(self, package_name):
195 _run(self._runner, "choco install --yes %s" % package_name)
196
197 def installed(self, package_name):
198 exit_code = self._runner('choco search --local-only --exact %s | findstr /c:"1 packages installed."' % package_name, None)
199 return exit_code == 0
200
201
202 class PacManTool(object):
203 def update(self):
204 _run(self._runner, "%spacman -Syyu --noconfirm" % self._sudo_str)
205
206 def install(self, package_name):
207 _run(self._runner, "%spacman -S --noconfirm %s" % (self._sudo_str, package_name))
208
209 def installed(self, package_name):
210 exit_code = self._runner("pacman -Qi %s" % package_name, None)
211 return exit_code == 0
212
213
214 class ZypperTool(object):
215 def update(self):
216 _run(self._runner, "%szypper --non-interactive ref" % self._sudo_str)
217
218 def install(self, package_name):
219 _run(self._runner, "%szypper --non-interactive in %s" % (self._sudo_str, package_name))
220
221 def installed(self, package_name):
222 exit_code = self._runner("rpm -q %s" % package_name, None)
223 return exit_code == 0
224
225 def _run(runner, command, accepted_returns=None):
226 accepted_returns = accepted_returns or [0, ]
227 _global_output.info("Running: %s" % command)
228 if runner(command, True) not in accepted_returns:
229 raise ConanException("Command '%s' failed" % command)
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py
--- a/conans/client/tools/system_pm.py
+++ b/conans/client/tools/system_pm.py
@@ -28,7 +28,7 @@
@staticmethod
def _get_sysrequire_mode():
- allowed_modes= ("enabled", "verify", "disabled")
+ allowed_modes = ("enabled", "verify", "disabled")
mode = get_env("CONAN_SYSREQUIRES_MODE", "enabled")
mode_lower = mode.lower()
if mode_lower not in allowed_modes:
@@ -59,8 +59,8 @@
Get the system package tool update command
"""
mode = self._get_sysrequire_mode()
- if mode == "disabled":
- _global_output.info("Not updating system_requirements. CONAN_SYSREQUIRES_MODE=Disabled")
+ if mode in ("disabled", "verify"):
+ _global_output.info("Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s" % mode)
return
self._is_up_to_date = True
self._tool.update()
@@ -195,7 +195,8 @@
_run(self._runner, "choco install --yes %s" % package_name)
def installed(self, package_name):
- exit_code = self._runner('choco search --local-only --exact %s | findstr /c:"1 packages installed."' % package_name, None)
+ exit_code = self._runner('choco search --local-only --exact %s | '
+ 'findstr /c:"1 packages installed."' % package_name, None)
return exit_code == 0
@@ -222,6 +223,7 @@
exit_code = self._runner("rpm -q %s" % package_name, None)
return exit_code == 0
+
def _run(runner, command, accepted_returns=None):
accepted_returns = accepted_returns or [0, ]
_global_output.info("Running: %s" % command)
|
{"golden_diff": "diff --git a/conans/client/tools/system_pm.py b/conans/client/tools/system_pm.py\n--- a/conans/client/tools/system_pm.py\n+++ b/conans/client/tools/system_pm.py\n@@ -28,7 +28,7 @@\n \n @staticmethod\n def _get_sysrequire_mode():\n- allowed_modes= (\"enabled\", \"verify\", \"disabled\")\n+ allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n@@ -59,8 +59,8 @@\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n- if mode == \"disabled\":\n- _global_output.info(\"Not updating system_requirements. CONAN_SYSREQUIRES_MODE=Disabled\")\n+ if mode in (\"disabled\", \"verify\"):\n+ _global_output.info(\"Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n@@ -195,7 +195,8 @@\n _run(self._runner, \"choco install --yes %s\" % package_name)\n \n def installed(self, package_name):\n- exit_code = self._runner('choco search --local-only --exact %s | findstr /c:\"1 packages installed.\"' % package_name, None)\n+ exit_code = self._runner('choco search --local-only --exact %s | '\n+ 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n \n \n@@ -222,6 +223,7 @@\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n \n+\n def _run(runner, command, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n _global_output.info(\"Running: %s\" % command)\n", "issue": "System Package manager should not update package index if CONAN_SYSREQUIRES_MODE=verify\nCurrently the SystemPackageManager.update() performs an update (which needs sudo rights!) if CONAN_SYSREQUIRES_MODE is anything else than \"disabled\". 
\"verify\" should also not update, I think.\r\n\r\n\n", "before_files": [{"content": "import os\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\n\n_global_output = None\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False):\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info)\n self._tool._sudo_str = \"sudo \" if self._is_sudo_enabled() else \"\"\n self._tool._runner = runner or ConanRunner()\n self._tool._recommends = recommends\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes= (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\" % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info):\n if os_info.with_apt:\n return AptTool()\n elif os_info.with_yum:\n return YumTool()\n elif os_info.with_pacman:\n return PacManTool()\n elif os_info.is_macos:\n return BrewTool()\n elif os_info.is_freebsd:\n return PkgTool()\n elif os_info.is_solaris:\n return PkgUtilTool()\n elif os_info.with_zypper:\n return ZypperTool()\n else:\n return NullTool()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode == \"disabled\":\n _global_output.info(\"Not updating system_requirements. CONAN_SYSREQUIRES_MODE=Disabled\")\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n _global_output.info(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n _global_output.error(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n raise ConanException(\n \"Aborted due to CONAN_SYSREQUIRES_MODE=%s. 
Some system packages need to be installed\" % mode\n )\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n _global_output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass NullTool(object):\n def update(self):\n pass\n\n def install(self, package_name):\n _global_output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or \"\n \"FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(object):\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner, \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg -s %s\" % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(object):\n def update(self):\n _run(self._runner, \"%syum check-update\" % self._sudo_str, accepted_returns=[0, 100])\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(object):\n def update(self):\n _run(self._runner, \"brew update\")\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(object):\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(object):\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkgutil --install --yes %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(object):\n def update(self):\n _run(self._runner, \"choco outdated\")\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(object):\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, 
package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(object):\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\ndef _run(runner, command, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n _global_output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}], "after_files": [{"content": "import os\nfrom conans.client.runner import ConanRunner\nfrom conans.client.tools.oss import OSInfo\nfrom conans.errors import ConanException\nfrom conans.util.env_reader import get_env\n\n_global_output = None\n\n\nclass SystemPackageTool(object):\n\n def __init__(self, runner=None, os_info=None, tool=None, recommends=False):\n os_info = os_info or OSInfo()\n self._is_up_to_date = False\n self._tool = tool or self._create_tool(os_info)\n self._tool._sudo_str = \"sudo \" if self._is_sudo_enabled() else \"\"\n self._tool._runner = runner or ConanRunner()\n self._tool._recommends = recommends\n\n @staticmethod\n def _is_sudo_enabled():\n if \"CONAN_SYSREQUIRES_SUDO\" not in os.environ:\n if os.name == 'posix' and os.geteuid() == 0:\n return False\n if os.name == 'nt':\n return False\n return get_env(\"CONAN_SYSREQUIRES_SUDO\", True)\n\n @staticmethod\n def _get_sysrequire_mode():\n allowed_modes = (\"enabled\", \"verify\", \"disabled\")\n mode = get_env(\"CONAN_SYSREQUIRES_MODE\", \"enabled\")\n mode_lower = mode.lower()\n if mode_lower not in allowed_modes:\n raise ConanException(\"CONAN_SYSREQUIRES_MODE=%s is not allowed, allowed modes=%r\" % (mode, allowed_modes))\n return mode_lower\n\n @staticmethod\n def _create_tool(os_info):\n if os_info.with_apt:\n return AptTool()\n elif os_info.with_yum:\n return YumTool()\n elif os_info.with_pacman:\n return PacManTool()\n elif os_info.is_macos:\n return BrewTool()\n elif os_info.is_freebsd:\n return PkgTool()\n elif os_info.is_solaris:\n return PkgUtilTool()\n elif os_info.with_zypper:\n return ZypperTool()\n else:\n return NullTool()\n\n def update(self):\n \"\"\"\n Get the system package tool update command\n \"\"\"\n mode = self._get_sysrequire_mode()\n if mode in (\"disabled\", \"verify\"):\n _global_output.info(\"Not updating system_requirements. CONAN_SYSREQUIRES_MODE=%s\" % mode)\n return\n self._is_up_to_date = True\n self._tool.update()\n\n def install(self, packages, update=True, force=False):\n \"\"\"\n Get the system package tool install command.\n '\"\"\"\n packages = [packages] if isinstance(packages, str) else list(packages)\n\n mode = self._get_sysrequire_mode()\n\n if mode in (\"verify\", \"disabled\"):\n # Report to output packages need to be installed\n if mode == \"disabled\":\n _global_output.info(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n return\n\n if mode == \"verify\" and not self._installed(packages):\n _global_output.error(\"The following packages need to be installed:\\n %s\" % \"\\n\".join(packages))\n raise ConanException(\n \"Aborted due to CONAN_SYSREQUIRES_MODE=%s. 
Some system packages need to be installed\" % mode\n )\n\n if not force and self._installed(packages):\n return\n\n # From here system packages can be updated/modified\n if update and not self._is_up_to_date:\n self.update()\n self._install_any(packages)\n\n def _installed(self, packages):\n if not packages:\n return True\n\n for pkg in packages:\n if self._tool.installed(pkg):\n _global_output.info(\"Package already installed: %s\" % pkg)\n return True\n return False\n\n def _install_any(self, packages):\n if len(packages) == 1:\n return self._tool.install(packages[0])\n for pkg in packages:\n try:\n return self._tool.install(pkg)\n except ConanException:\n pass\n raise ConanException(\"Could not install any of %s\" % packages)\n\n\nclass NullTool(object):\n def update(self):\n pass\n\n def install(self, package_name):\n _global_output.warn(\"Only available for linux with apt-get, yum, or pacman or OSX with brew or \"\n \"FreeBSD with pkg or Solaris with pkgutil\")\n\n def installed(self, package_name):\n return False\n\n\nclass AptTool(object):\n def update(self):\n _run(self._runner, \"%sapt-get update\" % self._sudo_str)\n\n def install(self, package_name):\n recommends_str = '' if self._recommends else '--no-install-recommends '\n _run(self._runner, \"%sapt-get install -y %s%s\" % (self._sudo_str, recommends_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"dpkg -s %s\" % package_name, None)\n return exit_code == 0\n\n\nclass YumTool(object):\n def update(self):\n _run(self._runner, \"%syum check-update\" % self._sudo_str, accepted_returns=[0, 100])\n\n def install(self, package_name):\n _run(self._runner, \"%syum install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\nclass BrewTool(object):\n def update(self):\n _run(self._runner, \"brew update\")\n\n def install(self, package_name):\n _run(self._runner, \"brew install %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"$(brew ls --versions %s)\"' % package_name, None)\n return exit_code == 0\n\n\nclass PkgTool(object):\n def update(self):\n _run(self._runner, \"%spkg update\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkg install -y %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pkg info %s\" % package_name, None)\n return exit_code == 0\n\n\nclass PkgUtilTool(object):\n def update(self):\n _run(self._runner, \"%spkgutil --catalog\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spkgutil --install --yes %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner('test -n \"`pkgutil --list %s`\"' % package_name, None)\n return exit_code == 0\n\n\nclass ChocolateyTool(object):\n def update(self):\n _run(self._runner, \"choco outdated\")\n\n def install(self, package_name):\n _run(self._runner, \"choco install --yes %s\" % package_name)\n\n def installed(self, package_name):\n exit_code = self._runner('choco search --local-only --exact %s | '\n 'findstr /c:\"1 packages installed.\"' % package_name, None)\n return exit_code == 0\n\n\nclass PacManTool(object):\n def update(self):\n _run(self._runner, \"%spacman -Syyu --noconfirm\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%spacman -S --noconfirm %s\" % (self._sudo_str, 
package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"pacman -Qi %s\" % package_name, None)\n return exit_code == 0\n\n\nclass ZypperTool(object):\n def update(self):\n _run(self._runner, \"%szypper --non-interactive ref\" % self._sudo_str)\n\n def install(self, package_name):\n _run(self._runner, \"%szypper --non-interactive in %s\" % (self._sudo_str, package_name))\n\n def installed(self, package_name):\n exit_code = self._runner(\"rpm -q %s\" % package_name, None)\n return exit_code == 0\n\n\ndef _run(runner, command, accepted_returns=None):\n accepted_returns = accepted_returns or [0, ]\n _global_output.info(\"Running: %s\" % command)\n if runner(command, True) not in accepted_returns:\n raise ConanException(\"Command '%s' failed\" % command)\n", "path": "conans/client/tools/system_pm.py"}]}
| 2,811 | 447 |
gh_patches_debug_23803 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-782 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Report errors while installing default skills
Installing default skills may fail quietly in some conditions, ideally some feedback to the user should be given.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/skills/main.py`
Content:
```
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import json
20 import os
21 import subprocess
22 import sys
23 import time
24 from os.path import exists, join
25 from threading import Timer
26
27 from mycroft import MYCROFT_ROOT_PATH
28 from mycroft.configuration import ConfigurationManager
29 from mycroft.lock import Lock # Creates PID file for single instance
30 from mycroft.messagebus.client.ws import WebsocketClient
31 from mycroft.messagebus.message import Message
32 from mycroft.skills.core import load_skill, create_skill_descriptor, \
33 MainModule, SKILLS_DIR
34 from mycroft.skills.intent_service import IntentService
35 from mycroft.util import connected
36 from mycroft.util.log import getLogger
37 import mycroft.dialog
38
39 logger = getLogger("Skills")
40
41 __author__ = 'seanfitz'
42
43 ws = None
44 loaded_skills = {}
45 last_modified_skill = 0
46 skills_directories = []
47 skill_reload_thread = None
48 skills_manager_timer = None
49
50 installer_config = ConfigurationManager.instance().get("SkillInstallerSkill")
51 MSM_BIN = installer_config.get("path", join(MYCROFT_ROOT_PATH, 'msm', 'msm'))
52
53
54 def connect():
55 global ws
56 ws.run_forever()
57
58
59 def install_default_skills(speak=True):
60 if exists(MSM_BIN):
61 p = subprocess.Popen(MSM_BIN + " default", stderr=subprocess.STDOUT,
62 stdout=subprocess.PIPE, shell=True)
63 t = p.communicate()[0]
64 if t.splitlines()[-1] == "Installed!" and speak:
65 ws.emit(Message("speak", {
66 'utterance': mycroft.dialog.get("skills updated")}))
67 elif not connected():
68 ws.emit(Message("speak", {
69 'utterance': mycroft.dialog.get("no network connection")}))
70
71 else:
72 logger.error("Unable to invoke Mycroft Skill Manager: " + MSM_BIN)
73
74
75 def skills_manager(message):
76 global skills_manager_timer, ws
77
78 if connected():
79 if skills_manager_timer is None:
80 ws.emit(
81 Message("speak", {'utterance':
82 mycroft.dialog.get("checking for updates")}))
83
84 # Install default skills and look for updates via Github
85 logger.debug("==== Invoking Mycroft Skill Manager: " + MSM_BIN)
86 install_default_skills(False)
87
88 # Perform check again once and hour
89 skills_manager_timer = Timer(3600, _skills_manager_dispatch)
90 skills_manager_timer.daemon = True
91 skills_manager_timer.start()
92
93
94 def _skills_manager_dispatch():
95 ws.emit(Message("skill_manager", {}))
96
97
98 def _load_skills():
99 global ws, loaded_skills, last_modified_skill, skills_directories, \
100 skill_reload_thread
101
102 check_connection()
103
104 # Create skill_manager listener and invoke the first time
105 ws.on('skill_manager', skills_manager)
106 ws.on('mycroft.internet.connected', install_default_skills)
107 ws.emit(Message('skill_manager', {}))
108
109 # Create the Intent manager, which converts utterances to intents
110 # This is the heart of the voice invoked skill system
111 IntentService(ws)
112
113 # Create a thread that monitors the loaded skills, looking for updates
114 skill_reload_thread = Timer(0, _watch_skills)
115 skill_reload_thread.daemon = True
116 skill_reload_thread.start()
117
118
119 def check_connection():
120 if connected():
121 ws.emit(Message('mycroft.internet.connected'))
122 else:
123 thread = Timer(1, check_connection)
124 thread.daemon = True
125 thread.start()
126
127
128 def _get_last_modified_date(path):
129 last_date = 0
130 # getting all recursive paths
131 for root, _, _ in os.walk(path):
132 f = root.replace(path, "")
133 # checking if is a hidden path
134 if not f.startswith(".") and not f.startswith("/."):
135 last_date = max(last_date, os.path.getmtime(path + f))
136
137 return last_date
138
139
140 def _watch_skills():
141 global ws, loaded_skills, last_modified_skill, \
142 id_counter
143
144 # Scan the file folder that contains Skills. If a Skill is updated,
145 # unload the existing version from memory and reload from the disk.
146 while True:
147 if exists(SKILLS_DIR):
148 # checking skills dir and getting all skills there
149 list = filter(lambda x: os.path.isdir(
150 os.path.join(SKILLS_DIR, x)), os.listdir(SKILLS_DIR))
151
152 for skill_folder in list:
153 if skill_folder not in loaded_skills:
154 loaded_skills[skill_folder] = {}
155 skill = loaded_skills.get(skill_folder)
156 skill["path"] = os.path.join(SKILLS_DIR, skill_folder)
157 # checking if is a skill
158 if not MainModule + ".py" in os.listdir(skill["path"]):
159 continue
160 # getting the newest modified date of skill
161 skill["last_modified"] = _get_last_modified_date(skill["path"])
162 modified = skill.get("last_modified", 0)
163 # checking if skill is loaded and wasn't modified
164 if skill.get(
165 "loaded") and modified <= last_modified_skill:
166 continue
167 # checking if skill was modified
168 elif skill.get(
169 "instance") and modified > last_modified_skill:
170 # checking if skill should be reloaded
171 if not skill["instance"].reload_skill:
172 continue
173 logger.debug("Reloading Skill: " + skill_folder)
174 # removing listeners and stopping threads
175 skill["instance"].shutdown()
176 del skill["instance"]
177 skill["loaded"] = True
178 skill["instance"] = load_skill(
179 create_skill_descriptor(skill["path"]), ws)
180 # get the last modified skill
181 modified_dates = map(lambda x: x.get("last_modified"),
182 loaded_skills.values())
183 if len(modified_dates) > 0:
184 last_modified_skill = max(modified_dates)
185
186 # Pause briefly before beginning next scan
187 time.sleep(2)
188
189
190 def main():
191 global ws
192 lock = Lock('skills') # prevent multiple instances of this service
193
194 # Connect this Skill management process to the websocket
195 ws = WebsocketClient()
196 ConfigurationManager.init(ws)
197
198 ignore_logs = ConfigurationManager.instance().get("ignore_logs")
199
200 # Listen for messages and echo them for logging
201 def _echo(message):
202 try:
203 _message = json.loads(message)
204
205 if _message.get("type") in ignore_logs:
206 return
207
208 if _message.get("type") == "registration":
209 # do not log tokens from registration messages
210 _message["data"]["token"] = None
211 message = json.dumps(_message)
212 except:
213 pass
214 logger.debug(message)
215
216 ws.on('message', _echo)
217
218 # Kick off loading of skills
219 ws.once('open', _load_skills)
220 ws.run_forever()
221
222
223 if __name__ == "__main__":
224 try:
225 main()
226 except KeyboardInterrupt:
227 skills_manager_timer.cancel()
228 for skill in loaded_skills:
229 skill.shutdown()
230 if skill_reload_thread:
231 skill_reload_thread.cancel()
232
233 finally:
234 sys.exit()
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mycroft/skills/main.py b/mycroft/skills/main.py
--- a/mycroft/skills/main.py
+++ b/mycroft/skills/main.py
@@ -57,16 +57,25 @@
def install_default_skills(speak=True):
+ """
+ Install default skill set using msm.
+
+ Args:
+ speak (optional): Enable response for success. Default True
+ """
if exists(MSM_BIN):
- p = subprocess.Popen(MSM_BIN + " default", stderr=subprocess.STDOUT,
- stdout=subprocess.PIPE, shell=True)
- t = p.communicate()[0]
- if t.splitlines()[-1] == "Installed!" and speak:
+ res = subprocess.call(MSM_BIN + " default", stderr=subprocess.STDOUT,
+ stdout=subprocess.PIPE, shell=True)
+ if res == 0 and speak:
ws.emit(Message("speak", {
'utterance': mycroft.dialog.get("skills updated")}))
elif not connected():
ws.emit(Message("speak", {
'utterance': mycroft.dialog.get("no network connection")}))
+ elif res != 0:
+ ws.emit(Message("speak", {
+ 'utterance': mycroft.dialog.get(
+ "sorry I couldn't install default skills")}))
else:
logger.error("Unable to invoke Mycroft Skill Manager: " + MSM_BIN)
|
{"golden_diff": "diff --git a/mycroft/skills/main.py b/mycroft/skills/main.py\n--- a/mycroft/skills/main.py\n+++ b/mycroft/skills/main.py\n@@ -57,16 +57,25 @@\n \n \n def install_default_skills(speak=True):\n+ \"\"\"\n+ Install default skill set using msm.\n+\n+ Args:\n+ speak (optional): Enable response for success. Default True\n+ \"\"\"\n if exists(MSM_BIN):\n- p = subprocess.Popen(MSM_BIN + \" default\", stderr=subprocess.STDOUT,\n- stdout=subprocess.PIPE, shell=True)\n- t = p.communicate()[0]\n- if t.splitlines()[-1] == \"Installed!\" and speak:\n+ res = subprocess.call(MSM_BIN + \" default\", stderr=subprocess.STDOUT,\n+ stdout=subprocess.PIPE, shell=True)\n+ if res == 0 and speak:\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"skills updated\")}))\n elif not connected():\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"no network connection\")}))\n+ elif res != 0:\n+ ws.emit(Message(\"speak\", {\n+ 'utterance': mycroft.dialog.get(\n+ \"sorry I couldn't install default skills\")}))\n \n else:\n logger.error(\"Unable to invoke Mycroft Skill Manager: \" + MSM_BIN)\n", "issue": "Report errors while installing default skills\nInstalling default skills may fail quietly in some conditions, ideally some feedback to the user should be given.\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport json\nimport os\nimport subprocess\nimport sys\nimport time\nfrom os.path import exists, join\nfrom threading import Timer\n\nfrom mycroft import MYCROFT_ROOT_PATH\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.lock import Lock # Creates PID file for single instance\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.messagebus.message import Message\nfrom mycroft.skills.core import load_skill, create_skill_descriptor, \\\n MainModule, SKILLS_DIR\nfrom mycroft.skills.intent_service import IntentService\nfrom mycroft.util import connected\nfrom mycroft.util.log import getLogger\nimport mycroft.dialog\n\nlogger = getLogger(\"Skills\")\n\n__author__ = 'seanfitz'\n\nws = None\nloaded_skills = {}\nlast_modified_skill = 0\nskills_directories = []\nskill_reload_thread = None\nskills_manager_timer = None\n\ninstaller_config = ConfigurationManager.instance().get(\"SkillInstallerSkill\")\nMSM_BIN = installer_config.get(\"path\", join(MYCROFT_ROOT_PATH, 'msm', 'msm'))\n\n\ndef connect():\n global ws\n ws.run_forever()\n\n\ndef install_default_skills(speak=True):\n if exists(MSM_BIN):\n p = subprocess.Popen(MSM_BIN + \" default\", stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE, shell=True)\n t = p.communicate()[0]\n if t.splitlines()[-1] == \"Installed!\" and speak:\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"skills updated\")}))\n elif not connected():\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"no network connection\")}))\n\n else:\n logger.error(\"Unable to invoke Mycroft Skill Manager: \" + MSM_BIN)\n\n\ndef skills_manager(message):\n global skills_manager_timer, ws\n\n if connected():\n if skills_manager_timer is None:\n ws.emit(\n Message(\"speak\", {'utterance':\n mycroft.dialog.get(\"checking for updates\")}))\n\n # Install default skills and look for updates via Github\n logger.debug(\"==== Invoking Mycroft Skill Manager: \" + MSM_BIN)\n install_default_skills(False)\n\n # Perform check again once and hour\n skills_manager_timer = Timer(3600, _skills_manager_dispatch)\n skills_manager_timer.daemon = True\n skills_manager_timer.start()\n\n\ndef _skills_manager_dispatch():\n ws.emit(Message(\"skill_manager\", {}))\n\n\ndef _load_skills():\n global ws, loaded_skills, last_modified_skill, skills_directories, \\\n skill_reload_thread\n\n check_connection()\n\n # Create skill_manager listener and invoke the first time\n ws.on('skill_manager', skills_manager)\n ws.on('mycroft.internet.connected', install_default_skills)\n ws.emit(Message('skill_manager', {}))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n IntentService(ws)\n\n # Create a thread that monitors the loaded skills, looking for updates\n skill_reload_thread = Timer(0, _watch_skills)\n skill_reload_thread.daemon = True\n skill_reload_thread.start()\n\n\ndef check_connection():\n if connected():\n ws.emit(Message('mycroft.internet.connected'))\n else:\n thread = Timer(1, check_connection)\n thread.daemon = True\n thread.start()\n\n\ndef _get_last_modified_date(path):\n last_date = 0\n # getting all recursive paths\n for root, _, _ in os.walk(path):\n f = root.replace(path, \"\")\n # checking if is a hidden path\n if not f.startswith(\".\") and not f.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(path + f))\n\n return last_date\n\n\ndef _watch_skills():\n global ws, loaded_skills, 
last_modified_skill, \\\n id_counter\n\n # Scan the file folder that contains Skills. If a Skill is updated,\n # unload the existing version from memory and reload from the disk.\n while True:\n if exists(SKILLS_DIR):\n # checking skills dir and getting all skills there\n list = filter(lambda x: os.path.isdir(\n os.path.join(SKILLS_DIR, x)), os.listdir(SKILLS_DIR))\n\n for skill_folder in list:\n if skill_folder not in loaded_skills:\n loaded_skills[skill_folder] = {}\n skill = loaded_skills.get(skill_folder)\n skill[\"path\"] = os.path.join(SKILLS_DIR, skill_folder)\n # checking if is a skill\n if not MainModule + \".py\" in os.listdir(skill[\"path\"]):\n continue\n # getting the newest modified date of skill\n skill[\"last_modified\"] = _get_last_modified_date(skill[\"path\"])\n modified = skill.get(\"last_modified\", 0)\n # checking if skill is loaded and wasn't modified\n if skill.get(\n \"loaded\") and modified <= last_modified_skill:\n continue\n # checking if skill was modified\n elif skill.get(\n \"instance\") and modified > last_modified_skill:\n # checking if skill should be reloaded\n if not skill[\"instance\"].reload_skill:\n continue\n logger.debug(\"Reloading Skill: \" + skill_folder)\n # removing listeners and stopping threads\n skill[\"instance\"].shutdown()\n del skill[\"instance\"]\n skill[\"loaded\"] = True\n skill[\"instance\"] = load_skill(\n create_skill_descriptor(skill[\"path\"]), ws)\n # get the last modified skill\n modified_dates = map(lambda x: x.get(\"last_modified\"),\n loaded_skills.values())\n if len(modified_dates) > 0:\n last_modified_skill = max(modified_dates)\n\n # Pause briefly before beginning next scan\n time.sleep(2)\n\n\ndef main():\n global ws\n lock = Lock('skills') # prevent multiple instances of this service\n\n # Connect this Skill management process to the websocket\n ws = WebsocketClient()\n ConfigurationManager.init(ws)\n\n ignore_logs = ConfigurationManager.instance().get(\"ignore_logs\")\n\n # Listen for messages and echo them for logging\n def _echo(message):\n try:\n _message = json.loads(message)\n\n if _message.get(\"type\") in ignore_logs:\n return\n\n if _message.get(\"type\") == \"registration\":\n # do not log tokens from registration messages\n _message[\"data\"][\"token\"] = None\n message = json.dumps(_message)\n except:\n pass\n logger.debug(message)\n\n ws.on('message', _echo)\n\n # Kick off loading of skills\n ws.once('open', _load_skills)\n ws.run_forever()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n skills_manager_timer.cancel()\n for skill in loaded_skills:\n skill.shutdown()\n if skill_reload_thread:\n skill_reload_thread.cancel()\n\n finally:\n sys.exit()\n", "path": "mycroft/skills/main.py"}], "after_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport json\nimport os\nimport subprocess\nimport sys\nimport time\nfrom os.path import exists, join\nfrom threading import Timer\n\nfrom mycroft import MYCROFT_ROOT_PATH\nfrom mycroft.configuration import ConfigurationManager\nfrom mycroft.lock import Lock # Creates PID file for single instance\nfrom mycroft.messagebus.client.ws import WebsocketClient\nfrom mycroft.messagebus.message import Message\nfrom mycroft.skills.core import load_skill, create_skill_descriptor, \\\n MainModule, SKILLS_DIR\nfrom mycroft.skills.intent_service import IntentService\nfrom mycroft.util import connected\nfrom mycroft.util.log import getLogger\nimport mycroft.dialog\n\nlogger = getLogger(\"Skills\")\n\n__author__ = 'seanfitz'\n\nws = None\nloaded_skills = {}\nlast_modified_skill = 0\nskills_directories = []\nskill_reload_thread = None\nskills_manager_timer = None\n\ninstaller_config = ConfigurationManager.instance().get(\"SkillInstallerSkill\")\nMSM_BIN = installer_config.get(\"path\", join(MYCROFT_ROOT_PATH, 'msm', 'msm'))\n\n\ndef connect():\n global ws\n ws.run_forever()\n\n\ndef install_default_skills(speak=True):\n \"\"\"\n Install default skill set using msm.\n\n Args:\n speak (optional): Enable response for success. Default True\n \"\"\"\n if exists(MSM_BIN):\n res = subprocess.call(MSM_BIN + \" default\", stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE, shell=True)\n if res == 0 and speak:\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"skills updated\")}))\n elif not connected():\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\"no network connection\")}))\n elif res != 0:\n ws.emit(Message(\"speak\", {\n 'utterance': mycroft.dialog.get(\n \"sorry I couldn't install default skills\")}))\n\n else:\n logger.error(\"Unable to invoke Mycroft Skill Manager: \" + MSM_BIN)\n\n\ndef skills_manager(message):\n global skills_manager_timer, ws\n\n if connected():\n if skills_manager_timer is None:\n ws.emit(\n Message(\"speak\", {'utterance':\n mycroft.dialog.get(\"checking for updates\")}))\n\n # Install default skills and look for updates via Github\n logger.debug(\"==== Invoking Mycroft Skill Manager: \" + MSM_BIN)\n install_default_skills(False)\n\n # Perform check again once and hour\n skills_manager_timer = Timer(3600, _skills_manager_dispatch)\n skills_manager_timer.daemon = True\n skills_manager_timer.start()\n\n\ndef _skills_manager_dispatch():\n ws.emit(Message(\"skill_manager\", {}))\n\n\ndef _load_skills():\n global ws, loaded_skills, last_modified_skill, skills_directories, \\\n skill_reload_thread\n\n check_connection()\n\n # Create skill_manager listener and invoke the first time\n ws.on('skill_manager', skills_manager)\n ws.on('mycroft.internet.connected', install_default_skills)\n ws.emit(Message('skill_manager', {}))\n\n # Create the Intent manager, which converts utterances to intents\n # This is the heart of the voice invoked skill system\n IntentService(ws)\n\n # Create a thread that monitors the loaded skills, looking for updates\n skill_reload_thread = Timer(0, _watch_skills)\n skill_reload_thread.daemon = True\n skill_reload_thread.start()\n\n\ndef check_connection():\n if connected():\n ws.emit(Message('mycroft.internet.connected'))\n else:\n thread = Timer(1, check_connection)\n thread.daemon = True\n thread.start()\n\n\ndef _get_last_modified_date(path):\n last_date = 0\n # getting all recursive paths\n for root, _, _ in os.walk(path):\n f = root.replace(path, \"\")\n # checking if is a hidden 
path\n if not f.startswith(\".\") and not f.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(path + f))\n\n return last_date\n\n\ndef _watch_skills():\n global ws, loaded_skills, last_modified_skill, \\\n id_counter\n\n # Scan the file folder that contains Skills. If a Skill is updated,\n # unload the existing version from memory and reload from the disk.\n while True:\n if exists(SKILLS_DIR):\n # checking skills dir and getting all skills there\n list = filter(lambda x: os.path.isdir(\n os.path.join(SKILLS_DIR, x)), os.listdir(SKILLS_DIR))\n\n for skill_folder in list:\n if skill_folder not in loaded_skills:\n loaded_skills[skill_folder] = {}\n skill = loaded_skills.get(skill_folder)\n skill[\"path\"] = os.path.join(SKILLS_DIR, skill_folder)\n # checking if is a skill\n if not MainModule + \".py\" in os.listdir(skill[\"path\"]):\n continue\n # getting the newest modified date of skill\n skill[\"last_modified\"] = _get_last_modified_date(skill[\"path\"])\n modified = skill.get(\"last_modified\", 0)\n # checking if skill is loaded and wasn't modified\n if skill.get(\n \"loaded\") and modified <= last_modified_skill:\n continue\n # checking if skill was modified\n elif skill.get(\n \"instance\") and modified > last_modified_skill:\n # checking if skill should be reloaded\n if not skill[\"instance\"].reload_skill:\n continue\n logger.debug(\"Reloading Skill: \" + skill_folder)\n # removing listeners and stopping threads\n skill[\"instance\"].shutdown()\n del skill[\"instance\"]\n skill[\"loaded\"] = True\n skill[\"instance\"] = load_skill(\n create_skill_descriptor(skill[\"path\"]), ws)\n # get the last modified skill\n modified_dates = map(lambda x: x.get(\"last_modified\"),\n loaded_skills.values())\n if len(modified_dates) > 0:\n last_modified_skill = max(modified_dates)\n\n # Pause briefly before beginning next scan\n time.sleep(2)\n\n\ndef main():\n global ws\n lock = Lock('skills') # prevent multiple instances of this service\n\n # Connect this Skill management process to the websocket\n ws = WebsocketClient()\n ConfigurationManager.init(ws)\n\n ignore_logs = ConfigurationManager.instance().get(\"ignore_logs\")\n\n # Listen for messages and echo them for logging\n def _echo(message):\n try:\n _message = json.loads(message)\n\n if _message.get(\"type\") in ignore_logs:\n return\n\n if _message.get(\"type\") == \"registration\":\n # do not log tokens from registration messages\n _message[\"data\"][\"token\"] = None\n message = json.dumps(_message)\n except:\n pass\n logger.debug(message)\n\n ws.on('message', _echo)\n\n # Kick off loading of skills\n ws.once('open', _load_skills)\n ws.run_forever()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n skills_manager_timer.cancel()\n for skill in loaded_skills:\n skill.shutdown()\n if skill_reload_thread:\n skill_reload_thread.cancel()\n\n finally:\n sys.exit()\n", "path": "mycroft/skills/main.py"}]}
| 2,547 | 307 |
gh_patches_debug_35938 | rasdani/github-patches | git_diff | wagtail__wagtail-8865 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UX Unification - Cleanup for slim header in page listing
### Issue Summary
#8792 introduced some issues due to conflict with other PRs that have been merged previously. In addition, there are also other improvements that can be made.
### Details
- [ ] When moving `.form-side` styles to its own `_form-side.scss` file, some updates introduced in #8671 weren't carried over. By the way, should this be in `components` rather than `layouts`?
**Original:** https://github.com/wagtail/wagtail/blob/0693eed2454240b4d0f31da5166772bdd106dbb9/client/scss/layouts/_page-editor.scss#L61-L103
**Moved:** https://github.com/wagtail/wagtail/blob/35b5ed350a30dfb626d55c0b2c021976c1f65761/client/scss/layouts/_form-side.scss#L5-L47
- [ ] When moving side panel init code to a separate `sidePanel.js` file, the original code in `page-editor.js` hasn't been removed. This results in the event listeners being registered twice, making the side panels inaccessible in the page editor (it immediately closes again). To fix this, remove the following: https://github.com/wagtail/wagtail/blob/35b5ed350a30dfb626d55c0b2c021976c1f65761/client/src/entrypoints/admin/page-editor.js#L333-L407
- [ ] The locale selector in the side panel should take the user to the localised page in the **page explorer**. Right now, it brings you to the editor instead. Possible solutions: add another keyword argument to `PageSidePanels`, e.g. `in_explorer`, or make a subclass e.g. `PageExplorerSidePanels` in case we need more customisations in the future.
There may be other issues I haven't noticed. Will update later if I find more.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/ui/side_panels.py`
Content:
```
1 from django.conf import settings
2 from django.forms import Media
3 from django.urls import reverse
4 from django.utils.functional import cached_property
5 from django.utils.text import capfirst
6 from django.utils.translation import gettext_lazy
7
8 from wagtail.admin.ui.components import Component
9 from wagtail.models import Page, UserPagePermissionsProxy
10
11
12 class BaseSidePanel(Component):
13 def __init__(self, object, request):
14 self.object = object
15 self.request = request
16 self.model = type(self.object)
17
18 def get_context_data(self, parent_context):
19 context = {"panel": self, "object": self.object, "request": self.request}
20 if issubclass(self.model, Page):
21 context["page"] = self.object
22 return context
23
24
25 class BaseStatusSidePanel(BaseSidePanel):
26 name = "status"
27 title = gettext_lazy("Status")
28 template_name = "wagtailadmin/shared/side_panels/status.html"
29 order = 100
30 toggle_aria_label = gettext_lazy("Toggle status")
31 toggle_icon_name = "info-circle"
32
33 def get_status_templates(self, context):
34 templates = []
35
36 if self.object.pk:
37 templates += [
38 "wagtailadmin/shared/side_panels/includes/status/workflow.html",
39 ]
40
41 if context.get("locale"):
42 templates += ["wagtailadmin/shared/side_panels/includes/status/locale.html"]
43
44 return templates
45
46 def get_context_data(self, parent_context):
47 context = super().get_context_data(parent_context)
48 context["model_name"] = capfirst(self.model._meta.verbose_name)
49 context["status_templates"] = self.get_status_templates(context)
50 return context
51
52
53 class PageStatusSidePanel(BaseStatusSidePanel):
54 def get_status_templates(self, context):
55 templates = super().get_status_templates(context)
56 if self.object.pk:
57 templates += ["wagtailadmin/shared/side_panels/includes/status/locked.html"]
58 templates += ["wagtailadmin/shared/side_panels/includes/status/privacy.html"]
59 return templates
60
61 def get_context_data(self, parent_context):
62 context = super().get_context_data(parent_context)
63 user_perms = UserPagePermissionsProxy(self.request.user)
64 page = self.object
65
66 if page.id:
67 context.update(
68 {
69 "history_url": reverse(
70 "wagtailadmin_pages:history", args=(page.id,)
71 ),
72 "lock_url": reverse("wagtailadmin_pages:lock", args=(page.id,)),
73 "unlock_url": reverse("wagtailadmin_pages:unlock", args=(page.id,)),
74 "user_can_lock": user_perms.for_page(page).can_lock(),
75 "user_can_unlock": user_perms.for_page(page).can_unlock(),
76 "locale": None,
77 "translations": [],
78 }
79 )
80 else:
81 context.update(
82 {
83 "locale": None,
84 "translations": [],
85 }
86 )
87
88 if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
89 context.update(
90 {
91 "locale": page.locale,
92 "translations": [
93 {
94 "locale": translation.locale,
95 "url": reverse(
96 "wagtailadmin_pages:edit", args=[translation.id]
97 ),
98 }
99 for translation in page.get_translations()
100 .only("id", "locale", "depth")
101 .select_related("locale")
102 if user_perms.for_page(translation).can_edit()
103 ],
104 # The sum of translated pages plus 1 to account for the current page
105 "translations_total": page.get_translations().count() + 1,
106 }
107 )
108
109 context.update(
110 {
111 "model_name": self.model.get_verbose_name(),
112 "model_description": self.model.get_page_description(),
113 "status_templates": self.get_status_templates(context),
114 }
115 )
116
117 return context
118
119
120 class CommentsSidePanel(BaseSidePanel):
121 name = "comments"
122 title = gettext_lazy("Comments")
123 template_name = "wagtailadmin/shared/side_panels/comments.html"
124 order = 300
125 toggle_aria_label = gettext_lazy("Toggle comments")
126 toggle_icon_name = "comment"
127
128
129 class BasePreviewSidePanel(BaseSidePanel):
130 name = "preview"
131 title = gettext_lazy("Preview")
132 template_name = "wagtailadmin/shared/side_panels/preview.html"
133 order = 400
134 toggle_aria_label = gettext_lazy("Toggle preview")
135 toggle_icon_name = "mobile-alt"
136
137 def get_context_data(self, parent_context):
138 context = super().get_context_data(parent_context)
139 context["has_multiple_modes"] = len(self.object.preview_modes) > 1
140 return context
141
142
143 class PagePreviewSidePanel(BasePreviewSidePanel):
144 def get_context_data(self, parent_context):
145 context = super().get_context_data(parent_context)
146 if self.object.id:
147 context["preview_url"] = reverse(
148 "wagtailadmin_pages:preview_on_edit", args=[self.object.id]
149 )
150 else:
151 content_type = parent_context["content_type"]
152 parent_page = parent_context["parent_page"]
153 context["preview_url"] = reverse(
154 "wagtailadmin_pages:preview_on_add",
155 args=[content_type.app_label, content_type.model, parent_page.id],
156 )
157 return context
158
159
160 class BaseSidePanels:
161 def __init__(self, request, object):
162 self.request = request
163 self.object = object
164
165 self.side_panels = [
166 BaseStatusSidePanel(object, self.request),
167 ]
168
169 def __iter__(self):
170 return iter(sorted(self.side_panels, key=lambda p: p.order))
171
172 @cached_property
173 def media(self):
174 media = Media()
175 for panel in self:
176 media += panel.media
177 return media
178
179
180 class PageSidePanels(BaseSidePanels):
181 def __init__(self, request, page, *, preview_enabled, comments_enabled):
182 super().__init__(request, page)
183
184 self.side_panels = [
185 PageStatusSidePanel(page, self.request),
186 ]
187
188 if preview_enabled and page.preview_modes:
189 self.side_panels += [
190 PagePreviewSidePanel(page, self.request),
191 ]
192
193 if comments_enabled:
194 self.side_panels += [
195 CommentsSidePanel(page, self.request),
196 ]
197
```
Path: `wagtail/admin/views/pages/listing.py`
Content:
```
1 from django.conf import settings
2 from django.core.paginator import Paginator
3 from django.db.models import Count
4 from django.shortcuts import get_object_or_404, redirect
5 from django.template.response import TemplateResponse
6 from django.urls import reverse
7
8 from wagtail import hooks
9 from wagtail.admin.auth import user_has_any_page_permission, user_passes_test
10 from wagtail.admin.navigation import get_explorable_root_page
11 from wagtail.admin.ui.side_panels import PageSidePanels
12 from wagtail.models import Page, UserPagePermissionsProxy
13
14
15 @user_passes_test(user_has_any_page_permission)
16 def index(request, parent_page_id=None):
17 if parent_page_id:
18 parent_page = get_object_or_404(Page, id=parent_page_id)
19 else:
20 parent_page = Page.get_first_root_node()
21
22 # This will always succeed because of the @user_passes_test above.
23 root_page = get_explorable_root_page(request.user)
24
25 # If this page isn't a descendant of the user's explorable root page,
26 # then redirect to that explorable root page instead.
27 if not (parent_page.pk == root_page.pk or parent_page.is_descendant_of(root_page)):
28 return redirect("wagtailadmin_explore", root_page.pk)
29
30 parent_page = parent_page.specific
31
32 user_perms = UserPagePermissionsProxy(request.user)
33 pages = (
34 parent_page.get_children().prefetch_related("content_type", "sites_rooted_here")
35 & user_perms.explorable_pages()
36 )
37
38 # Get page ordering
39 ordering = request.GET.get("ordering", "-latest_revision_created_at")
40 if ordering not in [
41 "title",
42 "-title",
43 "content_type",
44 "-content_type",
45 "live",
46 "-live",
47 "latest_revision_created_at",
48 "-latest_revision_created_at",
49 "ord",
50 ]:
51 ordering = "-latest_revision_created_at"
52
53 if ordering == "ord":
54 # preserve the native ordering from get_children()
55 pass
56 elif ordering == "latest_revision_created_at":
57 # order by oldest revision first.
58 # Special case NULL entries - these should go at the top of the list.
59 # Do this by annotating with Count('latest_revision_created_at'),
60 # which returns 0 for these
61 pages = pages.annotate(
62 null_position=Count("latest_revision_created_at")
63 ).order_by("null_position", "latest_revision_created_at")
64 elif ordering == "-latest_revision_created_at":
65 # order by oldest revision first.
66 # Special case NULL entries - these should go at the end of the list.
67 pages = pages.annotate(
68 null_position=Count("latest_revision_created_at")
69 ).order_by("-null_position", "-latest_revision_created_at")
70 else:
71 pages = pages.order_by(ordering)
72
73 # Don't paginate if sorting by page order - all pages must be shown to
74 # allow drag-and-drop reordering
75 do_paginate = ordering != "ord"
76
77 # We want specific page instances, but do not need streamfield values here
78 pages = pages.defer_streamfields().specific()
79
80 # allow hooks defer_streamfieldsyset
81 for hook in hooks.get_hooks("construct_explorer_page_queryset"):
82 pages = hook(parent_page, pages, request)
83
84 # Annotate queryset with various states to be used later for performance optimisations
85 if getattr(settings, "WAGTAIL_WORKFLOW_ENABLED", True):
86 pages = pages.prefetch_workflow_states()
87
88 pages = pages.annotate_site_root_state().annotate_approved_schedule()
89
90 # Pagination
91 if do_paginate:
92 paginator = Paginator(pages, per_page=50)
93 pages = paginator.get_page(request.GET.get("p"))
94
95 show_ordering_column = request.GET.get("ordering") == "ord"
96
97 side_panels = PageSidePanels(
98 request,
99 parent_page.specific,
100 preview_enabled=False,
101 comments_enabled=False,
102 )
103
104 context = {
105 "parent_page": parent_page.specific,
106 "ordering": ordering,
107 "side_panels": side_panels,
108 "pages": pages,
109 "do_paginate": do_paginate,
110 "locale": None,
111 "translations": [],
112 "show_ordering_column": show_ordering_column,
113 "show_bulk_actions": not show_ordering_column,
114 "show_locale_labels": False,
115 }
116
117 if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
118 if not parent_page.is_root():
119 context.update(
120 {
121 "locale": parent_page.locale,
122 "translations": [
123 {
124 "locale": translation.locale,
125 "url": reverse(
126 "wagtailadmin_explore", args=[translation.id]
127 ),
128 }
129 for translation in parent_page.get_translations()
130 .only("id", "locale")
131 .select_related("locale")
132 ],
133 }
134 )
135 else:
136 context["show_locale_labels"] = True
137
138 return TemplateResponse(request, "wagtailadmin/pages/index.html", context)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/ui/side_panels.py b/wagtail/admin/ui/side_panels.py
--- a/wagtail/admin/ui/side_panels.py
+++ b/wagtail/admin/ui/side_panels.py
@@ -30,6 +30,10 @@
toggle_aria_label = gettext_lazy("Toggle status")
toggle_icon_name = "info-circle"
+ def __init__(self, *args, in_explorer=False, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.in_explorer = in_explorer
+
def get_status_templates(self, context):
templates = []
@@ -86,15 +90,17 @@
)
if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
+ url_name = "wagtailadmin_pages:edit"
+ if self.in_explorer:
+ url_name = "wagtailadmin_explore"
+
context.update(
{
"locale": page.locale,
"translations": [
{
"locale": translation.locale,
- "url": reverse(
- "wagtailadmin_pages:edit", args=[translation.id]
- ),
+ "url": reverse(url_name, args=[translation.id]),
}
for translation in page.get_translations()
.only("id", "locale", "depth")
@@ -178,11 +184,13 @@
class PageSidePanels(BaseSidePanels):
- def __init__(self, request, page, *, preview_enabled, comments_enabled):
+ def __init__(
+ self, request, page, *, preview_enabled, comments_enabled, in_explorer=False
+ ):
super().__init__(request, page)
self.side_panels = [
- PageStatusSidePanel(page, self.request),
+ PageStatusSidePanel(page, self.request, in_explorer=in_explorer),
]
if preview_enabled and page.preview_modes:
diff --git a/wagtail/admin/views/pages/listing.py b/wagtail/admin/views/pages/listing.py
--- a/wagtail/admin/views/pages/listing.py
+++ b/wagtail/admin/views/pages/listing.py
@@ -97,6 +97,7 @@
side_panels = PageSidePanels(
request,
parent_page.specific,
+ in_explorer=True,
preview_enabled=False,
comments_enabled=False,
)
|
{"golden_diff": "diff --git a/wagtail/admin/ui/side_panels.py b/wagtail/admin/ui/side_panels.py\n--- a/wagtail/admin/ui/side_panels.py\n+++ b/wagtail/admin/ui/side_panels.py\n@@ -30,6 +30,10 @@\n toggle_aria_label = gettext_lazy(\"Toggle status\")\n toggle_icon_name = \"info-circle\"\n \n+ def __init__(self, *args, in_explorer=False, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.in_explorer = in_explorer\n+\n def get_status_templates(self, context):\n templates = []\n \n@@ -86,15 +90,17 @@\n )\n \n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n+ url_name = \"wagtailadmin_pages:edit\"\n+ if self.in_explorer:\n+ url_name = \"wagtailadmin_explore\"\n+\n context.update(\n {\n \"locale\": page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n- \"url\": reverse(\n- \"wagtailadmin_pages:edit\", args=[translation.id]\n- ),\n+ \"url\": reverse(url_name, args=[translation.id]),\n }\n for translation in page.get_translations()\n .only(\"id\", \"locale\", \"depth\")\n@@ -178,11 +184,13 @@\n \n \n class PageSidePanels(BaseSidePanels):\n- def __init__(self, request, page, *, preview_enabled, comments_enabled):\n+ def __init__(\n+ self, request, page, *, preview_enabled, comments_enabled, in_explorer=False\n+ ):\n super().__init__(request, page)\n \n self.side_panels = [\n- PageStatusSidePanel(page, self.request),\n+ PageStatusSidePanel(page, self.request, in_explorer=in_explorer),\n ]\n \n if preview_enabled and page.preview_modes:\ndiff --git a/wagtail/admin/views/pages/listing.py b/wagtail/admin/views/pages/listing.py\n--- a/wagtail/admin/views/pages/listing.py\n+++ b/wagtail/admin/views/pages/listing.py\n@@ -97,6 +97,7 @@\n side_panels = PageSidePanels(\n request,\n parent_page.specific,\n+ in_explorer=True,\n preview_enabled=False,\n comments_enabled=False,\n )\n", "issue": "UX Unification - Cleanup for slim header in page listing\n### Issue Summary\r\n\r\n#8792 introduced some issues due to conflict with other PRs that have been merged previously. In addition, there are also other improvements that can be made.\r\n\r\n### Details\r\n\r\n- [ ] When moving `.form-side` styles to its own `_form-side.scss` file, some updates introduced in #8671 weren't carried over. By the way, should this be in `components` rather than `layouts`?\r\n **Original:** https://github.com/wagtail/wagtail/blob/0693eed2454240b4d0f31da5166772bdd106dbb9/client/scss/layouts/_page-editor.scss#L61-L103\r\n **Moved:** https://github.com/wagtail/wagtail/blob/35b5ed350a30dfb626d55c0b2c021976c1f65761/client/scss/layouts/_form-side.scss#L5-L47\r\n- [ ] When moving side panel init code to a separate `sidePanel.js` file, the original code in `page-editor.js` hasn't been removed. This results in the event listeners being registered twice, making the side panels inaccessible in the page editor (it immediately closes again). To fix this, remove the following: https://github.com/wagtail/wagtail/blob/35b5ed350a30dfb626d55c0b2c021976c1f65761/client/src/entrypoints/admin/page-editor.js#L333-L407\r\n- [ ] The locale selector in the side panel should take the user to the localised page in the **page explorer**. Right now, it brings you to the editor instead. Possible solutions: add another keyword argument to `PageSidePanels`, e.g. `in_explorer`, or make a subclass e.g. `PageExplorerSidePanels` in case we need more customisations in the future.\r\n\r\nThere may be other issues I haven't noticed. 
Will update later if I find more.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.forms import Media\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy\n\nfrom wagtail.admin.ui.components import Component\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\n\nclass BaseSidePanel(Component):\n def __init__(self, object, request):\n self.object = object\n self.request = request\n self.model = type(self.object)\n\n def get_context_data(self, parent_context):\n context = {\"panel\": self, \"object\": self.object, \"request\": self.request}\n if issubclass(self.model, Page):\n context[\"page\"] = self.object\n return context\n\n\nclass BaseStatusSidePanel(BaseSidePanel):\n name = \"status\"\n title = gettext_lazy(\"Status\")\n template_name = \"wagtailadmin/shared/side_panels/status.html\"\n order = 100\n toggle_aria_label = gettext_lazy(\"Toggle status\")\n toggle_icon_name = \"info-circle\"\n\n def get_status_templates(self, context):\n templates = []\n\n if self.object.pk:\n templates += [\n \"wagtailadmin/shared/side_panels/includes/status/workflow.html\",\n ]\n\n if context.get(\"locale\"):\n templates += [\"wagtailadmin/shared/side_panels/includes/status/locale.html\"]\n\n return templates\n\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n context[\"model_name\"] = capfirst(self.model._meta.verbose_name)\n context[\"status_templates\"] = self.get_status_templates(context)\n return context\n\n\nclass PageStatusSidePanel(BaseStatusSidePanel):\n def get_status_templates(self, context):\n templates = super().get_status_templates(context)\n if self.object.pk:\n templates += [\"wagtailadmin/shared/side_panels/includes/status/locked.html\"]\n templates += [\"wagtailadmin/shared/side_panels/includes/status/privacy.html\"]\n return templates\n\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n user_perms = UserPagePermissionsProxy(self.request.user)\n page = self.object\n\n if page.id:\n context.update(\n {\n \"history_url\": reverse(\n \"wagtailadmin_pages:history\", args=(page.id,)\n ),\n \"lock_url\": reverse(\"wagtailadmin_pages:lock\", args=(page.id,)),\n \"unlock_url\": reverse(\"wagtailadmin_pages:unlock\", args=(page.id,)),\n \"user_can_lock\": user_perms.for_page(page).can_lock(),\n \"user_can_unlock\": user_perms.for_page(page).can_unlock(),\n \"locale\": None,\n \"translations\": [],\n }\n )\n else:\n context.update(\n {\n \"locale\": None,\n \"translations\": [],\n }\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n context.update(\n {\n \"locale\": page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n \"wagtailadmin_pages:edit\", args=[translation.id]\n ),\n }\n for translation in page.get_translations()\n .only(\"id\", \"locale\", \"depth\")\n .select_related(\"locale\")\n if user_perms.for_page(translation).can_edit()\n ],\n # The sum of translated pages plus 1 to account for the current page\n \"translations_total\": page.get_translations().count() + 1,\n }\n )\n\n context.update(\n {\n \"model_name\": self.model.get_verbose_name(),\n \"model_description\": self.model.get_page_description(),\n \"status_templates\": self.get_status_templates(context),\n }\n )\n\n return context\n\n\nclass CommentsSidePanel(BaseSidePanel):\n name = \"comments\"\n title = 
gettext_lazy(\"Comments\")\n template_name = \"wagtailadmin/shared/side_panels/comments.html\"\n order = 300\n toggle_aria_label = gettext_lazy(\"Toggle comments\")\n toggle_icon_name = \"comment\"\n\n\nclass BasePreviewSidePanel(BaseSidePanel):\n name = \"preview\"\n title = gettext_lazy(\"Preview\")\n template_name = \"wagtailadmin/shared/side_panels/preview.html\"\n order = 400\n toggle_aria_label = gettext_lazy(\"Toggle preview\")\n toggle_icon_name = \"mobile-alt\"\n\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n context[\"has_multiple_modes\"] = len(self.object.preview_modes) > 1\n return context\n\n\nclass PagePreviewSidePanel(BasePreviewSidePanel):\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n if self.object.id:\n context[\"preview_url\"] = reverse(\n \"wagtailadmin_pages:preview_on_edit\", args=[self.object.id]\n )\n else:\n content_type = parent_context[\"content_type\"]\n parent_page = parent_context[\"parent_page\"]\n context[\"preview_url\"] = reverse(\n \"wagtailadmin_pages:preview_on_add\",\n args=[content_type.app_label, content_type.model, parent_page.id],\n )\n return context\n\n\nclass BaseSidePanels:\n def __init__(self, request, object):\n self.request = request\n self.object = object\n\n self.side_panels = [\n BaseStatusSidePanel(object, self.request),\n ]\n\n def __iter__(self):\n return iter(sorted(self.side_panels, key=lambda p: p.order))\n\n @cached_property\n def media(self):\n media = Media()\n for panel in self:\n media += panel.media\n return media\n\n\nclass PageSidePanels(BaseSidePanels):\n def __init__(self, request, page, *, preview_enabled, comments_enabled):\n super().__init__(request, page)\n\n self.side_panels = [\n PageStatusSidePanel(page, self.request),\n ]\n\n if preview_enabled and page.preview_modes:\n self.side_panels += [\n PagePreviewSidePanel(page, self.request),\n ]\n\n if comments_enabled:\n self.side_panels += [\n CommentsSidePanel(page, self.request),\n ]\n", "path": "wagtail/admin/ui/side_panels.py"}, {"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\n\nfrom wagtail import hooks\nfrom wagtail.admin.auth import user_has_any_page_permission, user_passes_test\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.ui.side_panels import PageSidePanels\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\n\n@user_passes_test(user_has_any_page_permission)\ndef index(request, parent_page_id=None):\n if parent_page_id:\n parent_page = get_object_or_404(Page, id=parent_page_id)\n else:\n parent_page = Page.get_first_root_node()\n\n # This will always succeed because of the @user_passes_test above.\n root_page = get_explorable_root_page(request.user)\n\n # If this page isn't a descendant of the user's explorable root page,\n # then redirect to that explorable root page instead.\n if not (parent_page.pk == root_page.pk or parent_page.is_descendant_of(root_page)):\n return redirect(\"wagtailadmin_explore\", root_page.pk)\n\n parent_page = parent_page.specific\n\n user_perms = UserPagePermissionsProxy(request.user)\n pages = (\n parent_page.get_children().prefetch_related(\"content_type\", \"sites_rooted_here\")\n & user_perms.explorable_pages()\n )\n\n # Get page ordering\n ordering = 
request.GET.get(\"ordering\", \"-latest_revision_created_at\")\n if ordering not in [\n \"title\",\n \"-title\",\n \"content_type\",\n \"-content_type\",\n \"live\",\n \"-live\",\n \"latest_revision_created_at\",\n \"-latest_revision_created_at\",\n \"ord\",\n ]:\n ordering = \"-latest_revision_created_at\"\n\n if ordering == \"ord\":\n # preserve the native ordering from get_children()\n pass\n elif ordering == \"latest_revision_created_at\":\n # order by oldest revision first.\n # Special case NULL entries - these should go at the top of the list.\n # Do this by annotating with Count('latest_revision_created_at'),\n # which returns 0 for these\n pages = pages.annotate(\n null_position=Count(\"latest_revision_created_at\")\n ).order_by(\"null_position\", \"latest_revision_created_at\")\n elif ordering == \"-latest_revision_created_at\":\n # order by oldest revision first.\n # Special case NULL entries - these should go at the end of the list.\n pages = pages.annotate(\n null_position=Count(\"latest_revision_created_at\")\n ).order_by(\"-null_position\", \"-latest_revision_created_at\")\n else:\n pages = pages.order_by(ordering)\n\n # Don't paginate if sorting by page order - all pages must be shown to\n # allow drag-and-drop reordering\n do_paginate = ordering != \"ord\"\n\n # We want specific page instances, but do not need streamfield values here\n pages = pages.defer_streamfields().specific()\n\n # allow hooks defer_streamfieldsyset\n for hook in hooks.get_hooks(\"construct_explorer_page_queryset\"):\n pages = hook(parent_page, pages, request)\n\n # Annotate queryset with various states to be used later for performance optimisations\n if getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n pages = pages.prefetch_workflow_states()\n\n pages = pages.annotate_site_root_state().annotate_approved_schedule()\n\n # Pagination\n if do_paginate:\n paginator = Paginator(pages, per_page=50)\n pages = paginator.get_page(request.GET.get(\"p\"))\n\n show_ordering_column = request.GET.get(\"ordering\") == \"ord\"\n\n side_panels = PageSidePanels(\n request,\n parent_page.specific,\n preview_enabled=False,\n comments_enabled=False,\n )\n\n context = {\n \"parent_page\": parent_page.specific,\n \"ordering\": ordering,\n \"side_panels\": side_panels,\n \"pages\": pages,\n \"do_paginate\": do_paginate,\n \"locale\": None,\n \"translations\": [],\n \"show_ordering_column\": show_ordering_column,\n \"show_bulk_actions\": not show_ordering_column,\n \"show_locale_labels\": False,\n }\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n if not parent_page.is_root():\n context.update(\n {\n \"locale\": parent_page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n \"wagtailadmin_explore\", args=[translation.id]\n ),\n }\n for translation in parent_page.get_translations()\n .only(\"id\", \"locale\")\n .select_related(\"locale\")\n ],\n }\n )\n else:\n context[\"show_locale_labels\"] = True\n\n return TemplateResponse(request, \"wagtailadmin/pages/index.html\", context)\n", "path": "wagtail/admin/views/pages/listing.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.forms import Media\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext_lazy\n\nfrom wagtail.admin.ui.components import Component\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\n\nclass BaseSidePanel(Component):\n def __init__(self, 
object, request):\n self.object = object\n self.request = request\n self.model = type(self.object)\n\n def get_context_data(self, parent_context):\n context = {\"panel\": self, \"object\": self.object, \"request\": self.request}\n if issubclass(self.model, Page):\n context[\"page\"] = self.object\n return context\n\n\nclass BaseStatusSidePanel(BaseSidePanel):\n name = \"status\"\n title = gettext_lazy(\"Status\")\n template_name = \"wagtailadmin/shared/side_panels/status.html\"\n order = 100\n toggle_aria_label = gettext_lazy(\"Toggle status\")\n toggle_icon_name = \"info-circle\"\n\n def __init__(self, *args, in_explorer=False, **kwargs):\n super().__init__(*args, **kwargs)\n self.in_explorer = in_explorer\n\n def get_status_templates(self, context):\n templates = []\n\n if self.object.pk:\n templates += [\n \"wagtailadmin/shared/side_panels/includes/status/workflow.html\",\n ]\n\n if context.get(\"locale\"):\n templates += [\"wagtailadmin/shared/side_panels/includes/status/locale.html\"]\n\n return templates\n\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n context[\"model_name\"] = capfirst(self.model._meta.verbose_name)\n context[\"status_templates\"] = self.get_status_templates(context)\n return context\n\n\nclass PageStatusSidePanel(BaseStatusSidePanel):\n def get_status_templates(self, context):\n templates = super().get_status_templates(context)\n if self.object.pk:\n templates += [\"wagtailadmin/shared/side_panels/includes/status/locked.html\"]\n templates += [\"wagtailadmin/shared/side_panels/includes/status/privacy.html\"]\n return templates\n\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n user_perms = UserPagePermissionsProxy(self.request.user)\n page = self.object\n\n if page.id:\n context.update(\n {\n \"history_url\": reverse(\n \"wagtailadmin_pages:history\", args=(page.id,)\n ),\n \"lock_url\": reverse(\"wagtailadmin_pages:lock\", args=(page.id,)),\n \"unlock_url\": reverse(\"wagtailadmin_pages:unlock\", args=(page.id,)),\n \"user_can_lock\": user_perms.for_page(page).can_lock(),\n \"user_can_unlock\": user_perms.for_page(page).can_unlock(),\n \"locale\": None,\n \"translations\": [],\n }\n )\n else:\n context.update(\n {\n \"locale\": None,\n \"translations\": [],\n }\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n url_name = \"wagtailadmin_pages:edit\"\n if self.in_explorer:\n url_name = \"wagtailadmin_explore\"\n\n context.update(\n {\n \"locale\": page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n \"url\": reverse(url_name, args=[translation.id]),\n }\n for translation in page.get_translations()\n .only(\"id\", \"locale\", \"depth\")\n .select_related(\"locale\")\n if user_perms.for_page(translation).can_edit()\n ],\n # The sum of translated pages plus 1 to account for the current page\n \"translations_total\": page.get_translations().count() + 1,\n }\n )\n\n context.update(\n {\n \"model_name\": self.model.get_verbose_name(),\n \"model_description\": self.model.get_page_description(),\n \"status_templates\": self.get_status_templates(context),\n }\n )\n\n return context\n\n\nclass CommentsSidePanel(BaseSidePanel):\n name = \"comments\"\n title = gettext_lazy(\"Comments\")\n template_name = \"wagtailadmin/shared/side_panels/comments.html\"\n order = 300\n toggle_aria_label = gettext_lazy(\"Toggle comments\")\n toggle_icon_name = \"comment\"\n\n\nclass BasePreviewSidePanel(BaseSidePanel):\n name = \"preview\"\n title = 
gettext_lazy(\"Preview\")\n template_name = \"wagtailadmin/shared/side_panels/preview.html\"\n order = 400\n toggle_aria_label = gettext_lazy(\"Toggle preview\")\n toggle_icon_name = \"mobile-alt\"\n\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n context[\"has_multiple_modes\"] = len(self.object.preview_modes) > 1\n return context\n\n\nclass PagePreviewSidePanel(BasePreviewSidePanel):\n def get_context_data(self, parent_context):\n context = super().get_context_data(parent_context)\n if self.object.id:\n context[\"preview_url\"] = reverse(\n \"wagtailadmin_pages:preview_on_edit\", args=[self.object.id]\n )\n else:\n content_type = parent_context[\"content_type\"]\n parent_page = parent_context[\"parent_page\"]\n context[\"preview_url\"] = reverse(\n \"wagtailadmin_pages:preview_on_add\",\n args=[content_type.app_label, content_type.model, parent_page.id],\n )\n return context\n\n\nclass BaseSidePanels:\n def __init__(self, request, object):\n self.request = request\n self.object = object\n\n self.side_panels = [\n BaseStatusSidePanel(object, self.request),\n ]\n\n def __iter__(self):\n return iter(sorted(self.side_panels, key=lambda p: p.order))\n\n @cached_property\n def media(self):\n media = Media()\n for panel in self:\n media += panel.media\n return media\n\n\nclass PageSidePanels(BaseSidePanels):\n def __init__(\n self, request, page, *, preview_enabled, comments_enabled, in_explorer=False\n ):\n super().__init__(request, page)\n\n self.side_panels = [\n PageStatusSidePanel(page, self.request, in_explorer=in_explorer),\n ]\n\n if preview_enabled and page.preview_modes:\n self.side_panels += [\n PagePreviewSidePanel(page, self.request),\n ]\n\n if comments_enabled:\n self.side_panels += [\n CommentsSidePanel(page, self.request),\n ]\n", "path": "wagtail/admin/ui/side_panels.py"}, {"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\n\nfrom wagtail import hooks\nfrom wagtail.admin.auth import user_has_any_page_permission, user_passes_test\nfrom wagtail.admin.navigation import get_explorable_root_page\nfrom wagtail.admin.ui.side_panels import PageSidePanels\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\n\n@user_passes_test(user_has_any_page_permission)\ndef index(request, parent_page_id=None):\n if parent_page_id:\n parent_page = get_object_or_404(Page, id=parent_page_id)\n else:\n parent_page = Page.get_first_root_node()\n\n # This will always succeed because of the @user_passes_test above.\n root_page = get_explorable_root_page(request.user)\n\n # If this page isn't a descendant of the user's explorable root page,\n # then redirect to that explorable root page instead.\n if not (parent_page.pk == root_page.pk or parent_page.is_descendant_of(root_page)):\n return redirect(\"wagtailadmin_explore\", root_page.pk)\n\n parent_page = parent_page.specific\n\n user_perms = UserPagePermissionsProxy(request.user)\n pages = (\n parent_page.get_children().prefetch_related(\"content_type\", \"sites_rooted_here\")\n & user_perms.explorable_pages()\n )\n\n # Get page ordering\n ordering = request.GET.get(\"ordering\", \"-latest_revision_created_at\")\n if ordering not in [\n \"title\",\n \"-title\",\n \"content_type\",\n \"-content_type\",\n \"live\",\n \"-live\",\n \"latest_revision_created_at\",\n 
\"-latest_revision_created_at\",\n \"ord\",\n ]:\n ordering = \"-latest_revision_created_at\"\n\n if ordering == \"ord\":\n # preserve the native ordering from get_children()\n pass\n elif ordering == \"latest_revision_created_at\":\n # order by oldest revision first.\n # Special case NULL entries - these should go at the top of the list.\n # Do this by annotating with Count('latest_revision_created_at'),\n # which returns 0 for these\n pages = pages.annotate(\n null_position=Count(\"latest_revision_created_at\")\n ).order_by(\"null_position\", \"latest_revision_created_at\")\n elif ordering == \"-latest_revision_created_at\":\n # order by oldest revision first.\n # Special case NULL entries - these should go at the end of the list.\n pages = pages.annotate(\n null_position=Count(\"latest_revision_created_at\")\n ).order_by(\"-null_position\", \"-latest_revision_created_at\")\n else:\n pages = pages.order_by(ordering)\n\n # Don't paginate if sorting by page order - all pages must be shown to\n # allow drag-and-drop reordering\n do_paginate = ordering != \"ord\"\n\n # We want specific page instances, but do not need streamfield values here\n pages = pages.defer_streamfields().specific()\n\n # allow hooks defer_streamfieldsyset\n for hook in hooks.get_hooks(\"construct_explorer_page_queryset\"):\n pages = hook(parent_page, pages, request)\n\n # Annotate queryset with various states to be used later for performance optimisations\n if getattr(settings, \"WAGTAIL_WORKFLOW_ENABLED\", True):\n pages = pages.prefetch_workflow_states()\n\n pages = pages.annotate_site_root_state().annotate_approved_schedule()\n\n # Pagination\n if do_paginate:\n paginator = Paginator(pages, per_page=50)\n pages = paginator.get_page(request.GET.get(\"p\"))\n\n show_ordering_column = request.GET.get(\"ordering\") == \"ord\"\n\n side_panels = PageSidePanels(\n request,\n parent_page.specific,\n in_explorer=True,\n preview_enabled=False,\n comments_enabled=False,\n )\n\n context = {\n \"parent_page\": parent_page.specific,\n \"ordering\": ordering,\n \"side_panels\": side_panels,\n \"pages\": pages,\n \"do_paginate\": do_paginate,\n \"locale\": None,\n \"translations\": [],\n \"show_ordering_column\": show_ordering_column,\n \"show_bulk_actions\": not show_ordering_column,\n \"show_locale_labels\": False,\n }\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n if not parent_page.is_root():\n context.update(\n {\n \"locale\": parent_page.locale,\n \"translations\": [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n \"wagtailadmin_explore\", args=[translation.id]\n ),\n }\n for translation in parent_page.get_translations()\n .only(\"id\", \"locale\")\n .select_related(\"locale\")\n ],\n }\n )\n else:\n context[\"show_locale_labels\"] = True\n\n return TemplateResponse(request, \"wagtailadmin/pages/index.html\", context)\n", "path": "wagtail/admin/views/pages/listing.py"}]}
| 3,985 | 534 |
| gh_patches_debug_2516 | rasdani/github-patches | git_diff | python-pillow__Pillow-6559 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alpha channel ignored in tkinter using PIL.Image.open and PIL.ImageTk.PhotoImage for PNGs in palette mode
### What did you do?
I opened a PNG image (palette mode with alpha channel) using pillow and passed it to tkinter.
### What did you expect to happen?
It should work just like using tkinter.PhotoImage.
### What actually happened?
The alpha channel is ignored.
### What are your OS, Python and Pillow versions?
* OS: `5.10.0-17-amd64 #1 SMP Debian 5.10.136-1 (2022-08-13) x86_64 GNU/Linux`
* Python: `Python 3.9.2`
* Pillow: `9.0.1`
I noticed an error when I wanted to include a PNG image in palette mode with alpha channel into a tkinter application.
Since I wanted to resize it before displaying it, I used Pillow.
However if I execute the code below ([reproduce.zip](https://github.com/python-pillow/Pillow/files/9483016/reproduce.zip)) I see this:

It shows the following 3 images:
- The first image is created using `Image.open` and `ImageTk.PhotoImage`, the transparent part is black.
- The second image shows the same image which has been converted to RGBA mode and then saved (also with Pillow), which works.
- The third image shows the `tkinter.PhotoImage` version which also works fine.
Uncommenting the explicit conversion to RGBA also fixes the problem, which is my current workaround.
However I think this should not be necessary.
```python
from tkinter import Tk, Label, PhotoImage
from PIL import Image, ImageTk
root = Tk()
# via Pillow
pil_image = Image.open('loading.png')
print(pil_image.mode)
#pil_image = pil_image.convert('RGBA')
image1 = ImageTk.PhotoImage(pil_image)
Label(root, image=image1).grid(row=0, column=0, padx=5, pady=5)
# using RGBA mode
pil_image_rgba = Image.open('loading_RGBA.png')
print(pil_image_rgba.mode)
image2 = ImageTk.PhotoImage(pil_image_rgba)
Label(root, image=image2).grid(row=0, column=1, padx=5, pady=5)
# using tkinter.PhotoImage
image3 = PhotoImage(file='loading.png')
Label(root, image=image3).grid(row=0, column=2, padx=5, pady=5)
root.mainloop()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/ImageTk.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # a Tk display interface
6 #
7 # History:
8 # 96-04-08 fl Created
9 # 96-09-06 fl Added getimage method
10 # 96-11-01 fl Rewritten, removed image attribute and crop method
11 # 97-05-09 fl Use PyImagingPaste method instead of image type
12 # 97-05-12 fl Minor tweaks to match the IFUNC95 interface
13 # 97-05-17 fl Support the "pilbitmap" booster patch
14 # 97-06-05 fl Added file= and data= argument to image constructors
15 # 98-03-09 fl Added width and height methods to Image classes
16 # 98-07-02 fl Use default mode for "P" images without palette attribute
17 # 98-07-02 fl Explicitly destroy Tkinter image objects
18 # 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
19 # 99-07-26 fl Automatically hook into Tkinter (if possible)
20 # 99-08-15 fl Hook uses _imagingtk instead of _imaging
21 #
22 # Copyright (c) 1997-1999 by Secret Labs AB
23 # Copyright (c) 1996-1997 by Fredrik Lundh
24 #
25 # See the README file for information on usage and redistribution.
26 #
27
28 import tkinter
29 from io import BytesIO
30
31 from . import Image
32 from ._deprecate import deprecate
33
34 # --------------------------------------------------------------------
35 # Check for Tkinter interface hooks
36
37 _pilbitmap_ok = None
38
39
40 def _pilbitmap_check():
41 global _pilbitmap_ok
42 if _pilbitmap_ok is None:
43 try:
44 im = Image.new("1", (1, 1))
45 tkinter.BitmapImage(data=f"PIL:{im.im.id}")
46 _pilbitmap_ok = 1
47 except tkinter.TclError:
48 _pilbitmap_ok = 0
49 return _pilbitmap_ok
50
51
52 def _get_image_from_kw(kw):
53 source = None
54 if "file" in kw:
55 source = kw.pop("file")
56 elif "data" in kw:
57 source = BytesIO(kw.pop("data"))
58 if source:
59 return Image.open(source)
60
61
62 def _pyimagingtkcall(command, photo, id):
63 tk = photo.tk
64 try:
65 tk.call(command, photo, id)
66 except tkinter.TclError:
67 # activate Tkinter hook
68 # may raise an error if it cannot attach to Tkinter
69 from . import _imagingtk
70
71 _imagingtk.tkinit(tk.interpaddr())
72 tk.call(command, photo, id)
73
74
75 # --------------------------------------------------------------------
76 # PhotoImage
77
78
79 class PhotoImage:
80 """
81 A Tkinter-compatible photo image. This can be used
82 everywhere Tkinter expects an image object. If the image is an RGBA
83 image, pixels having alpha 0 are treated as transparent.
84
85 The constructor takes either a PIL image, or a mode and a size.
86 Alternatively, you can use the ``file`` or ``data`` options to initialize
87 the photo image object.
88
89 :param image: Either a PIL image, or a mode string. If a mode string is
90 used, a size must also be given.
91 :param size: If the first argument is a mode string, this defines the size
92 of the image.
93 :keyword file: A filename to load the image from (using
94 ``Image.open(file)``).
95 :keyword data: An 8-bit string containing image data (as loaded from an
96 image file).
97 """
98
99 def __init__(self, image=None, size=None, **kw):
100
101 # Tk compatibility: file or data
102 if image is None:
103 image = _get_image_from_kw(kw)
104
105 if hasattr(image, "mode") and hasattr(image, "size"):
106 # got an image instead of a mode
107 mode = image.mode
108 if mode == "P":
109 # palette mapped data
110 image.load()
111 try:
112 mode = image.palette.mode
113 except AttributeError:
114 mode = "RGB" # default
115 size = image.size
116 kw["width"], kw["height"] = size
117 else:
118 mode = image
119 image = None
120
121 if mode not in ["1", "L", "RGB", "RGBA"]:
122 mode = Image.getmodebase(mode)
123
124 self.__mode = mode
125 self.__size = size
126 self.__photo = tkinter.PhotoImage(**kw)
127 self.tk = self.__photo.tk
128 if image:
129 self.paste(image)
130
131 def __del__(self):
132 name = self.__photo.name
133 self.__photo.name = None
134 try:
135 self.__photo.tk.call("image", "delete", name)
136 except Exception:
137 pass # ignore internal errors
138
139 def __str__(self):
140 """
141 Get the Tkinter photo image identifier. This method is automatically
142 called by Tkinter whenever a PhotoImage object is passed to a Tkinter
143 method.
144
145 :return: A Tkinter photo image identifier (a string).
146 """
147 return str(self.__photo)
148
149 def width(self):
150 """
151 Get the width of the image.
152
153 :return: The width, in pixels.
154 """
155 return self.__size[0]
156
157 def height(self):
158 """
159 Get the height of the image.
160
161 :return: The height, in pixels.
162 """
163 return self.__size[1]
164
165 def paste(self, im, box=None):
166 """
167 Paste a PIL image into the photo image. Note that this can
168 be very slow if the photo image is displayed.
169
170 :param im: A PIL image. The size must match the target region. If the
171 mode does not match, the image is converted to the mode of
172 the bitmap image.
173 :param box: Deprecated. This parameter will be removed in Pillow 10
174 (2023-07-01).
175 """
176
177 if box is not None:
178 deprecate("The box parameter", 10, None)
179
180 # convert to blittable
181 im.load()
182 image = im.im
183 if image.isblock() and im.mode == self.__mode:
184 block = image
185 else:
186 block = image.new_block(self.__mode, im.size)
187 image.convert2(block, image) # convert directly between buffers
188
189 _pyimagingtkcall("PyImagingPhoto", self.__photo, block.id)
190
191
192 # --------------------------------------------------------------------
193 # BitmapImage
194
195
196 class BitmapImage:
197 """
198 A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
199 expects an image object.
200
201 The given image must have mode "1". Pixels having value 0 are treated as
202 transparent. Options, if any, are passed on to Tkinter. The most commonly
203 used option is ``foreground``, which is used to specify the color for the
204 non-transparent parts. See the Tkinter documentation for information on
205 how to specify colours.
206
207 :param image: A PIL image.
208 """
209
210 def __init__(self, image=None, **kw):
211
212 # Tk compatibility: file or data
213 if image is None:
214 image = _get_image_from_kw(kw)
215
216 self.__mode = image.mode
217 self.__size = image.size
218
219 if _pilbitmap_check():
220 # fast way (requires the pilbitmap booster patch)
221 image.load()
222 kw["data"] = f"PIL:{image.im.id}"
223 self.__im = image # must keep a reference
224 else:
225 # slow but safe way
226 kw["data"] = image.tobitmap()
227 self.__photo = tkinter.BitmapImage(**kw)
228
229 def __del__(self):
230 name = self.__photo.name
231 self.__photo.name = None
232 try:
233 self.__photo.tk.call("image", "delete", name)
234 except Exception:
235 pass # ignore internal errors
236
237 def width(self):
238 """
239 Get the width of the image.
240
241 :return: The width, in pixels.
242 """
243 return self.__size[0]
244
245 def height(self):
246 """
247 Get the height of the image.
248
249 :return: The height, in pixels.
250 """
251 return self.__size[1]
252
253 def __str__(self):
254 """
255 Get the Tkinter bitmap image identifier. This method is automatically
256 called by Tkinter whenever a BitmapImage object is passed to a Tkinter
257 method.
258
259 :return: A Tkinter bitmap image identifier (a string).
260 """
261 return str(self.__photo)
262
263
264 def getimage(photo):
265 """Copies the contents of a PhotoImage to a PIL image memory."""
266 im = Image.new("RGBA", (photo.width(), photo.height()))
267 block = im.im
268
269 _pyimagingtkcall("PyImagingPhotoGet", photo, block.id)
270
271 return im
272
273
274 def _show(image, title):
275 """Helper for the Image.show method."""
276
277 class UI(tkinter.Label):
278 def __init__(self, master, im):
279 if im.mode == "1":
280 self.image = BitmapImage(im, foreground="white", master=master)
281 else:
282 self.image = PhotoImage(im, master=master)
283 super().__init__(master, image=self.image, bg="black", bd=0)
284
285 if not tkinter._default_root:
286 raise OSError("tkinter not initialized")
287 top = tkinter.Toplevel()
288 if title:
289 top.title(title)
290 UI(top, image).pack()
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/PIL/ImageTk.py b/src/PIL/ImageTk.py
--- a/src/PIL/ImageTk.py
+++ b/src/PIL/ImageTk.py
@@ -107,6 +107,7 @@
mode = image.mode
if mode == "P":
# palette mapped data
+ image.apply_transparency()
image.load()
try:
mode = image.palette.mode
|
{"golden_diff": "diff --git a/src/PIL/ImageTk.py b/src/PIL/ImageTk.py\n--- a/src/PIL/ImageTk.py\n+++ b/src/PIL/ImageTk.py\n@@ -107,6 +107,7 @@\n mode = image.mode\n if mode == \"P\":\n # palette mapped data\n+ image.apply_transparency()\n image.load()\n try:\n mode = image.palette.mode\n", "issue": "Alpha channel ignored in tkinter using PIL.Image.open and PIL.ImageTk.PhotoImage for PNGs in palette mode\n### What did you do?\r\n\r\nI opened a PNG image (palette mode with alpha channel) using pillow and passed it to tkinter.\r\n\r\n### What did you expect to happen?\r\n\r\nIt should work just like using tkinter.PhotoImage.\r\n\r\n### What actually happened?\r\n\r\nThe alpha channel is ignored.\r\n\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: `5.10.0-17-amd64 #1 SMP Debian 5.10.136-1 (2022-08-13) x86_64 GNU/Linux`\r\n* Python: `Python 3.9.2`\r\n* Pillow: `9.0.1`\r\n\r\nI noticed an error when I wanted to include a PNG image in palette mode with alpha channel into a tkinter application.\r\nSince I wanted to resize it before displaying it, I used Pillow.\r\nHowever if I execute the code below ([reproduce.zip](https://github.com/python-pillow/Pillow/files/9483016/reproduce.zip)) I see this:\r\n\r\n\r\nIt shows the following 3 images:\r\n\r\n- The first image is created using `Image.open` and `ImageTk.PhotoImage`, the transparent part is black.\r\n- The second image shows the same image which has been converted to RGBA mode and then saved (also with Pillow), which works.\r\n- The third image shows the `tkinter.PhotoImage` version which also works fine.\r\n\r\nUncommenting the explicit conversion to RGBA also fixes the problem, which is my current workaround.\r\nHowever I think this should not be necessary.\r\n\r\n\r\n```python\r\nfrom tkinter import Tk, Label, PhotoImage\r\n\r\nfrom PIL import Image, ImageTk\r\n\r\n\r\nroot = Tk()\r\n# via Pillow\r\npil_image = Image.open('loading.png')\r\nprint(pil_image.mode)\r\n#pil_image = pil_image.convert('RGBA')\r\nimage1 = ImageTk.PhotoImage(pil_image)\r\nLabel(root, image=image1).grid(row=0, column=0, padx=5, pady=5)\r\n\r\n# using RGBA mode\r\npil_image_rgba = Image.open('loading_RGBA.png')\r\nprint(pil_image_rgba.mode)\r\nimage2 = ImageTk.PhotoImage(pil_image_rgba)\r\nLabel(root, image=image2).grid(row=0, column=1, padx=5, pady=5)\r\n\r\n# using tkinter.PhotoImage\r\nimage3 = PhotoImage(file='loading.png')\r\nLabel(root, image=image3).grid(row=0, column=2, padx=5, pady=5)\r\nroot.mainloop()\r\n```\r\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a Tk display interface\n#\n# History:\n# 96-04-08 fl Created\n# 96-09-06 fl Added getimage method\n# 96-11-01 fl Rewritten, removed image attribute and crop method\n# 97-05-09 fl Use PyImagingPaste method instead of image type\n# 97-05-12 fl Minor tweaks to match the IFUNC95 interface\n# 97-05-17 fl Support the \"pilbitmap\" booster patch\n# 97-06-05 fl Added file= and data= argument to image constructors\n# 98-03-09 fl Added width and height methods to Image classes\n# 98-07-02 fl Use default mode for \"P\" images without palette attribute\n# 98-07-02 fl Explicitly destroy Tkinter image objects\n# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)\n# 99-07-26 fl Automatically hook into Tkinter (if possible)\n# 99-08-15 fl Hook uses _imagingtk instead of _imaging\n#\n# Copyright (c) 1997-1999 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport 
tkinter\nfrom io import BytesIO\n\nfrom . import Image\nfrom ._deprecate import deprecate\n\n# --------------------------------------------------------------------\n# Check for Tkinter interface hooks\n\n_pilbitmap_ok = None\n\n\ndef _pilbitmap_check():\n global _pilbitmap_ok\n if _pilbitmap_ok is None:\n try:\n im = Image.new(\"1\", (1, 1))\n tkinter.BitmapImage(data=f\"PIL:{im.im.id}\")\n _pilbitmap_ok = 1\n except tkinter.TclError:\n _pilbitmap_ok = 0\n return _pilbitmap_ok\n\n\ndef _get_image_from_kw(kw):\n source = None\n if \"file\" in kw:\n source = kw.pop(\"file\")\n elif \"data\" in kw:\n source = BytesIO(kw.pop(\"data\"))\n if source:\n return Image.open(source)\n\n\ndef _pyimagingtkcall(command, photo, id):\n tk = photo.tk\n try:\n tk.call(command, photo, id)\n except tkinter.TclError:\n # activate Tkinter hook\n # may raise an error if it cannot attach to Tkinter\n from . import _imagingtk\n\n _imagingtk.tkinit(tk.interpaddr())\n tk.call(command, photo, id)\n\n\n# --------------------------------------------------------------------\n# PhotoImage\n\n\nclass PhotoImage:\n \"\"\"\n A Tkinter-compatible photo image. This can be used\n everywhere Tkinter expects an image object. If the image is an RGBA\n image, pixels having alpha 0 are treated as transparent.\n\n The constructor takes either a PIL image, or a mode and a size.\n Alternatively, you can use the ``file`` or ``data`` options to initialize\n the photo image object.\n\n :param image: Either a PIL image, or a mode string. If a mode string is\n used, a size must also be given.\n :param size: If the first argument is a mode string, this defines the size\n of the image.\n :keyword file: A filename to load the image from (using\n ``Image.open(file)``).\n :keyword data: An 8-bit string containing image data (as loaded from an\n image file).\n \"\"\"\n\n def __init__(self, image=None, size=None, **kw):\n\n # Tk compatibility: file or data\n if image is None:\n image = _get_image_from_kw(kw)\n\n if hasattr(image, \"mode\") and hasattr(image, \"size\"):\n # got an image instead of a mode\n mode = image.mode\n if mode == \"P\":\n # palette mapped data\n image.load()\n try:\n mode = image.palette.mode\n except AttributeError:\n mode = \"RGB\" # default\n size = image.size\n kw[\"width\"], kw[\"height\"] = size\n else:\n mode = image\n image = None\n\n if mode not in [\"1\", \"L\", \"RGB\", \"RGBA\"]:\n mode = Image.getmodebase(mode)\n\n self.__mode = mode\n self.__size = size\n self.__photo = tkinter.PhotoImage(**kw)\n self.tk = self.__photo.tk\n if image:\n self.paste(image)\n\n def __del__(self):\n name = self.__photo.name\n self.__photo.name = None\n try:\n self.__photo.tk.call(\"image\", \"delete\", name)\n except Exception:\n pass # ignore internal errors\n\n def __str__(self):\n \"\"\"\n Get the Tkinter photo image identifier. This method is automatically\n called by Tkinter whenever a PhotoImage object is passed to a Tkinter\n method.\n\n :return: A Tkinter photo image identifier (a string).\n \"\"\"\n return str(self.__photo)\n\n def width(self):\n \"\"\"\n Get the width of the image.\n\n :return: The width, in pixels.\n \"\"\"\n return self.__size[0]\n\n def height(self):\n \"\"\"\n Get the height of the image.\n\n :return: The height, in pixels.\n \"\"\"\n return self.__size[1]\n\n def paste(self, im, box=None):\n \"\"\"\n Paste a PIL image into the photo image. Note that this can\n be very slow if the photo image is displayed.\n\n :param im: A PIL image. The size must match the target region. 
If the\n mode does not match, the image is converted to the mode of\n the bitmap image.\n :param box: Deprecated. This parameter will be removed in Pillow 10\n (2023-07-01).\n \"\"\"\n\n if box is not None:\n deprecate(\"The box parameter\", 10, None)\n\n # convert to blittable\n im.load()\n image = im.im\n if image.isblock() and im.mode == self.__mode:\n block = image\n else:\n block = image.new_block(self.__mode, im.size)\n image.convert2(block, image) # convert directly between buffers\n\n _pyimagingtkcall(\"PyImagingPhoto\", self.__photo, block.id)\n\n\n# --------------------------------------------------------------------\n# BitmapImage\n\n\nclass BitmapImage:\n \"\"\"\n A Tkinter-compatible bitmap image. This can be used everywhere Tkinter\n expects an image object.\n\n The given image must have mode \"1\". Pixels having value 0 are treated as\n transparent. Options, if any, are passed on to Tkinter. The most commonly\n used option is ``foreground``, which is used to specify the color for the\n non-transparent parts. See the Tkinter documentation for information on\n how to specify colours.\n\n :param image: A PIL image.\n \"\"\"\n\n def __init__(self, image=None, **kw):\n\n # Tk compatibility: file or data\n if image is None:\n image = _get_image_from_kw(kw)\n\n self.__mode = image.mode\n self.__size = image.size\n\n if _pilbitmap_check():\n # fast way (requires the pilbitmap booster patch)\n image.load()\n kw[\"data\"] = f\"PIL:{image.im.id}\"\n self.__im = image # must keep a reference\n else:\n # slow but safe way\n kw[\"data\"] = image.tobitmap()\n self.__photo = tkinter.BitmapImage(**kw)\n\n def __del__(self):\n name = self.__photo.name\n self.__photo.name = None\n try:\n self.__photo.tk.call(\"image\", \"delete\", name)\n except Exception:\n pass # ignore internal errors\n\n def width(self):\n \"\"\"\n Get the width of the image.\n\n :return: The width, in pixels.\n \"\"\"\n return self.__size[0]\n\n def height(self):\n \"\"\"\n Get the height of the image.\n\n :return: The height, in pixels.\n \"\"\"\n return self.__size[1]\n\n def __str__(self):\n \"\"\"\n Get the Tkinter bitmap image identifier. 
This method is automatically\n called by Tkinter whenever a BitmapImage object is passed to a Tkinter\n method.\n\n :return: A Tkinter bitmap image identifier (a string).\n \"\"\"\n return str(self.__photo)\n\n\ndef getimage(photo):\n \"\"\"Copies the contents of a PhotoImage to a PIL image memory.\"\"\"\n im = Image.new(\"RGBA\", (photo.width(), photo.height()))\n block = im.im\n\n _pyimagingtkcall(\"PyImagingPhotoGet\", photo, block.id)\n\n return im\n\n\ndef _show(image, title):\n \"\"\"Helper for the Image.show method.\"\"\"\n\n class UI(tkinter.Label):\n def __init__(self, master, im):\n if im.mode == \"1\":\n self.image = BitmapImage(im, foreground=\"white\", master=master)\n else:\n self.image = PhotoImage(im, master=master)\n super().__init__(master, image=self.image, bg=\"black\", bd=0)\n\n if not tkinter._default_root:\n raise OSError(\"tkinter not initialized\")\n top = tkinter.Toplevel()\n if title:\n top.title(title)\n UI(top, image).pack()\n", "path": "src/PIL/ImageTk.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# a Tk display interface\n#\n# History:\n# 96-04-08 fl Created\n# 96-09-06 fl Added getimage method\n# 96-11-01 fl Rewritten, removed image attribute and crop method\n# 97-05-09 fl Use PyImagingPaste method instead of image type\n# 97-05-12 fl Minor tweaks to match the IFUNC95 interface\n# 97-05-17 fl Support the \"pilbitmap\" booster patch\n# 97-06-05 fl Added file= and data= argument to image constructors\n# 98-03-09 fl Added width and height methods to Image classes\n# 98-07-02 fl Use default mode for \"P\" images without palette attribute\n# 98-07-02 fl Explicitly destroy Tkinter image objects\n# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)\n# 99-07-26 fl Automatically hook into Tkinter (if possible)\n# 99-08-15 fl Hook uses _imagingtk instead of _imaging\n#\n# Copyright (c) 1997-1999 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport tkinter\nfrom io import BytesIO\n\nfrom . import Image\nfrom ._deprecate import deprecate\n\n# --------------------------------------------------------------------\n# Check for Tkinter interface hooks\n\n_pilbitmap_ok = None\n\n\ndef _pilbitmap_check():\n global _pilbitmap_ok\n if _pilbitmap_ok is None:\n try:\n im = Image.new(\"1\", (1, 1))\n tkinter.BitmapImage(data=f\"PIL:{im.im.id}\")\n _pilbitmap_ok = 1\n except tkinter.TclError:\n _pilbitmap_ok = 0\n return _pilbitmap_ok\n\n\ndef _get_image_from_kw(kw):\n source = None\n if \"file\" in kw:\n source = kw.pop(\"file\")\n elif \"data\" in kw:\n source = BytesIO(kw.pop(\"data\"))\n if source:\n return Image.open(source)\n\n\ndef _pyimagingtkcall(command, photo, id):\n tk = photo.tk\n try:\n tk.call(command, photo, id)\n except tkinter.TclError:\n # activate Tkinter hook\n # may raise an error if it cannot attach to Tkinter\n from . import _imagingtk\n\n _imagingtk.tkinit(tk.interpaddr())\n tk.call(command, photo, id)\n\n\n# --------------------------------------------------------------------\n# PhotoImage\n\n\nclass PhotoImage:\n \"\"\"\n A Tkinter-compatible photo image. This can be used\n everywhere Tkinter expects an image object. 
If the image is an RGBA\n image, pixels having alpha 0 are treated as transparent.\n\n The constructor takes either a PIL image, or a mode and a size.\n Alternatively, you can use the ``file`` or ``data`` options to initialize\n the photo image object.\n\n :param image: Either a PIL image, or a mode string. If a mode string is\n used, a size must also be given.\n :param size: If the first argument is a mode string, this defines the size\n of the image.\n :keyword file: A filename to load the image from (using\n ``Image.open(file)``).\n :keyword data: An 8-bit string containing image data (as loaded from an\n image file).\n \"\"\"\n\n def __init__(self, image=None, size=None, **kw):\n\n # Tk compatibility: file or data\n if image is None:\n image = _get_image_from_kw(kw)\n\n if hasattr(image, \"mode\") and hasattr(image, \"size\"):\n # got an image instead of a mode\n mode = image.mode\n if mode == \"P\":\n # palette mapped data\n image.apply_transparency()\n image.load()\n try:\n mode = image.palette.mode\n except AttributeError:\n mode = \"RGB\" # default\n size = image.size\n kw[\"width\"], kw[\"height\"] = size\n else:\n mode = image\n image = None\n\n if mode not in [\"1\", \"L\", \"RGB\", \"RGBA\"]:\n mode = Image.getmodebase(mode)\n\n self.__mode = mode\n self.__size = size\n self.__photo = tkinter.PhotoImage(**kw)\n self.tk = self.__photo.tk\n if image:\n self.paste(image)\n\n def __del__(self):\n name = self.__photo.name\n self.__photo.name = None\n try:\n self.__photo.tk.call(\"image\", \"delete\", name)\n except Exception:\n pass # ignore internal errors\n\n def __str__(self):\n \"\"\"\n Get the Tkinter photo image identifier. This method is automatically\n called by Tkinter whenever a PhotoImage object is passed to a Tkinter\n method.\n\n :return: A Tkinter photo image identifier (a string).\n \"\"\"\n return str(self.__photo)\n\n def width(self):\n \"\"\"\n Get the width of the image.\n\n :return: The width, in pixels.\n \"\"\"\n return self.__size[0]\n\n def height(self):\n \"\"\"\n Get the height of the image.\n\n :return: The height, in pixels.\n \"\"\"\n return self.__size[1]\n\n def paste(self, im, box=None):\n \"\"\"\n Paste a PIL image into the photo image. Note that this can\n be very slow if the photo image is displayed.\n\n :param im: A PIL image. The size must match the target region. If the\n mode does not match, the image is converted to the mode of\n the bitmap image.\n :param box: Deprecated. This parameter will be removed in Pillow 10\n (2023-07-01).\n \"\"\"\n\n if box is not None:\n deprecate(\"The box parameter\", 10, None)\n\n # convert to blittable\n im.load()\n image = im.im\n if image.isblock() and im.mode == self.__mode:\n block = image\n else:\n block = image.new_block(self.__mode, im.size)\n image.convert2(block, image) # convert directly between buffers\n\n _pyimagingtkcall(\"PyImagingPhoto\", self.__photo, block.id)\n\n\n# --------------------------------------------------------------------\n# BitmapImage\n\n\nclass BitmapImage:\n \"\"\"\n A Tkinter-compatible bitmap image. This can be used everywhere Tkinter\n expects an image object.\n\n The given image must have mode \"1\". Pixels having value 0 are treated as\n transparent. Options, if any, are passed on to Tkinter. The most commonly\n used option is ``foreground``, which is used to specify the color for the\n non-transparent parts. 
See the Tkinter documentation for information on\n how to specify colours.\n\n :param image: A PIL image.\n \"\"\"\n\n def __init__(self, image=None, **kw):\n\n # Tk compatibility: file or data\n if image is None:\n image = _get_image_from_kw(kw)\n\n self.__mode = image.mode\n self.__size = image.size\n\n if _pilbitmap_check():\n # fast way (requires the pilbitmap booster patch)\n image.load()\n kw[\"data\"] = f\"PIL:{image.im.id}\"\n self.__im = image # must keep a reference\n else:\n # slow but safe way\n kw[\"data\"] = image.tobitmap()\n self.__photo = tkinter.BitmapImage(**kw)\n\n def __del__(self):\n name = self.__photo.name\n self.__photo.name = None\n try:\n self.__photo.tk.call(\"image\", \"delete\", name)\n except Exception:\n pass # ignore internal errors\n\n def width(self):\n \"\"\"\n Get the width of the image.\n\n :return: The width, in pixels.\n \"\"\"\n return self.__size[0]\n\n def height(self):\n \"\"\"\n Get the height of the image.\n\n :return: The height, in pixels.\n \"\"\"\n return self.__size[1]\n\n def __str__(self):\n \"\"\"\n Get the Tkinter bitmap image identifier. This method is automatically\n called by Tkinter whenever a BitmapImage object is passed to a Tkinter\n method.\n\n :return: A Tkinter bitmap image identifier (a string).\n \"\"\"\n return str(self.__photo)\n\n\ndef getimage(photo):\n \"\"\"Copies the contents of a PhotoImage to a PIL image memory.\"\"\"\n im = Image.new(\"RGBA\", (photo.width(), photo.height()))\n block = im.im\n\n _pyimagingtkcall(\"PyImagingPhotoGet\", photo, block.id)\n\n return im\n\n\ndef _show(image, title):\n \"\"\"Helper for the Image.show method.\"\"\"\n\n class UI(tkinter.Label):\n def __init__(self, master, im):\n if im.mode == \"1\":\n self.image = BitmapImage(im, foreground=\"white\", master=master)\n else:\n self.image = PhotoImage(im, master=master)\n super().__init__(master, image=self.image, bg=\"black\", bd=0)\n\n if not tkinter._default_root:\n raise OSError(\"tkinter not initialized\")\n top = tkinter.Toplevel()\n if title:\n top.title(title)\n UI(top, image).pack()\n", "path": "src/PIL/ImageTk.py"}]}
| 3,830 | 90 |
gh_patches_debug_2599
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3323
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/fx/tracer/_tracer_utils.py`
Content:
```
1 from typing import List, Union, Any
2 from ..proxy import ColoProxy, ColoAttribute
3 import torch
4 from .meta_patch import meta_patched_function, meta_patched_module
5
6 __all__ = ['is_element_in_list', 'extract_meta']
7
8
9 def is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):
10 if isinstance(elements, (tuple, list, set)):
11 for ele in elements:
12 if ele not in list_:
13 return False, ele
14 else:
15 if elements not in list_:
16 return False, elements
17
18 return True, None
19
20
21 def extract_meta(*args, **kwargs):
22
23 def _convert(val):
24 if isinstance(val, ColoProxy):
25 return val.meta_data
26 elif isinstance(val, (list, tuple)):
27 return type(val)([_convert(ele) for ele in val])
28
29 return val
30
31 new_args = [_convert(val) for val in args]
32 new_kwargs = {k: _convert(v) for k, v in kwargs.items()}
33 return new_args, new_kwargs
34
35
36 def compute_meta_data_for_functions_proxy(target, args, kwargs):
37 args_metas, kwargs_metas = extract_meta(*args, **kwargs)
38
39 # fetch patched function
40 if meta_patched_function.has(target):
41 meta_target = meta_patched_function.get(target)
42 elif meta_patched_function.has(target.__name__):
43 meta_target = meta_patched_function.get(target.__name__)
44 else:
45 meta_target = target
46 meta_out = meta_target(*args_metas, **kwargs_metas)
47 if isinstance(meta_out, torch.Tensor):
48 meta_out = meta_out.to(device="meta")
49
50 return meta_out
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/colossalai/fx/tracer/_tracer_utils.py b/colossalai/fx/tracer/_tracer_utils.py
--- a/colossalai/fx/tracer/_tracer_utils.py
+++ b/colossalai/fx/tracer/_tracer_utils.py
@@ -1,6 +1,8 @@
-from typing import List, Union, Any
-from ..proxy import ColoProxy, ColoAttribute
+from typing import Any, List, Union
+
import torch
+
+from ..proxy import ColoAttribute, ColoProxy
from .meta_patch import meta_patched_function, meta_patched_module
__all__ = ['is_element_in_list', 'extract_meta']
|
{"golden_diff": "diff --git a/colossalai/fx/tracer/_tracer_utils.py b/colossalai/fx/tracer/_tracer_utils.py\n--- a/colossalai/fx/tracer/_tracer_utils.py\n+++ b/colossalai/fx/tracer/_tracer_utils.py\n@@ -1,6 +1,8 @@\n-from typing import List, Union, Any\n-from ..proxy import ColoProxy, ColoAttribute\n+from typing import Any, List, Union\n+\n import torch\n+\n+from ..proxy import ColoAttribute, ColoProxy\n from .meta_patch import meta_patched_function, meta_patched_module\n \n __all__ = ['is_element_in_list', 'extract_meta']\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import List, Union, Any\nfrom ..proxy import ColoProxy, ColoAttribute\nimport torch\nfrom .meta_patch import meta_patched_function, meta_patched_module\n\n__all__ = ['is_element_in_list', 'extract_meta']\n\n\ndef is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):\n if isinstance(elements, (tuple, list, set)):\n for ele in elements:\n if ele not in list_:\n return False, ele\n else:\n if elements not in list_:\n return False, elements\n\n return True, None\n\n\ndef extract_meta(*args, **kwargs):\n\n def _convert(val):\n if isinstance(val, ColoProxy):\n return val.meta_data\n elif isinstance(val, (list, tuple)):\n return type(val)([_convert(ele) for ele in val])\n\n return val\n\n new_args = [_convert(val) for val in args]\n new_kwargs = {k: _convert(v) for k, v in kwargs.items()}\n return new_args, new_kwargs\n\n\ndef compute_meta_data_for_functions_proxy(target, args, kwargs):\n args_metas, kwargs_metas = extract_meta(*args, **kwargs)\n\n # fetch patched function\n if meta_patched_function.has(target):\n meta_target = meta_patched_function.get(target)\n elif meta_patched_function.has(target.__name__):\n meta_target = meta_patched_function.get(target.__name__)\n else:\n meta_target = target\n meta_out = meta_target(*args_metas, **kwargs_metas)\n if isinstance(meta_out, torch.Tensor):\n meta_out = meta_out.to(device=\"meta\")\n\n return meta_out\n", "path": "colossalai/fx/tracer/_tracer_utils.py"}], "after_files": [{"content": "from typing import Any, List, Union\n\nimport torch\n\nfrom ..proxy import ColoAttribute, ColoProxy\nfrom .meta_patch import meta_patched_function, meta_patched_module\n\n__all__ = ['is_element_in_list', 'extract_meta']\n\n\ndef is_element_in_list(elements: Union[List[Any], Any], list_: List[Any]):\n if isinstance(elements, (tuple, list, set)):\n for ele in elements:\n if ele not in list_:\n return False, ele\n else:\n if elements not in list_:\n return False, elements\n\n return True, None\n\n\ndef extract_meta(*args, **kwargs):\n\n def _convert(val):\n if isinstance(val, ColoProxy):\n return val.meta_data\n elif isinstance(val, (list, tuple)):\n return type(val)([_convert(ele) for ele in val])\n\n return val\n\n new_args = [_convert(val) for val in args]\n new_kwargs = {k: _convert(v) for k, v in kwargs.items()}\n return new_args, new_kwargs\n\n\ndef compute_meta_data_for_functions_proxy(target, args, kwargs):\n args_metas, kwargs_metas = extract_meta(*args, **kwargs)\n\n # fetch patched function\n if meta_patched_function.has(target):\n meta_target = meta_patched_function.get(target)\n elif meta_patched_function.has(target.__name__):\n meta_target = meta_patched_function.get(target.__name__)\n else:\n meta_target = target\n meta_out = meta_target(*args_metas, **kwargs_metas)\n if isinstance(meta_out, torch.Tensor):\n meta_out = meta_out.to(device=\"meta\")\n\n return 
meta_out\n", "path": "colossalai/fx/tracer/_tracer_utils.py"}]}
| 747 | 144 |
| gh_patches_debug_4707 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-747 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add publisher as a book field
Currently, the only way to distinguish editions is by their cover and their publication date. It would be nice if editions were also referred to by their publisher.
This is especially useful as part of the alt text of a cover for screen reader users, but also great for people who would like to catalog which edition is by which publisher. I believe OpenLibrary provides a publisher field as well, so this could be automagically filled during an import.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/models/book.py`
Content:
```
1 """ database schema for books and shelves """
2 import re
3
4 from django.db import models
5 from model_utils.managers import InheritanceManager
6
7 from bookwyrm import activitypub
8 from bookwyrm.settings import DOMAIN
9
10 from .activitypub_mixin import OrderedCollectionPageMixin, ObjectMixin
11 from .base_model import BookWyrmModel
12 from . import fields
13
14
15 class BookDataModel(ObjectMixin, BookWyrmModel):
16 """ fields shared between editable book data (books, works, authors) """
17
18 origin_id = models.CharField(max_length=255, null=True, blank=True)
19 openlibrary_key = fields.CharField(
20 max_length=255, blank=True, null=True, deduplication_field=True
21 )
22 librarything_key = fields.CharField(
23 max_length=255, blank=True, null=True, deduplication_field=True
24 )
25 goodreads_key = fields.CharField(
26 max_length=255, blank=True, null=True, deduplication_field=True
27 )
28
29 last_edited_by = models.ForeignKey("User", on_delete=models.PROTECT, null=True)
30
31 class Meta:
32 """ can't initialize this model, that wouldn't make sense """
33
34 abstract = True
35
36 def save(self, *args, **kwargs):
37 """ ensure that the remote_id is within this instance """
38 if self.id:
39 self.remote_id = self.get_remote_id()
40 else:
41 self.origin_id = self.remote_id
42 self.remote_id = None
43 return super().save(*args, **kwargs)
44
45 def broadcast(self, activity, sender, software="bookwyrm"):
46 """ only send book data updates to other bookwyrm instances """
47 super().broadcast(activity, sender, software=software)
48
49
50 class Book(BookDataModel):
51 """ a generic book, which can mean either an edition or a work """
52
53 connector = models.ForeignKey("Connector", on_delete=models.PROTECT, null=True)
54
55 # book/work metadata
56 title = fields.CharField(max_length=255)
57 sort_title = fields.CharField(max_length=255, blank=True, null=True)
58 subtitle = fields.CharField(max_length=255, blank=True, null=True)
59 description = fields.HtmlField(blank=True, null=True)
60 languages = fields.ArrayField(
61 models.CharField(max_length=255), blank=True, default=list
62 )
63 series = fields.CharField(max_length=255, blank=True, null=True)
64 series_number = fields.CharField(max_length=255, blank=True, null=True)
65 subjects = fields.ArrayField(
66 models.CharField(max_length=255), blank=True, null=True, default=list
67 )
68 subject_places = fields.ArrayField(
69 models.CharField(max_length=255), blank=True, null=True, default=list
70 )
71 authors = fields.ManyToManyField("Author")
72 cover = fields.ImageField(
73 upload_to="covers/", blank=True, null=True, alt_field="alt_text"
74 )
75 first_published_date = fields.DateTimeField(blank=True, null=True)
76 published_date = fields.DateTimeField(blank=True, null=True)
77
78 objects = InheritanceManager()
79
80 @property
81 def author_text(self):
82 """ format a list of authors """
83 return ", ".join(a.name for a in self.authors.all())
84
85 @property
86 def latest_readthrough(self):
87 """ most recent readthrough activity """
88 return self.readthrough_set.order_by("-updated_date").first()
89
90 @property
91 def edition_info(self):
92 """ properties of this edition, as a string """
93 items = [
94 self.physical_format if hasattr(self, "physical_format") else None,
95 self.languages[0] + " language"
96 if self.languages and self.languages[0] != "English"
97 else None,
98 str(self.published_date.year) if self.published_date else None,
99 ]
100 return ", ".join(i for i in items if i)
101
102 @property
103 def alt_text(self):
104 """ image alt test """
105 text = "%s" % self.title
106 if self.edition_info:
107 text += " (%s)" % self.edition_info
108 return text
109
110 def save(self, *args, **kwargs):
111 """ can't be abstract for query reasons, but you shouldn't USE it """
112 if not isinstance(self, Edition) and not isinstance(self, Work):
113 raise ValueError("Books should be added as Editions or Works")
114 return super().save(*args, **kwargs)
115
116 def get_remote_id(self):
117 """ editions and works both use "book" instead of model_name """
118 return "https://%s/book/%d" % (DOMAIN, self.id)
119
120 def __repr__(self):
121 return "<{} key={!r} title={!r}>".format(
122 self.__class__,
123 self.openlibrary_key,
124 self.title,
125 )
126
127
128 class Work(OrderedCollectionPageMixin, Book):
129 """ a work (an abstract concept of a book that manifests in an edition) """
130
131 # library of congress catalog control number
132 lccn = fields.CharField(
133 max_length=255, blank=True, null=True, deduplication_field=True
134 )
135 # this has to be nullable but should never be null
136 default_edition = fields.ForeignKey(
137 "Edition", on_delete=models.PROTECT, null=True, load_remote=False
138 )
139
140 def save(self, *args, **kwargs):
141 """ set some fields on the edition object """
142 # set rank
143 for edition in self.editions.all():
144 edition.save()
145 return super().save(*args, **kwargs)
146
147 def get_default_edition(self):
148 """ in case the default edition is not set """
149 return self.default_edition or self.editions.order_by("-edition_rank").first()
150
151 def to_edition_list(self, **kwargs):
152 """ an ordered collection of editions """
153 return self.to_ordered_collection(
154 self.editions.order_by("-edition_rank").all(),
155 remote_id="%s/editions" % self.remote_id,
156 **kwargs
157 )
158
159 activity_serializer = activitypub.Work
160 serialize_reverse_fields = [("editions", "editions", "-edition_rank")]
161 deserialize_reverse_fields = [("editions", "editions")]
162
163
164 class Edition(Book):
165 """ an edition of a book """
166
167 # these identifiers only apply to editions, not works
168 isbn_10 = fields.CharField(
169 max_length=255, blank=True, null=True, deduplication_field=True
170 )
171 isbn_13 = fields.CharField(
172 max_length=255, blank=True, null=True, deduplication_field=True
173 )
174 oclc_number = fields.CharField(
175 max_length=255, blank=True, null=True, deduplication_field=True
176 )
177 asin = fields.CharField(
178 max_length=255, blank=True, null=True, deduplication_field=True
179 )
180 pages = fields.IntegerField(blank=True, null=True)
181 physical_format = fields.CharField(max_length=255, blank=True, null=True)
182 publishers = fields.ArrayField(
183 models.CharField(max_length=255), blank=True, default=list
184 )
185 shelves = models.ManyToManyField(
186 "Shelf",
187 symmetrical=False,
188 through="ShelfBook",
189 through_fields=("book", "shelf"),
190 )
191 parent_work = fields.ForeignKey(
192 "Work",
193 on_delete=models.PROTECT,
194 null=True,
195 related_name="editions",
196 activitypub_field="work",
197 )
198 edition_rank = fields.IntegerField(default=0)
199
200 activity_serializer = activitypub.Edition
201 name_field = "title"
202
203 def get_rank(self):
204 """ calculate how complete the data is on this edition """
205 if self.parent_work and self.parent_work.default_edition == self:
206 # default edition has the highest rank
207 return 20
208 rank = 0
209 rank += int(bool(self.cover)) * 3
210 rank += int(bool(self.isbn_13))
211 rank += int(bool(self.isbn_10))
212 rank += int(bool(self.oclc_number))
213 rank += int(bool(self.pages))
214 rank += int(bool(self.physical_format))
215 rank += int(bool(self.description))
216 # max rank is 9
217 return rank
218
219 def save(self, *args, **kwargs):
220 """ set some fields on the edition object """
221 # calculate isbn 10/13
222 if self.isbn_13 and self.isbn_13[:3] == "978" and not self.isbn_10:
223 self.isbn_10 = isbn_13_to_10(self.isbn_13)
224 if self.isbn_10 and not self.isbn_13:
225 self.isbn_13 = isbn_10_to_13(self.isbn_10)
226
227 # set rank
228 self.edition_rank = self.get_rank()
229
230 return super().save(*args, **kwargs)
231
232
233 def isbn_10_to_13(isbn_10):
234 """ convert an isbn 10 into an isbn 13 """
235 isbn_10 = re.sub(r"[^0-9X]", "", isbn_10)
236 # drop the last character of the isbn 10 number (the original checkdigit)
237 converted = isbn_10[:9]
238 # add "978" to the front
239 converted = "978" + converted
240 # add a check digit to the end
241 # multiply the odd digits by 1 and the even digits by 3 and sum them
242 try:
243 checksum = sum(int(i) for i in converted[::2]) + sum(
244 int(i) * 3 for i in converted[1::2]
245 )
246 except ValueError:
247 return None
248 # add the checksum mod 10 to the end
249 checkdigit = checksum % 10
250 if checkdigit != 0:
251 checkdigit = 10 - checkdigit
252 return converted + str(checkdigit)
253
254
255 def isbn_13_to_10(isbn_13):
256 """ convert isbn 13 to 10, if possible """
257 if isbn_13[:3] != "978":
258 return None
259
260 isbn_13 = re.sub(r"[^0-9X]", "", isbn_13)
261
262 # remove '978' and old checkdigit
263 converted = isbn_13[3:-1]
264 # calculate checkdigit
265 # multiple each digit by 10,9,8.. successively and sum them
266 try:
267 checksum = sum(int(d) * (10 - idx) for (idx, d) in enumerate(converted))
268 except ValueError:
269 return None
270 checkdigit = checksum % 11
271 checkdigit = 11 - checkdigit
272 if checkdigit == 10:
273 checkdigit = "X"
274 return converted + str(checkdigit)
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/models/book.py b/bookwyrm/models/book.py
--- a/bookwyrm/models/book.py
+++ b/bookwyrm/models/book.py
@@ -96,6 +96,7 @@
if self.languages and self.languages[0] != "English"
else None,
str(self.published_date.year) if self.published_date else None,
+ ", ".join(self.publishers) if hasattr(self, "publishers") else None,
]
return ", ".join(i for i in items if i)
|
{"golden_diff": "diff --git a/bookwyrm/models/book.py b/bookwyrm/models/book.py\n--- a/bookwyrm/models/book.py\n+++ b/bookwyrm/models/book.py\n@@ -96,6 +96,7 @@\n if self.languages and self.languages[0] != \"English\"\n else None,\n str(self.published_date.year) if self.published_date else None,\n+ \", \".join(self.publishers) if hasattr(self, \"publishers\") else None,\n ]\n return \", \".join(i for i in items if i)\n", "issue": "Add publisher as a book field\nCurrently, the only way to distinguish editions is by their cover and their publication date. It would be nice if editions were also referred to by their publisher.\r\n\r\nThis is especially useful as part of the alt text of a cover for screen reader users, but also great for people who would like to catalog which edition is by which publisher. I believe OpenLibrary provides a publisher field as well, so this could be automagically filled during an import.\n", "before_files": [{"content": "\"\"\" database schema for books and shelves \"\"\"\nimport re\n\nfrom django.db import models\nfrom model_utils.managers import InheritanceManager\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .activitypub_mixin import OrderedCollectionPageMixin, ObjectMixin\nfrom .base_model import BookWyrmModel\nfrom . import fields\n\n\nclass BookDataModel(ObjectMixin, BookWyrmModel):\n \"\"\" fields shared between editable book data (books, works, authors) \"\"\"\n\n origin_id = models.CharField(max_length=255, null=True, blank=True)\n openlibrary_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n librarything_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n goodreads_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n\n last_edited_by = models.ForeignKey(\"User\", on_delete=models.PROTECT, null=True)\n\n class Meta:\n \"\"\" can't initialize this model, that wouldn't make sense \"\"\"\n\n abstract = True\n\n def save(self, *args, **kwargs):\n \"\"\" ensure that the remote_id is within this instance \"\"\"\n if self.id:\n self.remote_id = self.get_remote_id()\n else:\n self.origin_id = self.remote_id\n self.remote_id = None\n return super().save(*args, **kwargs)\n\n def broadcast(self, activity, sender, software=\"bookwyrm\"):\n \"\"\" only send book data updates to other bookwyrm instances \"\"\"\n super().broadcast(activity, sender, software=software)\n\n\nclass Book(BookDataModel):\n \"\"\" a generic book, which can mean either an edition or a work \"\"\"\n\n connector = models.ForeignKey(\"Connector\", on_delete=models.PROTECT, null=True)\n\n # book/work metadata\n title = fields.CharField(max_length=255)\n sort_title = fields.CharField(max_length=255, blank=True, null=True)\n subtitle = fields.CharField(max_length=255, blank=True, null=True)\n description = fields.HtmlField(blank=True, null=True)\n languages = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n series = fields.CharField(max_length=255, blank=True, null=True)\n series_number = fields.CharField(max_length=255, blank=True, null=True)\n subjects = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n subject_places = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n authors = fields.ManyToManyField(\"Author\")\n cover = fields.ImageField(\n upload_to=\"covers/\", blank=True, null=True, 
alt_field=\"alt_text\"\n )\n first_published_date = fields.DateTimeField(blank=True, null=True)\n published_date = fields.DateTimeField(blank=True, null=True)\n\n objects = InheritanceManager()\n\n @property\n def author_text(self):\n \"\"\" format a list of authors \"\"\"\n return \", \".join(a.name for a in self.authors.all())\n\n @property\n def latest_readthrough(self):\n \"\"\" most recent readthrough activity \"\"\"\n return self.readthrough_set.order_by(\"-updated_date\").first()\n\n @property\n def edition_info(self):\n \"\"\" properties of this edition, as a string \"\"\"\n items = [\n self.physical_format if hasattr(self, \"physical_format\") else None,\n self.languages[0] + \" language\"\n if self.languages and self.languages[0] != \"English\"\n else None,\n str(self.published_date.year) if self.published_date else None,\n ]\n return \", \".join(i for i in items if i)\n\n @property\n def alt_text(self):\n \"\"\" image alt test \"\"\"\n text = \"%s\" % self.title\n if self.edition_info:\n text += \" (%s)\" % self.edition_info\n return text\n\n def save(self, *args, **kwargs):\n \"\"\" can't be abstract for query reasons, but you shouldn't USE it \"\"\"\n if not isinstance(self, Edition) and not isinstance(self, Work):\n raise ValueError(\"Books should be added as Editions or Works\")\n return super().save(*args, **kwargs)\n\n def get_remote_id(self):\n \"\"\" editions and works both use \"book\" instead of model_name \"\"\"\n return \"https://%s/book/%d\" % (DOMAIN, self.id)\n\n def __repr__(self):\n return \"<{} key={!r} title={!r}>\".format(\n self.__class__,\n self.openlibrary_key,\n self.title,\n )\n\n\nclass Work(OrderedCollectionPageMixin, Book):\n \"\"\" a work (an abstract concept of a book that manifests in an edition) \"\"\"\n\n # library of congress catalog control number\n lccn = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # this has to be nullable but should never be null\n default_edition = fields.ForeignKey(\n \"Edition\", on_delete=models.PROTECT, null=True, load_remote=False\n )\n\n def save(self, *args, **kwargs):\n \"\"\" set some fields on the edition object \"\"\"\n # set rank\n for edition in self.editions.all():\n edition.save()\n return super().save(*args, **kwargs)\n\n def get_default_edition(self):\n \"\"\" in case the default edition is not set \"\"\"\n return self.default_edition or self.editions.order_by(\"-edition_rank\").first()\n\n def to_edition_list(self, **kwargs):\n \"\"\" an ordered collection of editions \"\"\"\n return self.to_ordered_collection(\n self.editions.order_by(\"-edition_rank\").all(),\n remote_id=\"%s/editions\" % self.remote_id,\n **kwargs\n )\n\n activity_serializer = activitypub.Work\n serialize_reverse_fields = [(\"editions\", \"editions\", \"-edition_rank\")]\n deserialize_reverse_fields = [(\"editions\", \"editions\")]\n\n\nclass Edition(Book):\n \"\"\" an edition of a book \"\"\"\n\n # these identifiers only apply to editions, not works\n isbn_10 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isbn_13 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n oclc_number = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n asin = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n pages = fields.IntegerField(blank=True, null=True)\n physical_format = fields.CharField(max_length=255, blank=True, null=True)\n publishers = 
fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n shelves = models.ManyToManyField(\n \"Shelf\",\n symmetrical=False,\n through=\"ShelfBook\",\n through_fields=(\"book\", \"shelf\"),\n )\n parent_work = fields.ForeignKey(\n \"Work\",\n on_delete=models.PROTECT,\n null=True,\n related_name=\"editions\",\n activitypub_field=\"work\",\n )\n edition_rank = fields.IntegerField(default=0)\n\n activity_serializer = activitypub.Edition\n name_field = \"title\"\n\n def get_rank(self):\n \"\"\" calculate how complete the data is on this edition \"\"\"\n if self.parent_work and self.parent_work.default_edition == self:\n # default edition has the highest rank\n return 20\n rank = 0\n rank += int(bool(self.cover)) * 3\n rank += int(bool(self.isbn_13))\n rank += int(bool(self.isbn_10))\n rank += int(bool(self.oclc_number))\n rank += int(bool(self.pages))\n rank += int(bool(self.physical_format))\n rank += int(bool(self.description))\n # max rank is 9\n return rank\n\n def save(self, *args, **kwargs):\n \"\"\" set some fields on the edition object \"\"\"\n # calculate isbn 10/13\n if self.isbn_13 and self.isbn_13[:3] == \"978\" and not self.isbn_10:\n self.isbn_10 = isbn_13_to_10(self.isbn_13)\n if self.isbn_10 and not self.isbn_13:\n self.isbn_13 = isbn_10_to_13(self.isbn_10)\n\n # set rank\n self.edition_rank = self.get_rank()\n\n return super().save(*args, **kwargs)\n\n\ndef isbn_10_to_13(isbn_10):\n \"\"\" convert an isbn 10 into an isbn 13 \"\"\"\n isbn_10 = re.sub(r\"[^0-9X]\", \"\", isbn_10)\n # drop the last character of the isbn 10 number (the original checkdigit)\n converted = isbn_10[:9]\n # add \"978\" to the front\n converted = \"978\" + converted\n # add a check digit to the end\n # multiply the odd digits by 1 and the even digits by 3 and sum them\n try:\n checksum = sum(int(i) for i in converted[::2]) + sum(\n int(i) * 3 for i in converted[1::2]\n )\n except ValueError:\n return None\n # add the checksum mod 10 to the end\n checkdigit = checksum % 10\n if checkdigit != 0:\n checkdigit = 10 - checkdigit\n return converted + str(checkdigit)\n\n\ndef isbn_13_to_10(isbn_13):\n \"\"\" convert isbn 13 to 10, if possible \"\"\"\n if isbn_13[:3] != \"978\":\n return None\n\n isbn_13 = re.sub(r\"[^0-9X]\", \"\", isbn_13)\n\n # remove '978' and old checkdigit\n converted = isbn_13[3:-1]\n # calculate checkdigit\n # multiple each digit by 10,9,8.. successively and sum them\n try:\n checksum = sum(int(d) * (10 - idx) for (idx, d) in enumerate(converted))\n except ValueError:\n return None\n checkdigit = checksum % 11\n checkdigit = 11 - checkdigit\n if checkdigit == 10:\n checkdigit = \"X\"\n return converted + str(checkdigit)\n", "path": "bookwyrm/models/book.py"}], "after_files": [{"content": "\"\"\" database schema for books and shelves \"\"\"\nimport re\n\nfrom django.db import models\nfrom model_utils.managers import InheritanceManager\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .activitypub_mixin import OrderedCollectionPageMixin, ObjectMixin\nfrom .base_model import BookWyrmModel\nfrom . 
import fields\n\n\nclass BookDataModel(ObjectMixin, BookWyrmModel):\n \"\"\" fields shared between editable book data (books, works, authors) \"\"\"\n\n origin_id = models.CharField(max_length=255, null=True, blank=True)\n openlibrary_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n librarything_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n goodreads_key = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n\n last_edited_by = models.ForeignKey(\"User\", on_delete=models.PROTECT, null=True)\n\n class Meta:\n \"\"\" can't initialize this model, that wouldn't make sense \"\"\"\n\n abstract = True\n\n def save(self, *args, **kwargs):\n \"\"\" ensure that the remote_id is within this instance \"\"\"\n if self.id:\n self.remote_id = self.get_remote_id()\n else:\n self.origin_id = self.remote_id\n self.remote_id = None\n return super().save(*args, **kwargs)\n\n def broadcast(self, activity, sender, software=\"bookwyrm\"):\n \"\"\" only send book data updates to other bookwyrm instances \"\"\"\n super().broadcast(activity, sender, software=software)\n\n\nclass Book(BookDataModel):\n \"\"\" a generic book, which can mean either an edition or a work \"\"\"\n\n connector = models.ForeignKey(\"Connector\", on_delete=models.PROTECT, null=True)\n\n # book/work metadata\n title = fields.CharField(max_length=255)\n sort_title = fields.CharField(max_length=255, blank=True, null=True)\n subtitle = fields.CharField(max_length=255, blank=True, null=True)\n description = fields.HtmlField(blank=True, null=True)\n languages = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n series = fields.CharField(max_length=255, blank=True, null=True)\n series_number = fields.CharField(max_length=255, blank=True, null=True)\n subjects = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n subject_places = fields.ArrayField(\n models.CharField(max_length=255), blank=True, null=True, default=list\n )\n authors = fields.ManyToManyField(\"Author\")\n cover = fields.ImageField(\n upload_to=\"covers/\", blank=True, null=True, alt_field=\"alt_text\"\n )\n first_published_date = fields.DateTimeField(blank=True, null=True)\n published_date = fields.DateTimeField(blank=True, null=True)\n\n objects = InheritanceManager()\n\n @property\n def author_text(self):\n \"\"\" format a list of authors \"\"\"\n return \", \".join(a.name for a in self.authors.all())\n\n @property\n def latest_readthrough(self):\n \"\"\" most recent readthrough activity \"\"\"\n return self.readthrough_set.order_by(\"-updated_date\").first()\n\n @property\n def edition_info(self):\n \"\"\" properties of this edition, as a string \"\"\"\n items = [\n self.physical_format if hasattr(self, \"physical_format\") else None,\n self.languages[0] + \" language\"\n if self.languages and self.languages[0] != \"English\"\n else None,\n str(self.published_date.year) if self.published_date else None,\n \", \".join(self.publishers) if hasattr(self, \"publishers\") else None,\n ]\n return \", \".join(i for i in items if i)\n\n @property\n def alt_text(self):\n \"\"\" image alt test \"\"\"\n text = \"%s\" % self.title\n if self.edition_info:\n text += \" (%s)\" % self.edition_info\n return text\n\n def save(self, *args, **kwargs):\n \"\"\" can't be abstract for query reasons, but you shouldn't USE it \"\"\"\n if not isinstance(self, Edition) and not isinstance(self, 
Work):\n raise ValueError(\"Books should be added as Editions or Works\")\n return super().save(*args, **kwargs)\n\n def get_remote_id(self):\n \"\"\" editions and works both use \"book\" instead of model_name \"\"\"\n return \"https://%s/book/%d\" % (DOMAIN, self.id)\n\n def __repr__(self):\n return \"<{} key={!r} title={!r}>\".format(\n self.__class__,\n self.openlibrary_key,\n self.title,\n )\n\n\nclass Work(OrderedCollectionPageMixin, Book):\n \"\"\" a work (an abstract concept of a book that manifests in an edition) \"\"\"\n\n # library of congress catalog control number\n lccn = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # this has to be nullable but should never be null\n default_edition = fields.ForeignKey(\n \"Edition\", on_delete=models.PROTECT, null=True, load_remote=False\n )\n\n def save(self, *args, **kwargs):\n \"\"\" set some fields on the edition object \"\"\"\n # set rank\n for edition in self.editions.all():\n edition.save()\n return super().save(*args, **kwargs)\n\n def get_default_edition(self):\n \"\"\" in case the default edition is not set \"\"\"\n return self.default_edition or self.editions.order_by(\"-edition_rank\").first()\n\n def to_edition_list(self, **kwargs):\n \"\"\" an ordered collection of editions \"\"\"\n return self.to_ordered_collection(\n self.editions.order_by(\"-edition_rank\").all(),\n remote_id=\"%s/editions\" % self.remote_id,\n **kwargs\n )\n\n activity_serializer = activitypub.Work\n serialize_reverse_fields = [(\"editions\", \"editions\", \"-edition_rank\")]\n deserialize_reverse_fields = [(\"editions\", \"editions\")]\n\n\nclass Edition(Book):\n \"\"\" an edition of a book \"\"\"\n\n # these identifiers only apply to editions, not works\n isbn_10 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isbn_13 = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n oclc_number = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n asin = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n pages = fields.IntegerField(blank=True, null=True)\n physical_format = fields.CharField(max_length=255, blank=True, null=True)\n publishers = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n shelves = models.ManyToManyField(\n \"Shelf\",\n symmetrical=False,\n through=\"ShelfBook\",\n through_fields=(\"book\", \"shelf\"),\n )\n parent_work = fields.ForeignKey(\n \"Work\",\n on_delete=models.PROTECT,\n null=True,\n related_name=\"editions\",\n activitypub_field=\"work\",\n )\n edition_rank = fields.IntegerField(default=0)\n\n activity_serializer = activitypub.Edition\n name_field = \"title\"\n\n def get_rank(self):\n \"\"\" calculate how complete the data is on this edition \"\"\"\n if self.parent_work and self.parent_work.default_edition == self:\n # default edition has the highest rank\n return 20\n rank = 0\n rank += int(bool(self.cover)) * 3\n rank += int(bool(self.isbn_13))\n rank += int(bool(self.isbn_10))\n rank += int(bool(self.oclc_number))\n rank += int(bool(self.pages))\n rank += int(bool(self.physical_format))\n rank += int(bool(self.description))\n # max rank is 9\n return rank\n\n def save(self, *args, **kwargs):\n \"\"\" set some fields on the edition object \"\"\"\n # calculate isbn 10/13\n if self.isbn_13 and self.isbn_13[:3] == \"978\" and not self.isbn_10:\n self.isbn_10 = isbn_13_to_10(self.isbn_13)\n if 
self.isbn_10 and not self.isbn_13:\n self.isbn_13 = isbn_10_to_13(self.isbn_10)\n\n # set rank\n self.edition_rank = self.get_rank()\n\n return super().save(*args, **kwargs)\n\n\ndef isbn_10_to_13(isbn_10):\n \"\"\" convert an isbn 10 into an isbn 13 \"\"\"\n isbn_10 = re.sub(r\"[^0-9X]\", \"\", isbn_10)\n # drop the last character of the isbn 10 number (the original checkdigit)\n converted = isbn_10[:9]\n # add \"978\" to the front\n converted = \"978\" + converted\n # add a check digit to the end\n # multiply the odd digits by 1 and the even digits by 3 and sum them\n try:\n checksum = sum(int(i) for i in converted[::2]) + sum(\n int(i) * 3 for i in converted[1::2]\n )\n except ValueError:\n return None\n # add the checksum mod 10 to the end\n checkdigit = checksum % 10\n if checkdigit != 0:\n checkdigit = 10 - checkdigit\n return converted + str(checkdigit)\n\n\ndef isbn_13_to_10(isbn_13):\n \"\"\" convert isbn 13 to 10, if possible \"\"\"\n if isbn_13[:3] != \"978\":\n return None\n\n isbn_13 = re.sub(r\"[^0-9X]\", \"\", isbn_13)\n\n # remove '978' and old checkdigit\n converted = isbn_13[3:-1]\n # calculate checkdigit\n # multiple each digit by 10,9,8.. successively and sum them\n try:\n checksum = sum(int(d) * (10 - idx) for (idx, d) in enumerate(converted))\n except ValueError:\n return None\n checkdigit = checksum % 11\n checkdigit = 11 - checkdigit\n if checkdigit == 10:\n checkdigit = \"X\"\n return converted + str(checkdigit)\n", "path": "bookwyrm/models/book.py"}]}
| 3,444 | 115 |
gh_patches_debug_5791
|
rasdani/github-patches
|
git_diff
|
paperless-ngx__paperless-ngx-195
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Other] Update name to paperless-ngx in docs
https://github.com/paperless-ngx/paperless-ngx/issues/4#issuecomment-1039890021
> I would go through the places where the project name is mentioned and add the x to it. Only actually code (variable names) could remain unchanged for backwards compatibility.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/paperless/urls.py`
Content:
```
1 from django.conf.urls import include
2 from django.contrib import admin
3 from django.contrib.auth.decorators import login_required
4 from django.urls import path, re_path
5 from django.views.decorators.csrf import csrf_exempt
6 from django.views.generic import RedirectView
7 from rest_framework.authtoken import views
8 from rest_framework.routers import DefaultRouter
9
10 from django.utils.translation import gettext_lazy as _
11
12 from django.conf import settings
13
14 from paperless.consumers import StatusConsumer
15 from documents.views import (
16 CorrespondentViewSet,
17 UnifiedSearchViewSet,
18 LogViewSet,
19 TagViewSet,
20 DocumentTypeViewSet,
21 IndexView,
22 SearchAutoCompleteView,
23 StatisticsView,
24 PostDocumentView,
25 SavedViewViewSet,
26 BulkEditView,
27 SelectionDataView,
28 BulkDownloadView,
29 )
30 from paperless.views import FaviconView
31
32 api_router = DefaultRouter()
33 api_router.register(r"correspondents", CorrespondentViewSet)
34 api_router.register(r"document_types", DocumentTypeViewSet)
35 api_router.register(r"documents", UnifiedSearchViewSet)
36 api_router.register(r"logs", LogViewSet, basename="logs")
37 api_router.register(r"tags", TagViewSet)
38 api_router.register(r"saved_views", SavedViewViewSet)
39
40
41 urlpatterns = [
42 re_path(
43 r"^api/",
44 include(
45 [
46 re_path(
47 r"^auth/",
48 include(
49 ("rest_framework.urls", "rest_framework"),
50 namespace="rest_framework",
51 ),
52 ),
53 re_path(
54 r"^search/autocomplete/",
55 SearchAutoCompleteView.as_view(),
56 name="autocomplete",
57 ),
58 re_path(r"^statistics/", StatisticsView.as_view(), name="statistics"),
59 re_path(
60 r"^documents/post_document/",
61 PostDocumentView.as_view(),
62 name="post_document",
63 ),
64 re_path(
65 r"^documents/bulk_edit/", BulkEditView.as_view(), name="bulk_edit"
66 ),
67 re_path(
68 r"^documents/selection_data/",
69 SelectionDataView.as_view(),
70 name="selection_data",
71 ),
72 re_path(
73 r"^documents/bulk_download/",
74 BulkDownloadView.as_view(),
75 name="bulk_download",
76 ),
77 path("token/", views.obtain_auth_token),
78 ]
79 + api_router.urls
80 ),
81 ),
82 re_path(r"^favicon.ico$", FaviconView.as_view(), name="favicon"),
83 re_path(r"admin/", admin.site.urls),
84 re_path(
85 r"^fetch/",
86 include(
87 [
88 re_path(
89 r"^doc/(?P<pk>\d+)$",
90 RedirectView.as_view(
91 url=settings.BASE_URL + "api/documents/%(pk)s/download/"
92 ),
93 ),
94 re_path(
95 r"^thumb/(?P<pk>\d+)$",
96 RedirectView.as_view(
97 url=settings.BASE_URL + "api/documents/%(pk)s/thumb/"
98 ),
99 ),
100 re_path(
101 r"^preview/(?P<pk>\d+)$",
102 RedirectView.as_view(
103 url=settings.BASE_URL + "api/documents/%(pk)s/preview/"
104 ),
105 ),
106 ]
107 ),
108 ),
109 re_path(
110 r"^push$",
111 csrf_exempt(
112 RedirectView.as_view(url=settings.BASE_URL + "api/documents/post_document/")
113 ),
114 ),
115 # Frontend assets TODO: this is pretty bad, but it works.
116 path(
117 "assets/<path:path>",
118 RedirectView.as_view(
119 url=settings.STATIC_URL + "frontend/en-US/assets/%(path)s"
120 ),
121 ),
122 # TODO: with localization, this is even worse! :/
123 # login, logout
124 path("accounts/", include("django.contrib.auth.urls")),
125 # Root of the Frontent
126 re_path(r".*", login_required(IndexView.as_view()), name="base"),
127 ]
128
129
130 websocket_urlpatterns = [
131 re_path(r"ws/status/$", StatusConsumer.as_asgi()),
132 ]
133
134 # Text in each page's <h1> (and above login form).
135 admin.site.site_header = "Paperless-ng"
136 # Text at the end of each page's <title>.
137 admin.site.site_title = "Paperless-ng"
138 # Text at the top of the admin index page.
139 admin.site.index_title = _("Paperless-ng administration")
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/paperless/urls.py b/src/paperless/urls.py
--- a/src/paperless/urls.py
+++ b/src/paperless/urls.py
@@ -132,8 +132,8 @@
]
# Text in each page's <h1> (and above login form).
-admin.site.site_header = "Paperless-ng"
+admin.site.site_header = "Paperless-ngx"
# Text at the end of each page's <title>.
-admin.site.site_title = "Paperless-ng"
+admin.site.site_title = "Paperless-ngx"
# Text at the top of the admin index page.
-admin.site.index_title = _("Paperless-ng administration")
+admin.site.index_title = _("Paperless-ngx administration")
|
{"golden_diff": "diff --git a/src/paperless/urls.py b/src/paperless/urls.py\n--- a/src/paperless/urls.py\n+++ b/src/paperless/urls.py\n@@ -132,8 +132,8 @@\n ]\n \n # Text in each page's <h1> (and above login form).\n-admin.site.site_header = \"Paperless-ng\"\n+admin.site.site_header = \"Paperless-ngx\"\n # Text at the end of each page's <title>.\n-admin.site.site_title = \"Paperless-ng\"\n+admin.site.site_title = \"Paperless-ngx\"\n # Text at the top of the admin index page.\n-admin.site.index_title = _(\"Paperless-ng administration\")\n+admin.site.index_title = _(\"Paperless-ngx administration\")\n", "issue": "[Other] Update name to paperless-ngx in docs\nhttps://github.com/paperless-ngx/paperless-ngx/issues/4#issuecomment-1039890021\r\n\r\n> I would go through the places where the project name is mentioned and add the x to it. Only actually code (variable names) could remain unchanged for backwards compatibility.\n", "before_files": [{"content": "from django.conf.urls import include\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import path, re_path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\nfrom rest_framework.authtoken import views\nfrom rest_framework.routers import DefaultRouter\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.conf import settings\n\nfrom paperless.consumers import StatusConsumer\nfrom documents.views import (\n CorrespondentViewSet,\n UnifiedSearchViewSet,\n LogViewSet,\n TagViewSet,\n DocumentTypeViewSet,\n IndexView,\n SearchAutoCompleteView,\n StatisticsView,\n PostDocumentView,\n SavedViewViewSet,\n BulkEditView,\n SelectionDataView,\n BulkDownloadView,\n)\nfrom paperless.views import FaviconView\n\napi_router = DefaultRouter()\napi_router.register(r\"correspondents\", CorrespondentViewSet)\napi_router.register(r\"document_types\", DocumentTypeViewSet)\napi_router.register(r\"documents\", UnifiedSearchViewSet)\napi_router.register(r\"logs\", LogViewSet, basename=\"logs\")\napi_router.register(r\"tags\", TagViewSet)\napi_router.register(r\"saved_views\", SavedViewViewSet)\n\n\nurlpatterns = [\n re_path(\n r\"^api/\",\n include(\n [\n re_path(\n r\"^auth/\",\n include(\n (\"rest_framework.urls\", \"rest_framework\"),\n namespace=\"rest_framework\",\n ),\n ),\n re_path(\n r\"^search/autocomplete/\",\n SearchAutoCompleteView.as_view(),\n name=\"autocomplete\",\n ),\n re_path(r\"^statistics/\", StatisticsView.as_view(), name=\"statistics\"),\n re_path(\n r\"^documents/post_document/\",\n PostDocumentView.as_view(),\n name=\"post_document\",\n ),\n re_path(\n r\"^documents/bulk_edit/\", BulkEditView.as_view(), name=\"bulk_edit\"\n ),\n re_path(\n r\"^documents/selection_data/\",\n SelectionDataView.as_view(),\n name=\"selection_data\",\n ),\n re_path(\n r\"^documents/bulk_download/\",\n BulkDownloadView.as_view(),\n name=\"bulk_download\",\n ),\n path(\"token/\", views.obtain_auth_token),\n ]\n + api_router.urls\n ),\n ),\n re_path(r\"^favicon.ico$\", FaviconView.as_view(), name=\"favicon\"),\n re_path(r\"admin/\", admin.site.urls),\n re_path(\n r\"^fetch/\",\n include(\n [\n re_path(\n r\"^doc/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/download/\"\n ),\n ),\n re_path(\n r\"^thumb/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/thumb/\"\n ),\n ),\n re_path(\n r\"^preview/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + 
\"api/documents/%(pk)s/preview/\"\n ),\n ),\n ]\n ),\n ),\n re_path(\n r\"^push$\",\n csrf_exempt(\n RedirectView.as_view(url=settings.BASE_URL + \"api/documents/post_document/\")\n ),\n ),\n # Frontend assets TODO: this is pretty bad, but it works.\n path(\n \"assets/<path:path>\",\n RedirectView.as_view(\n url=settings.STATIC_URL + \"frontend/en-US/assets/%(path)s\"\n ),\n ),\n # TODO: with localization, this is even worse! :/\n # login, logout\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n # Root of the Frontent\n re_path(r\".*\", login_required(IndexView.as_view()), name=\"base\"),\n]\n\n\nwebsocket_urlpatterns = [\n re_path(r\"ws/status/$\", StatusConsumer.as_asgi()),\n]\n\n# Text in each page's <h1> (and above login form).\nadmin.site.site_header = \"Paperless-ng\"\n# Text at the end of each page's <title>.\nadmin.site.site_title = \"Paperless-ng\"\n# Text at the top of the admin index page.\nadmin.site.index_title = _(\"Paperless-ng administration\")\n", "path": "src/paperless/urls.py"}], "after_files": [{"content": "from django.conf.urls import include\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import path, re_path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import RedirectView\nfrom rest_framework.authtoken import views\nfrom rest_framework.routers import DefaultRouter\n\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.conf import settings\n\nfrom paperless.consumers import StatusConsumer\nfrom documents.views import (\n CorrespondentViewSet,\n UnifiedSearchViewSet,\n LogViewSet,\n TagViewSet,\n DocumentTypeViewSet,\n IndexView,\n SearchAutoCompleteView,\n StatisticsView,\n PostDocumentView,\n SavedViewViewSet,\n BulkEditView,\n SelectionDataView,\n BulkDownloadView,\n)\nfrom paperless.views import FaviconView\n\napi_router = DefaultRouter()\napi_router.register(r\"correspondents\", CorrespondentViewSet)\napi_router.register(r\"document_types\", DocumentTypeViewSet)\napi_router.register(r\"documents\", UnifiedSearchViewSet)\napi_router.register(r\"logs\", LogViewSet, basename=\"logs\")\napi_router.register(r\"tags\", TagViewSet)\napi_router.register(r\"saved_views\", SavedViewViewSet)\n\n\nurlpatterns = [\n re_path(\n r\"^api/\",\n include(\n [\n re_path(\n r\"^auth/\",\n include(\n (\"rest_framework.urls\", \"rest_framework\"),\n namespace=\"rest_framework\",\n ),\n ),\n re_path(\n r\"^search/autocomplete/\",\n SearchAutoCompleteView.as_view(),\n name=\"autocomplete\",\n ),\n re_path(r\"^statistics/\", StatisticsView.as_view(), name=\"statistics\"),\n re_path(\n r\"^documents/post_document/\",\n PostDocumentView.as_view(),\n name=\"post_document\",\n ),\n re_path(\n r\"^documents/bulk_edit/\", BulkEditView.as_view(), name=\"bulk_edit\"\n ),\n re_path(\n r\"^documents/selection_data/\",\n SelectionDataView.as_view(),\n name=\"selection_data\",\n ),\n re_path(\n r\"^documents/bulk_download/\",\n BulkDownloadView.as_view(),\n name=\"bulk_download\",\n ),\n path(\"token/\", views.obtain_auth_token),\n ]\n + api_router.urls\n ),\n ),\n re_path(r\"^favicon.ico$\", FaviconView.as_view(), name=\"favicon\"),\n re_path(r\"admin/\", admin.site.urls),\n re_path(\n r\"^fetch/\",\n include(\n [\n re_path(\n r\"^doc/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/download/\"\n ),\n ),\n re_path(\n r\"^thumb/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/thumb/\"\n ),\n 
),\n re_path(\n r\"^preview/(?P<pk>\\d+)$\",\n RedirectView.as_view(\n url=settings.BASE_URL + \"api/documents/%(pk)s/preview/\"\n ),\n ),\n ]\n ),\n ),\n re_path(\n r\"^push$\",\n csrf_exempt(\n RedirectView.as_view(url=settings.BASE_URL + \"api/documents/post_document/\")\n ),\n ),\n # Frontend assets TODO: this is pretty bad, but it works.\n path(\n \"assets/<path:path>\",\n RedirectView.as_view(\n url=settings.STATIC_URL + \"frontend/en-US/assets/%(path)s\"\n ),\n ),\n # TODO: with localization, this is even worse! :/\n # login, logout\n path(\"accounts/\", include(\"django.contrib.auth.urls\")),\n # Root of the Frontent\n re_path(r\".*\", login_required(IndexView.as_view()), name=\"base\"),\n]\n\n\nwebsocket_urlpatterns = [\n re_path(r\"ws/status/$\", StatusConsumer.as_asgi()),\n]\n\n# Text in each page's <h1> (and above login form).\nadmin.site.site_header = \"Paperless-ngx\"\n# Text at the end of each page's <title>.\nadmin.site.site_title = \"Paperless-ngx\"\n# Text at the top of the admin index page.\nadmin.site.index_title = _(\"Paperless-ngx administration\")\n", "path": "src/paperless/urls.py"}]}
| 1,537 | 163 |
gh_patches_debug_34903
|
rasdani/github-patches
|
git_diff
|
data-for-change__anyway-731
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move load_discussions into main
I can't figure out what's the purpose of this file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `anyway/load_discussions.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import argparse
3 from .models import DiscussionMarker
4 import re
5 from .database import db_session
6 import sys
7 import logging
8
9 def main():
10 parser = argparse.ArgumentParser()
11 parser.add_argument('identifiers', type=str, nargs='*',
12 help='Disqus identifiers to create markers for')
13 args = parser.parse_args()
14
15 identifiers = args.identifiers if args.identifiers else sys.stdin
16
17 for identifier in identifiers:
18 m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier)
19 if not m:
20 logging.error("Failed processing: " + identifier)
21 continue
22 (latitude, longitude) = m.group(1, 2)
23 marker = DiscussionMarker.parse({
24 'latitude': latitude,
25 'longitude': longitude,
26 'title': identifier,
27 'identifier': identifier
28 })
29 try:
30 db_session.add(marker)
31 db_session.commit()
32 logging.info("Added: " + identifier, end="")
33 except:
34 db_session.rollback()
35 logging.warn("Failed: " + identifier, end="")
36
37
38 if __name__ == "__main__":
39 main()
40
```
Path: `main.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import click
5 import logging
6
7 @click.group()
8 def cli():
9 pass
10
11
12 @cli.command()
13 @click.option('--open', 'open_server', is_flag=True,
14 help='Open the server for communication from outside', default=False)
15 def testserver(open_server):
16 from anyway import app, united
17 from apscheduler.scheduler import Scheduler
18
19 sched = Scheduler()
20
21 @sched.interval_schedule(hours=12)
22 def scheduled_import():
23 united.main()
24 sched.start()
25
26 logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
27
28 default_host = '0.0.0.0' if open_server else '127.0.0.1'
29 app.run(debug=True, host=os.getenv('IP', default_host),
30 port=int(os.getenv('PORT', 5000)))
31
32
33 @cli.command()
34 def init_db():
35 from anyway.models import init_db
36 init_db()
37
38
39 @cli.command()
40 @click.option('--specific_folder', is_flag=True, default=False)
41 @click.option('--delete_all', is_flag=True)
42 @click.option('--path', type=str, default="static/data/lms")
43 @click.option('--batch_size', type=int, default=100)
44 @click.option('--provider_code', type=int)
45 def process_data(specific_folder, delete_all, path, batch_size, provider_code):
46 from anyway.process import main
47
48 return main(specific_folder=specific_folder, delete_all=delete_all, path=path,
49 batch_size=batch_size, provider_code=provider_code)
50
51
52 @cli.command()
53 @click.option('--light', is_flag=True, help='Import without downloading any new files')
54 @click.option('--username', default='')
55 @click.option('--password', default='')
56 @click.option('--lastmail', is_flag=True)
57 def import_united_data(light, username, password, lastmail):
58 from anyway.united import main
59
60 return main(light=light, username=username, password=password, lastmail=lastmail)
61
62 if __name__ == '__main__':
63 cli()
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/anyway/load_discussions.py b/anyway/load_discussions.py
deleted file mode 100755
--- a/anyway/load_discussions.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-import argparse
-from .models import DiscussionMarker
-import re
-from .database import db_session
-import sys
-import logging
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument('identifiers', type=str, nargs='*',
- help='Disqus identifiers to create markers for')
- args = parser.parse_args()
-
- identifiers = args.identifiers if args.identifiers else sys.stdin
-
- for identifier in identifiers:
- m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier)
- if not m:
- logging.error("Failed processing: " + identifier)
- continue
- (latitude, longitude) = m.group(1, 2)
- marker = DiscussionMarker.parse({
- 'latitude': latitude,
- 'longitude': longitude,
- 'title': identifier,
- 'identifier': identifier
- })
- try:
- db_session.add(marker)
- db_session.commit()
- logging.info("Added: " + identifier, end="")
- except:
- db_session.rollback()
- logging.warn("Failed: " + identifier, end="")
-
-
-if __name__ == "__main__":
- main()
diff --git a/main.py b/main.py
--- a/main.py
+++ b/main.py
@@ -1,8 +1,10 @@
#!/usr/bin/env python
-
-import os
import click
import logging
+import os
+import re
+import sys
+
@click.group()
def cli():
@@ -59,5 +61,36 @@
return main(light=light, username=username, password=password, lastmail=lastmail)
+
[email protected]()
[email protected]('identifiers', nargs=-1)
+def load_discussions(identifiers):
+ from anyway.database import db_session
+ from anyway.models import DiscussionMarker
+
+ identifiers = identifiers or sys.stdin
+
+ for identifier in identifiers:
+ identifier = identifier.strip()
+ m = re.match(r'\((\d+\.\d+),\s*(\d+\.\d+)\)', identifier)
+ if not m:
+ logging.error("Failed processing: " + identifier)
+ continue
+ (latitude, longitude) = m.group(1, 2)
+ marker = DiscussionMarker.parse({
+ 'latitude': latitude,
+ 'longitude': longitude,
+ 'title': identifier,
+ 'identifier': identifier
+ })
+ try:
+ db_session.add(marker)
+ db_session.commit()
+ logging.info("Added: " + identifier)
+ except Exception as e:
+ db_session.rollback()
+ logging.warn("Failed: " + identifier + ": " + e.message)
+
+
if __name__ == '__main__':
cli()
|
{"golden_diff": "diff --git a/anyway/load_discussions.py b/anyway/load_discussions.py\ndeleted file mode 100755\n--- a/anyway/load_discussions.py\n+++ /dev/null\n@@ -1,39 +0,0 @@\n-# -*- coding: utf-8 -*-\n-import argparse\n-from .models import DiscussionMarker\n-import re\n-from .database import db_session\n-import sys\n-import logging\n-\n-def main():\n- parser = argparse.ArgumentParser()\n- parser.add_argument('identifiers', type=str, nargs='*',\n- help='Disqus identifiers to create markers for')\n- args = parser.parse_args()\n-\n- identifiers = args.identifiers if args.identifiers else sys.stdin\n-\n- for identifier in identifiers:\n- m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n- if not m:\n- logging.error(\"Failed processing: \" + identifier)\n- continue\n- (latitude, longitude) = m.group(1, 2)\n- marker = DiscussionMarker.parse({\n- 'latitude': latitude,\n- 'longitude': longitude,\n- 'title': identifier,\n- 'identifier': identifier\n- })\n- try:\n- db_session.add(marker)\n- db_session.commit()\n- logging.info(\"Added: \" + identifier, end=\"\")\n- except:\n- db_session.rollback()\n- logging.warn(\"Failed: \" + identifier, end=\"\")\n-\n-\n-if __name__ == \"__main__\":\n- main()\ndiff --git a/main.py b/main.py\n--- a/main.py\n+++ b/main.py\n@@ -1,8 +1,10 @@\n #!/usr/bin/env python\n-\n-import os\n import click\n import logging\n+import os\n+import re\n+import sys\n+\n \n @click.group()\n def cli():\n@@ -59,5 +61,36 @@\n \n return main(light=light, username=username, password=password, lastmail=lastmail)\n \n+\[email protected]()\[email protected]('identifiers', nargs=-1)\n+def load_discussions(identifiers):\n+ from anyway.database import db_session\n+ from anyway.models import DiscussionMarker\n+\n+ identifiers = identifiers or sys.stdin\n+\n+ for identifier in identifiers:\n+ identifier = identifier.strip()\n+ m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n+ if not m:\n+ logging.error(\"Failed processing: \" + identifier)\n+ continue\n+ (latitude, longitude) = m.group(1, 2)\n+ marker = DiscussionMarker.parse({\n+ 'latitude': latitude,\n+ 'longitude': longitude,\n+ 'title': identifier,\n+ 'identifier': identifier\n+ })\n+ try:\n+ db_session.add(marker)\n+ db_session.commit()\n+ logging.info(\"Added: \" + identifier)\n+ except Exception as e:\n+ db_session.rollback()\n+ logging.warn(\"Failed: \" + identifier + \": \" + e.message)\n+\n+\n if __name__ == '__main__':\n cli()\n", "issue": "Move load_discussions into main\nI can't figure out what's the purpose of this file\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport argparse\nfrom .models import DiscussionMarker\nimport re\nfrom .database import db_session\nimport sys\nimport logging\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('identifiers', type=str, nargs='*',\n help='Disqus identifiers to create markers for')\n args = parser.parse_args()\n\n identifiers = args.identifiers if args.identifiers else sys.stdin\n\n for identifier in identifiers:\n m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n if not m:\n logging.error(\"Failed processing: \" + identifier)\n continue\n (latitude, longitude) = m.group(1, 2)\n marker = DiscussionMarker.parse({\n 'latitude': latitude,\n 'longitude': longitude,\n 'title': identifier,\n 'identifier': identifier\n })\n try:\n db_session.add(marker)\n db_session.commit()\n logging.info(\"Added: \" + identifier, end=\"\")\n except:\n db_session.rollback()\n logging.warn(\"Failed: \" + identifier, 
end=\"\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "anyway/load_discussions.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport click\nimport logging\n\[email protected]()\ndef cli():\n pass\n\n\[email protected]()\[email protected]('--open', 'open_server', is_flag=True,\n help='Open the server for communication from outside', default=False)\ndef testserver(open_server):\n from anyway import app, united\n from apscheduler.scheduler import Scheduler\n\n sched = Scheduler()\n\n @sched.interval_schedule(hours=12)\n def scheduled_import():\n united.main()\n sched.start()\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\n\n default_host = '0.0.0.0' if open_server else '127.0.0.1'\n app.run(debug=True, host=os.getenv('IP', default_host),\n port=int(os.getenv('PORT', 5000)))\n\n\[email protected]()\ndef init_db():\n from anyway.models import init_db\n init_db()\n\n\[email protected]()\[email protected]('--specific_folder', is_flag=True, default=False)\[email protected]('--delete_all', is_flag=True)\[email protected]('--path', type=str, default=\"static/data/lms\")\[email protected]('--batch_size', type=int, default=100)\[email protected]('--provider_code', type=int)\ndef process_data(specific_folder, delete_all, path, batch_size, provider_code):\n from anyway.process import main\n\n return main(specific_folder=specific_folder, delete_all=delete_all, path=path,\n batch_size=batch_size, provider_code=provider_code)\n\n\[email protected]()\[email protected]('--light', is_flag=True, help='Import without downloading any new files')\[email protected]('--username', default='')\[email protected]('--password', default='')\[email protected]('--lastmail', is_flag=True)\ndef import_united_data(light, username, password, lastmail):\n from anyway.united import main\n\n return main(light=light, username=username, password=password, lastmail=lastmail)\n\nif __name__ == '__main__':\n cli()\n", "path": "main.py"}], "after_files": [{"content": null, "path": "anyway/load_discussions.py"}, {"content": "#!/usr/bin/env python\nimport click\nimport logging\nimport os\nimport re\nimport sys\n\n\[email protected]()\ndef cli():\n pass\n\n\[email protected]()\[email protected]('--open', 'open_server', is_flag=True,\n help='Open the server for communication from outside', default=False)\ndef testserver(open_server):\n from anyway import app, united\n from apscheduler.scheduler import Scheduler\n\n sched = Scheduler()\n\n @sched.interval_schedule(hours=12)\n def scheduled_import():\n united.main()\n sched.start()\n\n logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')\n\n default_host = '0.0.0.0' if open_server else '127.0.0.1'\n app.run(debug=True, host=os.getenv('IP', default_host),\n port=int(os.getenv('PORT', 5000)))\n\n\[email protected]()\ndef init_db():\n from anyway.models import init_db\n init_db()\n\n\[email protected]()\[email protected]('--specific_folder', is_flag=True, default=False)\[email protected]('--delete_all', is_flag=True)\[email protected]('--path', type=str, default=\"static/data/lms\")\[email protected]('--batch_size', type=int, default=100)\[email protected]('--provider_code', type=int)\ndef process_data(specific_folder, delete_all, path, batch_size, provider_code):\n from anyway.process import main\n\n return main(specific_folder=specific_folder, delete_all=delete_all, path=path,\n batch_size=batch_size, provider_code=provider_code)\n\n\[email protected]()\[email protected]('--light', is_flag=True, help='Import without 
downloading any new files')\[email protected]('--username', default='')\[email protected]('--password', default='')\[email protected]('--lastmail', is_flag=True)\ndef import_united_data(light, username, password, lastmail):\n from anyway.united import main\n\n return main(light=light, username=username, password=password, lastmail=lastmail)\n\n\[email protected]()\[email protected]('identifiers', nargs=-1)\ndef load_discussions(identifiers):\n from anyway.database import db_session\n from anyway.models import DiscussionMarker\n\n identifiers = identifiers or sys.stdin\n\n for identifier in identifiers:\n identifier = identifier.strip()\n m = re.match(r'\\((\\d+\\.\\d+),\\s*(\\d+\\.\\d+)\\)', identifier)\n if not m:\n logging.error(\"Failed processing: \" + identifier)\n continue\n (latitude, longitude) = m.group(1, 2)\n marker = DiscussionMarker.parse({\n 'latitude': latitude,\n 'longitude': longitude,\n 'title': identifier,\n 'identifier': identifier\n })\n try:\n db_session.add(marker)\n db_session.commit()\n logging.info(\"Added: \" + identifier)\n except Exception as e:\n db_session.rollback()\n logging.warn(\"Failed: \" + identifier + \": \" + e.message)\n\n\nif __name__ == '__main__':\n cli()\n", "path": "main.py"}]}
| 1,183 | 685 |
gh_patches_debug_367
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-1305
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `GetMetadataItem` like method
Ref: https://github.com/mapbox/rasterio/issues/1077,
I'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem
**Method Name:** `get_metadata_item` or `get_metadata`
**Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')`
**Code:**
```cython
def get_metadata(self, bidx, ns, dm=None, ovr=None):
"""Returns metadata item
Parameters
----------
bidx: int
Band index, starting with 1.
name: str
The key for the metadata item to fetch.
domain: str
The domain to fetch for.
ovr: int
Overview level
Returns
-------
str
"""
cdef GDALMajorObjectH b = NULL
cdef GDALMajorObjectH obj = NULL
cdef char *value = NULL
cdef const char *name = NULL
cdef const char *domain = NULL
ns = ns.encode('utf-8')
name = ns
if dm:
dm = dm.encode('utf-8')
domain = dm
b = self.band(bidx)
if ovr:
b = GDALGetOverview(b, ovr)
obj = b
value = GDALGetMetadataItem(obj, name, domain)
if value == NULL:
return None
else:
return value
```
@sgillies I'm happy to submit a PR for that :-)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/errors.py`
Content:
```
1 """Errors and Warnings."""
2
3 from click import FileError
4
5
6 class RasterioError(Exception):
7 """Root exception class"""
8
9
10 class WindowError(RasterioError):
11 """Raised when errors occur during window operations"""
12
13
14 class CRSError(ValueError):
15 """Raised when a CRS string or mapping is invalid or cannot serve
16 to define a coordinate transformation."""
17
18
19 class EnvError(RasterioError):
20 """Raised when the state of GDAL/AWS environment cannot be created
21 or modified."""
22
23
24 class DriverRegistrationError(ValueError):
25 """Raised when a format driver is requested but is not registered."""
26
27
28 class FileOverwriteError(FileError):
29 """Raised when Rasterio's CLI refuses to clobber output files."""
30
31 def __init__(self, message):
32 """Raise FileOverwriteError with message as hint."""
33 super(FileOverwriteError, self).__init__('', hint=message)
34
35
36 class RasterioIOError(IOError):
37 """Raised when a dataset cannot be opened using one of the
38 registered format drivers."""
39
40
41 class NodataShadowWarning(UserWarning):
42 """Warn that a dataset's nodata attribute is shadowing its alpha band."""
43
44 def __str__(self):
45 return ("The dataset's nodata attribute is shadowing "
46 "the alpha band. All masks will be determined "
47 "by the nodata attribute")
48
49
50 class NotGeoreferencedWarning(UserWarning):
51 """Warn that a dataset isn't georeferenced."""
52
53
54 class GDALBehaviorChangeException(RuntimeError):
55 """Raised when GDAL's behavior differs from the given arguments. For
56 example, antimeridian cutting is always on as of GDAL 2.2.0. Users
57 expecting it to be off will be presented with a MultiPolygon when the
58 rest of their code expects a Polygon.
59
60 # Raises an exception on GDAL >= 2.2.0
61 rasterio.warp.transform_geometry(
62 src_crs, dst_crs, antimeridian_cutting=False)
63 """
64
65
66 class GDALOptionNotImplementedError(RasterioError):
67 """A dataset opening or dataset creation option can't be supported
68
69 This will be raised from Rasterio's shim modules. For example, when
70 a user passes arguments to open_dataset() that can't be evaluated
71 by GDAL 1.x.
72 """
73
74 class GDALVersionError(RasterioError):
75 """Raised if the runtime version of GDAL does not meet the required
76 version of GDAL."""
77
78
79 class WindowEvaluationError(ValueError):
80 """Raised when window evaluation fails"""
81
82
83 class RasterioDeprecationWarning(UserWarning):
84 """Rasterio module deprecations"""
85
86
87 class RasterBlockError(RasterioError):
88 """Raised when raster block access fails"""
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/errors.py b/rasterio/errors.py
--- a/rasterio/errors.py
+++ b/rasterio/errors.py
@@ -86,3 +86,7 @@
class RasterBlockError(RasterioError):
"""Raised when raster block access fails"""
+
+
+class BandOverviewError(UserWarning):
+ """Raised when a band overview access fails."""
|
{"golden_diff": "diff --git a/rasterio/errors.py b/rasterio/errors.py\n--- a/rasterio/errors.py\n+++ b/rasterio/errors.py\n@@ -86,3 +86,7 @@\n \n class RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n+\n+\n+class BandOverviewError(UserWarning):\n+ \"\"\"Raised when a band overview access fails.\"\"\"\n", "issue": "Add `GetMetadataItem` like method \nRef: https://github.com/mapbox/rasterio/issues/1077, \r\n\r\nI'm proposing to add a new method in https://github.com/mapbox/rasterio/blob/master/rasterio/_base.pyx to replicate GDAL GetMetadataItem\r\n\r\n\r\n**Method Name:** `get_metadata_item` or `get_metadata`\r\n**Why:** I need to be able to get TIFF metadata like `band.GetMetadataItem('IFD_OFFSET', 'TIFF')`\r\n\r\n**Code:**\r\n\r\n```cython\r\n def get_metadata(self, bidx, ns, dm=None, ovr=None):\r\n \"\"\"Returns metadata item\r\n\r\n Parameters\r\n ----------\r\n bidx: int\r\n Band index, starting with 1.\r\n name: str\r\n The key for the metadata item to fetch.\r\n domain: str\r\n The domain to fetch for.\r\n ovr: int\r\n Overview level\r\n\r\n Returns\r\n -------\r\n str\r\n \"\"\"\r\n cdef GDALMajorObjectH b = NULL\r\n cdef GDALMajorObjectH obj = NULL\r\n cdef char *value = NULL\r\n cdef const char *name = NULL\r\n cdef const char *domain = NULL\r\n\r\n ns = ns.encode('utf-8')\r\n name = ns\r\n\r\n if dm:\r\n dm = dm.encode('utf-8')\r\n domain = dm\r\n\r\n b = self.band(bidx)\r\n if ovr:\r\n b = GDALGetOverview(b, ovr)\r\n\r\n obj = b\r\n\r\n value = GDALGetMetadataItem(obj, name, domain)\r\n if value == NULL:\r\n return None\r\n else:\r\n return value\r\n```\r\n\r\n@sgillies I'm happy to submit a PR for that :-) \n", "before_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. 
Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n", "path": "rasterio/errors.py"}], "after_files": [{"content": "\"\"\"Errors and Warnings.\"\"\"\n\nfrom click import FileError\n\n\nclass RasterioError(Exception):\n \"\"\"Root exception class\"\"\"\n\n\nclass WindowError(RasterioError):\n \"\"\"Raised when errors occur during window operations\"\"\"\n\n\nclass CRSError(ValueError):\n \"\"\"Raised when a CRS string or mapping is invalid or cannot serve\n to define a coordinate transformation.\"\"\"\n\n\nclass EnvError(RasterioError):\n \"\"\"Raised when the state of GDAL/AWS environment cannot be created\n or modified.\"\"\"\n\n\nclass DriverRegistrationError(ValueError):\n \"\"\"Raised when a format driver is requested but is not registered.\"\"\"\n\n\nclass FileOverwriteError(FileError):\n \"\"\"Raised when Rasterio's CLI refuses to clobber output files.\"\"\"\n\n def __init__(self, message):\n \"\"\"Raise FileOverwriteError with message as hint.\"\"\"\n super(FileOverwriteError, self).__init__('', hint=message)\n\n\nclass RasterioIOError(IOError):\n \"\"\"Raised when a dataset cannot be opened using one of the\n registered format drivers.\"\"\"\n\n\nclass NodataShadowWarning(UserWarning):\n \"\"\"Warn that a dataset's nodata attribute is shadowing its alpha band.\"\"\"\n\n def __str__(self):\n return (\"The dataset's nodata attribute is shadowing \"\n \"the alpha band. All masks will be determined \"\n \"by the nodata attribute\")\n\n\nclass NotGeoreferencedWarning(UserWarning):\n \"\"\"Warn that a dataset isn't georeferenced.\"\"\"\n\n\nclass GDALBehaviorChangeException(RuntimeError):\n \"\"\"Raised when GDAL's behavior differs from the given arguments. For\n example, antimeridian cutting is always on as of GDAL 2.2.0. Users\n expecting it to be off will be presented with a MultiPolygon when the\n rest of their code expects a Polygon.\n\n # Raises an exception on GDAL >= 2.2.0\n rasterio.warp.transform_geometry(\n src_crs, dst_crs, antimeridian_cutting=False)\n \"\"\"\n\n\nclass GDALOptionNotImplementedError(RasterioError):\n \"\"\"A dataset opening or dataset creation option can't be supported\n\n This will be raised from Rasterio's shim modules. 
For example, when\n a user passes arguments to open_dataset() that can't be evaluated\n by GDAL 1.x.\n \"\"\"\n\nclass GDALVersionError(RasterioError):\n \"\"\"Raised if the runtime version of GDAL does not meet the required\n version of GDAL.\"\"\"\n\n\nclass WindowEvaluationError(ValueError):\n \"\"\"Raised when window evaluation fails\"\"\"\n\n\nclass RasterioDeprecationWarning(UserWarning):\n \"\"\"Rasterio module deprecations\"\"\"\n\n\nclass RasterBlockError(RasterioError):\n \"\"\"Raised when raster block access fails\"\"\"\n\n\nclass BandOverviewError(UserWarning):\n \"\"\"Raised when a band overview access fails.\"\"\"\n", "path": "rasterio/errors.py"}]}
num_tokens: 1,399
num_tokens_diff: 85