problem_id (stringlengths 18–22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13–58) | prompt (stringlengths 1.1k–25.4k) | golden_diff (stringlengths 145–5.13k) | verification_info (stringlengths 582–39.1k) | num_tokens (int64 271–4.1k) | num_tokens_diff (int64 47–1.02k) |
---|---|---|---|---|---|---|---|---|
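Each row pairs a repository issue and its relevant source files (`prompt`) with the reference patch (`golden_diff`), a JSON string of verification metadata (`verification_info`), and token counts. Below is a minimal sketch of loading and inspecting one row with the Hugging Face `datasets` library; the Hub identifier `rasdani/github-patches` is taken from the `source` column, and the `train` split name is an assumption.

```python
# Sketch: load the dataset and inspect one row.
# Assumptions: the preview corresponds to a Hub dataset published as
# "rasdani/github-patches" and it exposes a "train" split.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])     # e.g. gh_patches_debug_17942, open-mmlab__mmocr-334
print(row["num_tokens"], row["num_tokens_diff"])  # token counts for the prompt and the diff
print(row["golden_diff"][:300])                   # reference patch in git diff format

# verification_info appears to be a JSON string in the preview.
info = json.loads(row["verification_info"])
print(info.keys())
```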
gh_patches_debug_17942 | rasdani/github-patches | git_diff | open-mmlab__mmocr-334 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When I want to train a fcenet, I met a problem

It did run, but several epoch,there note that data not on the gpu.
how to solve it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmocr/models/textdet/losses/fce_loss.py`
Content:
```
1 import numpy as np
2 import torch
3 import torch.nn.functional as F
4 from mmdet.core import multi_apply
5 from mmdet.models.builder import LOSSES
6 from torch import nn
7
8
9 @LOSSES.register_module()
10 class FCELoss(nn.Module):
11 """The class for implementing FCENet loss
12 FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped
13 Text Detection
14
15 [https://arxiv.org/abs/2104.10442]
16
17 Args:
18 fourier_degree (int) : The maximum Fourier transform degree k.
19 num_sample (int) : The sampling points number of regression
20 loss. If it is too small, fcenet tends to be overfitting.
21 ohem_ratio (float): the negative/positive ratio in OHEM.
22 """
23
24 def __init__(self, fourier_degree, num_sample, ohem_ratio=3.):
25 super().__init__()
26 self.fourier_degree = fourier_degree
27 self.num_sample = num_sample
28 self.ohem_ratio = ohem_ratio
29
30 def forward(self, preds, _, p3_maps, p4_maps, p5_maps):
31 assert isinstance(preds, list)
32 assert p3_maps[0].shape[0] == 4 * self.fourier_degree + 5,\
33 'fourier degree not equal in FCEhead and FCEtarget'
34
35 device = preds[0][0].device
36 # to tensor
37 gts = [p3_maps, p4_maps, p5_maps]
38 for idx, maps in enumerate(gts):
39 gts[idx] = torch.from_numpy(np.stack(maps)).float().to(device)
40
41 losses = multi_apply(self.forward_single, preds, gts)
42
43 loss_tr = torch.tensor(0., device=device).float()
44 loss_tcl = torch.tensor(0., device=device).float()
45 loss_reg_x = torch.tensor(0., device=device).float()
46 loss_reg_y = torch.tensor(0., device=device).float()
47
48 for idx, loss in enumerate(losses):
49 if idx == 0:
50 loss_tr += sum(loss)
51 elif idx == 1:
52 loss_tcl += sum(loss)
53 elif idx == 2:
54 loss_reg_x += sum(loss)
55 else:
56 loss_reg_y += sum(loss)
57
58 results = dict(
59 loss_text=loss_tr,
60 loss_center=loss_tcl,
61 loss_reg_x=loss_reg_x,
62 loss_reg_y=loss_reg_y,
63 )
64
65 return results
66
67 def forward_single(self, pred, gt):
68 cls_pred = pred[0].permute(0, 2, 3, 1).contiguous()
69 reg_pred = pred[1].permute(0, 2, 3, 1).contiguous()
70 gt = gt.permute(0, 2, 3, 1).contiguous()
71
72 k = 2 * self.fourier_degree + 1
73 tr_pred = cls_pred[:, :, :, :2].view(-1, 2)
74 tcl_pred = cls_pred[:, :, :, 2:].view(-1, 2)
75 x_pred = reg_pred[:, :, :, 0:k].view(-1, k)
76 y_pred = reg_pred[:, :, :, k:2 * k].view(-1, k)
77
78 tr_mask = gt[:, :, :, :1].view(-1)
79 tcl_mask = gt[:, :, :, 1:2].view(-1)
80 train_mask = gt[:, :, :, 2:3].view(-1)
81 x_map = gt[:, :, :, 3:3 + k].view(-1, k)
82 y_map = gt[:, :, :, 3 + k:].view(-1, k)
83
84 tr_train_mask = train_mask * tr_mask
85 device = x_map.device
86 # tr loss
87 loss_tr = self.ohem(tr_pred, tr_mask.long(), train_mask.long())
88
89 # tcl loss
90 loss_tcl = torch.tensor(0.).float().to(device)
91 tr_neg_mask = 1 - tr_train_mask
92 if tr_train_mask.sum().item() > 0:
93 loss_tcl_pos = F.cross_entropy(
94 tcl_pred[tr_train_mask.bool()],
95 tcl_mask[tr_train_mask.bool()].long())
96 loss_tcl_neg = F.cross_entropy(tcl_pred[tr_neg_mask.bool()],
97 tcl_mask[tr_neg_mask.bool()].long())
98 loss_tcl = loss_tcl_pos + 0.5 * loss_tcl_neg
99
100 # regression loss
101 loss_reg_x = torch.tensor(0.).float().to(device)
102 loss_reg_y = torch.tensor(0.).float().to(device)
103 if tr_train_mask.sum().item() > 0:
104 weight = (tr_mask[tr_train_mask.bool()].float() +
105 tcl_mask[tr_train_mask.bool()].float()) / 2
106 weight = weight.contiguous().view(-1, 1)
107
108 ft_x, ft_y = self.fourier2poly(x_map, y_map)
109 ft_x_pre, ft_y_pre = self.fourier2poly(x_pred, y_pred)
110
111 loss_reg_x = torch.mean(weight * F.smooth_l1_loss(
112 ft_x_pre[tr_train_mask.bool()],
113 ft_x[tr_train_mask.bool()],
114 reduction='none'))
115 loss_reg_y = torch.mean(weight * F.smooth_l1_loss(
116 ft_y_pre[tr_train_mask.bool()],
117 ft_y[tr_train_mask.bool()],
118 reduction='none'))
119
120 return loss_tr, loss_tcl, loss_reg_x, loss_reg_y
121
122 def ohem(self, predict, target, train_mask):
123 pos = (target * train_mask).bool()
124 neg = ((1 - target) * train_mask).bool()
125
126 n_pos = pos.float().sum()
127
128 if n_pos.item() > 0:
129 loss_pos = F.cross_entropy(
130 predict[pos], target[pos], reduction='sum')
131 loss_neg = F.cross_entropy(
132 predict[neg], target[neg], reduction='none')
133 n_neg = min(
134 int(neg.float().sum().item()),
135 int(self.ohem_ratio * n_pos.float()))
136 else:
137 loss_pos = torch.tensor(0.)
138 loss_neg = F.cross_entropy(
139 predict[neg], target[neg], reduction='none')
140 n_neg = 100
141 if len(loss_neg) > n_neg:
142 loss_neg, _ = torch.topk(loss_neg, n_neg)
143
144 return (loss_pos + loss_neg.sum()) / (n_pos + n_neg).float()
145
146 def fourier2poly(self, real_maps, imag_maps):
147 """Transform Fourier coefficient maps to polygon maps.
148
149 Args:
150 real_maps (tensor): A map composed of the real parts of the
151 Fourier coefficients, whose shape is (-1, 2k+1)
152 imag_maps (tensor):A map composed of the imag parts of the
153 Fourier coefficients, whose shape is (-1, 2k+1)
154
155 Returns
156 x_maps (tensor): A map composed of the x value of the polygon
157 represented by n sample points (xn, yn), whose shape is (-1, n)
158 y_maps (tensor): A map composed of the y value of the polygon
159 represented by n sample points (xn, yn), whose shape is (-1, n)
160 """
161
162 device = real_maps.device
163
164 k_vect = torch.arange(
165 -self.fourier_degree,
166 self.fourier_degree + 1,
167 dtype=torch.float,
168 device=device).view(-1, 1)
169 i_vect = torch.arange(
170 0, self.num_sample, dtype=torch.float, device=device).view(1, -1)
171
172 transform_matrix = 2 * np.pi / self.num_sample * torch.mm(
173 k_vect, i_vect)
174
175 x1 = torch.einsum('ak, kn-> an', real_maps,
176 torch.cos(transform_matrix))
177 x2 = torch.einsum('ak, kn-> an', imag_maps,
178 torch.sin(transform_matrix))
179 y1 = torch.einsum('ak, kn-> an', real_maps,
180 torch.sin(transform_matrix))
181 y2 = torch.einsum('ak, kn-> an', imag_maps,
182 torch.cos(transform_matrix))
183
184 x_maps = x1 - x2
185 y_maps = y1 + y2
186
187 return x_maps, y_maps
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmocr/models/textdet/losses/fce_loss.py b/mmocr/models/textdet/losses/fce_loss.py
--- a/mmocr/models/textdet/losses/fce_loss.py
+++ b/mmocr/models/textdet/losses/fce_loss.py
@@ -120,6 +120,7 @@
return loss_tr, loss_tcl, loss_reg_x, loss_reg_y
def ohem(self, predict, target, train_mask):
+ device = train_mask.device
pos = (target * train_mask).bool()
neg = ((1 - target) * train_mask).bool()
@@ -134,7 +135,7 @@
int(neg.float().sum().item()),
int(self.ohem_ratio * n_pos.float()))
else:
- loss_pos = torch.tensor(0.)
+ loss_pos = torch.tensor(0.).to(device)
loss_neg = F.cross_entropy(
predict[neg], target[neg], reduction='none')
n_neg = 100
|
{"golden_diff": "diff --git a/mmocr/models/textdet/losses/fce_loss.py b/mmocr/models/textdet/losses/fce_loss.py\n--- a/mmocr/models/textdet/losses/fce_loss.py\n+++ b/mmocr/models/textdet/losses/fce_loss.py\n@@ -120,6 +120,7 @@\n return loss_tr, loss_tcl, loss_reg_x, loss_reg_y\n \n def ohem(self, predict, target, train_mask):\n+ device = train_mask.device\n pos = (target * train_mask).bool()\n neg = ((1 - target) * train_mask).bool()\n \n@@ -134,7 +135,7 @@\n int(neg.float().sum().item()),\n int(self.ohem_ratio * n_pos.float()))\n else:\n- loss_pos = torch.tensor(0.)\n+ loss_pos = torch.tensor(0.).to(device)\n loss_neg = F.cross_entropy(\n predict[neg], target[neg], reduction='none')\n n_neg = 100\n", "issue": "When I want to train a fcenet, I met a problem\n\r\nIt did run, but several epoch,there note that data not on the gpu.\r\nhow to solve it?\n", "before_files": [{"content": "import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmdet.core import multi_apply\nfrom mmdet.models.builder import LOSSES\nfrom torch import nn\n\n\[email protected]_module()\nclass FCELoss(nn.Module):\n \"\"\"The class for implementing FCENet loss\n FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped\n Text Detection\n\n [https://arxiv.org/abs/2104.10442]\n\n Args:\n fourier_degree (int) : The maximum Fourier transform degree k.\n num_sample (int) : The sampling points number of regression\n loss. If it is too small, fcenet tends to be overfitting.\n ohem_ratio (float): the negative/positive ratio in OHEM.\n \"\"\"\n\n def __init__(self, fourier_degree, num_sample, ohem_ratio=3.):\n super().__init__()\n self.fourier_degree = fourier_degree\n self.num_sample = num_sample\n self.ohem_ratio = ohem_ratio\n\n def forward(self, preds, _, p3_maps, p4_maps, p5_maps):\n assert isinstance(preds, list)\n assert p3_maps[0].shape[0] == 4 * self.fourier_degree + 5,\\\n 'fourier degree not equal in FCEhead and FCEtarget'\n\n device = preds[0][0].device\n # to tensor\n gts = [p3_maps, p4_maps, p5_maps]\n for idx, maps in enumerate(gts):\n gts[idx] = torch.from_numpy(np.stack(maps)).float().to(device)\n\n losses = multi_apply(self.forward_single, preds, gts)\n\n loss_tr = torch.tensor(0., device=device).float()\n loss_tcl = torch.tensor(0., device=device).float()\n loss_reg_x = torch.tensor(0., device=device).float()\n loss_reg_y = torch.tensor(0., device=device).float()\n\n for idx, loss in enumerate(losses):\n if idx == 0:\n loss_tr += sum(loss)\n elif idx == 1:\n loss_tcl += sum(loss)\n elif idx == 2:\n loss_reg_x += sum(loss)\n else:\n loss_reg_y += sum(loss)\n\n results = dict(\n loss_text=loss_tr,\n loss_center=loss_tcl,\n loss_reg_x=loss_reg_x,\n loss_reg_y=loss_reg_y,\n )\n\n return results\n\n def forward_single(self, pred, gt):\n cls_pred = pred[0].permute(0, 2, 3, 1).contiguous()\n reg_pred = pred[1].permute(0, 2, 3, 1).contiguous()\n gt = gt.permute(0, 2, 3, 1).contiguous()\n\n k = 2 * self.fourier_degree + 1\n tr_pred = cls_pred[:, :, :, :2].view(-1, 2)\n tcl_pred = cls_pred[:, :, :, 2:].view(-1, 2)\n x_pred = reg_pred[:, :, :, 0:k].view(-1, k)\n y_pred = reg_pred[:, :, :, k:2 * k].view(-1, k)\n\n tr_mask = gt[:, :, :, :1].view(-1)\n tcl_mask = gt[:, :, :, 1:2].view(-1)\n train_mask = gt[:, :, :, 2:3].view(-1)\n x_map = gt[:, :, :, 3:3 + k].view(-1, k)\n y_map = gt[:, :, :, 3 + k:].view(-1, k)\n\n tr_train_mask = train_mask * tr_mask\n device = x_map.device\n # tr loss\n loss_tr = self.ohem(tr_pred, tr_mask.long(), train_mask.long())\n\n # tcl loss\n loss_tcl = 
torch.tensor(0.).float().to(device)\n tr_neg_mask = 1 - tr_train_mask\n if tr_train_mask.sum().item() > 0:\n loss_tcl_pos = F.cross_entropy(\n tcl_pred[tr_train_mask.bool()],\n tcl_mask[tr_train_mask.bool()].long())\n loss_tcl_neg = F.cross_entropy(tcl_pred[tr_neg_mask.bool()],\n tcl_mask[tr_neg_mask.bool()].long())\n loss_tcl = loss_tcl_pos + 0.5 * loss_tcl_neg\n\n # regression loss\n loss_reg_x = torch.tensor(0.).float().to(device)\n loss_reg_y = torch.tensor(0.).float().to(device)\n if tr_train_mask.sum().item() > 0:\n weight = (tr_mask[tr_train_mask.bool()].float() +\n tcl_mask[tr_train_mask.bool()].float()) / 2\n weight = weight.contiguous().view(-1, 1)\n\n ft_x, ft_y = self.fourier2poly(x_map, y_map)\n ft_x_pre, ft_y_pre = self.fourier2poly(x_pred, y_pred)\n\n loss_reg_x = torch.mean(weight * F.smooth_l1_loss(\n ft_x_pre[tr_train_mask.bool()],\n ft_x[tr_train_mask.bool()],\n reduction='none'))\n loss_reg_y = torch.mean(weight * F.smooth_l1_loss(\n ft_y_pre[tr_train_mask.bool()],\n ft_y[tr_train_mask.bool()],\n reduction='none'))\n\n return loss_tr, loss_tcl, loss_reg_x, loss_reg_y\n\n def ohem(self, predict, target, train_mask):\n pos = (target * train_mask).bool()\n neg = ((1 - target) * train_mask).bool()\n\n n_pos = pos.float().sum()\n\n if n_pos.item() > 0:\n loss_pos = F.cross_entropy(\n predict[pos], target[pos], reduction='sum')\n loss_neg = F.cross_entropy(\n predict[neg], target[neg], reduction='none')\n n_neg = min(\n int(neg.float().sum().item()),\n int(self.ohem_ratio * n_pos.float()))\n else:\n loss_pos = torch.tensor(0.)\n loss_neg = F.cross_entropy(\n predict[neg], target[neg], reduction='none')\n n_neg = 100\n if len(loss_neg) > n_neg:\n loss_neg, _ = torch.topk(loss_neg, n_neg)\n\n return (loss_pos + loss_neg.sum()) / (n_pos + n_neg).float()\n\n def fourier2poly(self, real_maps, imag_maps):\n \"\"\"Transform Fourier coefficient maps to polygon maps.\n\n Args:\n real_maps (tensor): A map composed of the real parts of the\n Fourier coefficients, whose shape is (-1, 2k+1)\n imag_maps (tensor):A map composed of the imag parts of the\n Fourier coefficients, whose shape is (-1, 2k+1)\n\n Returns\n x_maps (tensor): A map composed of the x value of the polygon\n represented by n sample points (xn, yn), whose shape is (-1, n)\n y_maps (tensor): A map composed of the y value of the polygon\n represented by n sample points (xn, yn), whose shape is (-1, n)\n \"\"\"\n\n device = real_maps.device\n\n k_vect = torch.arange(\n -self.fourier_degree,\n self.fourier_degree + 1,\n dtype=torch.float,\n device=device).view(-1, 1)\n i_vect = torch.arange(\n 0, self.num_sample, dtype=torch.float, device=device).view(1, -1)\n\n transform_matrix = 2 * np.pi / self.num_sample * torch.mm(\n k_vect, i_vect)\n\n x1 = torch.einsum('ak, kn-> an', real_maps,\n torch.cos(transform_matrix))\n x2 = torch.einsum('ak, kn-> an', imag_maps,\n torch.sin(transform_matrix))\n y1 = torch.einsum('ak, kn-> an', real_maps,\n torch.sin(transform_matrix))\n y2 = torch.einsum('ak, kn-> an', imag_maps,\n torch.cos(transform_matrix))\n\n x_maps = x1 - x2\n y_maps = y1 + y2\n\n return x_maps, y_maps\n", "path": "mmocr/models/textdet/losses/fce_loss.py"}], "after_files": [{"content": "import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmdet.core import multi_apply\nfrom mmdet.models.builder import LOSSES\nfrom torch import nn\n\n\[email protected]_module()\nclass FCELoss(nn.Module):\n \"\"\"The class for implementing FCENet loss\n FCENet(CVPR2021): Fourier Contour Embedding for Arbitrary-shaped\n 
Text Detection\n\n [https://arxiv.org/abs/2104.10442]\n\n Args:\n fourier_degree (int) : The maximum Fourier transform degree k.\n num_sample (int) : The sampling points number of regression\n loss. If it is too small, fcenet tends to be overfitting.\n ohem_ratio (float): the negative/positive ratio in OHEM.\n \"\"\"\n\n def __init__(self, fourier_degree, num_sample, ohem_ratio=3.):\n super().__init__()\n self.fourier_degree = fourier_degree\n self.num_sample = num_sample\n self.ohem_ratio = ohem_ratio\n\n def forward(self, preds, _, p3_maps, p4_maps, p5_maps):\n assert isinstance(preds, list)\n assert p3_maps[0].shape[0] == 4 * self.fourier_degree + 5,\\\n 'fourier degree not equal in FCEhead and FCEtarget'\n\n device = preds[0][0].device\n # to tensor\n gts = [p3_maps, p4_maps, p5_maps]\n for idx, maps in enumerate(gts):\n gts[idx] = torch.from_numpy(np.stack(maps)).float().to(device)\n\n losses = multi_apply(self.forward_single, preds, gts)\n\n loss_tr = torch.tensor(0., device=device).float()\n loss_tcl = torch.tensor(0., device=device).float()\n loss_reg_x = torch.tensor(0., device=device).float()\n loss_reg_y = torch.tensor(0., device=device).float()\n\n for idx, loss in enumerate(losses):\n if idx == 0:\n loss_tr += sum(loss)\n elif idx == 1:\n loss_tcl += sum(loss)\n elif idx == 2:\n loss_reg_x += sum(loss)\n else:\n loss_reg_y += sum(loss)\n\n results = dict(\n loss_text=loss_tr,\n loss_center=loss_tcl,\n loss_reg_x=loss_reg_x,\n loss_reg_y=loss_reg_y,\n )\n\n return results\n\n def forward_single(self, pred, gt):\n cls_pred = pred[0].permute(0, 2, 3, 1).contiguous()\n reg_pred = pred[1].permute(0, 2, 3, 1).contiguous()\n gt = gt.permute(0, 2, 3, 1).contiguous()\n\n k = 2 * self.fourier_degree + 1\n tr_pred = cls_pred[:, :, :, :2].view(-1, 2)\n tcl_pred = cls_pred[:, :, :, 2:].view(-1, 2)\n x_pred = reg_pred[:, :, :, 0:k].view(-1, k)\n y_pred = reg_pred[:, :, :, k:2 * k].view(-1, k)\n\n tr_mask = gt[:, :, :, :1].view(-1)\n tcl_mask = gt[:, :, :, 1:2].view(-1)\n train_mask = gt[:, :, :, 2:3].view(-1)\n x_map = gt[:, :, :, 3:3 + k].view(-1, k)\n y_map = gt[:, :, :, 3 + k:].view(-1, k)\n\n tr_train_mask = train_mask * tr_mask\n device = x_map.device\n # tr loss\n loss_tr = self.ohem(tr_pred, tr_mask.long(), train_mask.long())\n\n # tcl loss\n loss_tcl = torch.tensor(0.).float().to(device)\n tr_neg_mask = 1 - tr_train_mask\n if tr_train_mask.sum().item() > 0:\n loss_tcl_pos = F.cross_entropy(\n tcl_pred[tr_train_mask.bool()],\n tcl_mask[tr_train_mask.bool()].long())\n loss_tcl_neg = F.cross_entropy(tcl_pred[tr_neg_mask.bool()],\n tcl_mask[tr_neg_mask.bool()].long())\n loss_tcl = loss_tcl_pos + 0.5 * loss_tcl_neg\n\n # regression loss\n loss_reg_x = torch.tensor(0.).float().to(device)\n loss_reg_y = torch.tensor(0.).float().to(device)\n if tr_train_mask.sum().item() > 0:\n weight = (tr_mask[tr_train_mask.bool()].float() +\n tcl_mask[tr_train_mask.bool()].float()) / 2\n weight = weight.contiguous().view(-1, 1)\n\n ft_x, ft_y = self.fourier2poly(x_map, y_map)\n ft_x_pre, ft_y_pre = self.fourier2poly(x_pred, y_pred)\n\n loss_reg_x = torch.mean(weight * F.smooth_l1_loss(\n ft_x_pre[tr_train_mask.bool()],\n ft_x[tr_train_mask.bool()],\n reduction='none'))\n loss_reg_y = torch.mean(weight * F.smooth_l1_loss(\n ft_y_pre[tr_train_mask.bool()],\n ft_y[tr_train_mask.bool()],\n reduction='none'))\n\n return loss_tr, loss_tcl, loss_reg_x, loss_reg_y\n\n def ohem(self, predict, target, train_mask):\n device = train_mask.device\n pos = (target * train_mask).bool()\n neg = ((1 - target) * 
train_mask).bool()\n\n n_pos = pos.float().sum()\n\n if n_pos.item() > 0:\n loss_pos = F.cross_entropy(\n predict[pos], target[pos], reduction='sum')\n loss_neg = F.cross_entropy(\n predict[neg], target[neg], reduction='none')\n n_neg = min(\n int(neg.float().sum().item()),\n int(self.ohem_ratio * n_pos.float()))\n else:\n loss_pos = torch.tensor(0.).to(device)\n loss_neg = F.cross_entropy(\n predict[neg], target[neg], reduction='none')\n n_neg = 100\n if len(loss_neg) > n_neg:\n loss_neg, _ = torch.topk(loss_neg, n_neg)\n\n return (loss_pos + loss_neg.sum()) / (n_pos + n_neg).float()\n\n def fourier2poly(self, real_maps, imag_maps):\n \"\"\"Transform Fourier coefficient maps to polygon maps.\n\n Args:\n real_maps (tensor): A map composed of the real parts of the\n Fourier coefficients, whose shape is (-1, 2k+1)\n imag_maps (tensor):A map composed of the imag parts of the\n Fourier coefficients, whose shape is (-1, 2k+1)\n\n Returns\n x_maps (tensor): A map composed of the x value of the polygon\n represented by n sample points (xn, yn), whose shape is (-1, n)\n y_maps (tensor): A map composed of the y value of the polygon\n represented by n sample points (xn, yn), whose shape is (-1, n)\n \"\"\"\n\n device = real_maps.device\n\n k_vect = torch.arange(\n -self.fourier_degree,\n self.fourier_degree + 1,\n dtype=torch.float,\n device=device).view(-1, 1)\n i_vect = torch.arange(\n 0, self.num_sample, dtype=torch.float, device=device).view(1, -1)\n\n transform_matrix = 2 * np.pi / self.num_sample * torch.mm(\n k_vect, i_vect)\n\n x1 = torch.einsum('ak, kn-> an', real_maps,\n torch.cos(transform_matrix))\n x2 = torch.einsum('ak, kn-> an', imag_maps,\n torch.sin(transform_matrix))\n y1 = torch.einsum('ak, kn-> an', real_maps,\n torch.sin(transform_matrix))\n y2 = torch.einsum('ak, kn-> an', imag_maps,\n torch.cos(transform_matrix))\n\n x_maps = x1 - x2\n y_maps = y1 + y2\n\n return x_maps, y_maps\n", "path": "mmocr/models/textdet/losses/fce_loss.py"}]}
| 2,686 | 230 |
gh_patches_debug_43238 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-689 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[WIP]Fix yolo format output path
### Summary
delete 'data' folder from path because it is not generated
### Checklist
<!-- Put an 'x' in all the boxes that apply -->
- [ ] I submit my changes into the `develop` branch
- [ ] I have added description of my changes into [CHANGELOG](https://github.com/openvinotoolkit/datumaro/blob/develop/CHANGELOG.md)
- [ ] I have updated the [documentation](
https://github.com/openvinotoolkit/datumaro/tree/develop/docs) accordingly
- [ ] I have added tests to cover my changes
- [ ] I have [linked related issues](
https://help.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword)
### License
- [ ] I submit _my code changes_ under the same [MIT License](
https://github.com/openvinotoolkit/datumaro/blob/develop/LICENSE) that covers the project.
Feel free to contact the maintainers if that's a concern.
- [ ] I have updated the license header for each file (see an example below)
```python
# Copyright (C) 2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datumaro/plugins/yolo_format/converter.py`
Content:
```
1 # Copyright (C) 2019-2022 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 import logging as log
6 import os
7 import os.path as osp
8 from collections import OrderedDict
9
10 from datumaro.components.annotation import AnnotationType, Bbox
11 from datumaro.components.converter import Converter
12 from datumaro.components.dataset import ItemStatus
13 from datumaro.components.errors import MediaTypeError
14 from datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem
15 from datumaro.components.media import Image
16
17 from .format import YoloPath
18
19
20 def _make_yolo_bbox(img_size, box):
21 # https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py
22 # <x> <y> <width> <height> - values relative to width and height of image
23 # <x> <y> - are center of rectangle
24 x = (box[0] + box[2]) / 2 / img_size[0]
25 y = (box[1] + box[3]) / 2 / img_size[1]
26 w = (box[2] - box[0]) / img_size[0]
27 h = (box[3] - box[1]) / img_size[1]
28 return x, y, w, h
29
30
31 class YoloConverter(Converter):
32 # https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects
33 DEFAULT_IMAGE_EXT = ".jpg"
34
35 def apply(self):
36 extractor = self._extractor
37 save_dir = self._save_dir
38
39 if self._extractor.media_type() and not issubclass(self._extractor.media_type(), Image):
40 raise MediaTypeError("Media type is not an image")
41
42 os.makedirs(save_dir, exist_ok=True)
43
44 if self._save_dataset_meta:
45 self._save_meta_file(self._save_dir)
46
47 label_categories = extractor.categories()[AnnotationType.label]
48 label_ids = {label.name: idx for idx, label in enumerate(label_categories.items)}
49 with open(osp.join(save_dir, "obj.names"), "w", encoding="utf-8") as f:
50 f.writelines("%s\n" % l[0] for l in sorted(label_ids.items(), key=lambda x: x[1]))
51
52 subset_lists = OrderedDict()
53
54 subsets = self._extractor.subsets()
55 pbars = self._ctx.progress_reporter.split(len(subsets))
56 for (subset_name, subset), pbar in zip(subsets.items(), pbars):
57 if not subset_name or subset_name == DEFAULT_SUBSET_NAME:
58 subset_name = YoloPath.DEFAULT_SUBSET_NAME
59 elif subset_name not in YoloPath.SUBSET_NAMES:
60 log.warning(
61 "Skipping subset export '%s'. "
62 "If specified, the only valid names are %s"
63 % (subset_name, ", ".join("'%s'" % s for s in YoloPath.SUBSET_NAMES))
64 )
65 continue
66
67 subset_dir = osp.join(save_dir, "obj_%s_data" % subset_name)
68 os.makedirs(subset_dir, exist_ok=True)
69
70 image_paths = OrderedDict()
71 for item in pbar.iter(subset, desc=f"Exporting '{subset_name}'"):
72 try:
73 if not item.media or not (item.media.has_data or item.media.has_size):
74 raise Exception(
75 "Failed to export item '%s': " "item has no image info" % item.id
76 )
77
78 image_name = self._make_image_filename(item)
79 if self._save_media:
80 if item.media:
81 self._save_image(item, osp.join(subset_dir, image_name))
82 else:
83 log.warning("Item '%s' has no image" % item.id)
84 image_paths[item.id] = osp.join("data", osp.basename(subset_dir), image_name)
85
86 yolo_annotation = self._export_item_annotation(item)
87 annotation_path = osp.join(subset_dir, "%s.txt" % item.id)
88 os.makedirs(osp.dirname(annotation_path), exist_ok=True)
89 with open(annotation_path, "w", encoding="utf-8") as f:
90 f.write(yolo_annotation)
91 except Exception as e:
92 self._report_item_error(e, item_id=(item.id, item.subset))
93
94 subset_list_name = "%s.txt" % subset_name
95 subset_list_path = osp.join(save_dir, subset_list_name)
96 if self._patch and subset_name in self._patch.updated_subsets and not image_paths:
97 if osp.isfile(subset_list_path):
98 os.remove(subset_list_path)
99 continue
100
101 subset_lists[subset_name] = subset_list_name
102 with open(subset_list_path, "w", encoding="utf-8") as f:
103 f.writelines("%s\n" % s for s in image_paths.values())
104
105 with open(osp.join(save_dir, "obj.data"), "w", encoding="utf-8") as f:
106 f.write("classes = %s\n" % len(label_ids))
107
108 for subset_name, subset_list_name in subset_lists.items():
109 f.write("%s = %s\n" % (subset_name, osp.join("data", subset_list_name)))
110
111 f.write("names = %s\n" % osp.join("data", "obj.names"))
112 f.write("backup = backup/\n")
113
114 def _export_item_annotation(self, item):
115 height, width = item.media.size
116
117 yolo_annotation = ""
118
119 for bbox in item.annotations:
120 if not isinstance(bbox, Bbox) or bbox.label is None:
121 continue
122
123 yolo_bb = _make_yolo_bbox((width, height), bbox.points)
124 yolo_bb = " ".join("%.6f" % p for p in yolo_bb)
125 yolo_annotation += "%s %s\n" % (bbox.label, yolo_bb)
126
127 return yolo_annotation
128
129 @classmethod
130 def patch(cls, dataset, patch, save_dir, **kwargs):
131 conv = cls(dataset, save_dir=save_dir, **kwargs)
132 conv._patch = patch
133 conv.apply()
134
135 for (item_id, subset), status in patch.updated_items.items():
136 if status != ItemStatus.removed:
137 item = patch.data.get(item_id, subset)
138 else:
139 item = DatasetItem(item_id, subset=subset)
140
141 if not (status == ItemStatus.removed or not item.media):
142 continue
143
144 if subset == DEFAULT_SUBSET_NAME:
145 subset = YoloPath.DEFAULT_SUBSET_NAME
146 subset_dir = osp.join(save_dir, "obj_%s_data" % subset)
147
148 image_path = osp.join(subset_dir, conv._make_image_filename(item))
149 if osp.isfile(image_path):
150 os.remove(image_path)
151
152 ann_path = osp.join(subset_dir, "%s.txt" % item.id)
153 if osp.isfile(ann_path):
154 os.remove(ann_path)
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datumaro/plugins/yolo_format/converter.py b/datumaro/plugins/yolo_format/converter.py
--- a/datumaro/plugins/yolo_format/converter.py
+++ b/datumaro/plugins/yolo_format/converter.py
@@ -11,8 +11,9 @@
from datumaro.components.converter import Converter
from datumaro.components.dataset import ItemStatus
from datumaro.components.errors import MediaTypeError
-from datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem
+from datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem, IExtractor
from datumaro.components.media import Image
+from datumaro.util import str_to_bool
from .format import YoloPath
@@ -32,6 +33,24 @@
# https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects
DEFAULT_IMAGE_EXT = ".jpg"
+ @classmethod
+ def build_cmdline_parser(cls, **kwargs):
+ parser = super().build_cmdline_parser(**kwargs)
+ parser.add_argument(
+ "--add-path-prefix",
+ default=True,
+ type=str_to_bool,
+ help="Add the 'data/' prefix for paths in the dataset info (default: %(default)s)",
+ )
+ return parser
+
+ def __init__(
+ self, extractor: IExtractor, save_dir: str, *, add_path_prefix: bool = True, **kwargs
+ ) -> None:
+ super().__init__(extractor, save_dir, **kwargs)
+
+ self._prefix = "data" if add_path_prefix else ""
+
def apply(self):
extractor = self._extractor
save_dir = self._save_dir
@@ -81,7 +100,9 @@
self._save_image(item, osp.join(subset_dir, image_name))
else:
log.warning("Item '%s' has no image" % item.id)
- image_paths[item.id] = osp.join("data", osp.basename(subset_dir), image_name)
+ image_paths[item.id] = osp.join(
+ self._prefix, osp.basename(subset_dir), image_name
+ )
yolo_annotation = self._export_item_annotation(item)
annotation_path = osp.join(subset_dir, "%s.txt" % item.id)
@@ -91,7 +112,7 @@
except Exception as e:
self._report_item_error(e, item_id=(item.id, item.subset))
- subset_list_name = "%s.txt" % subset_name
+ subset_list_name = f"{subset_name}.txt"
subset_list_path = osp.join(save_dir, subset_list_name)
if self._patch and subset_name in self._patch.updated_subsets and not image_paths:
if osp.isfile(subset_list_path):
@@ -100,15 +121,18 @@
subset_lists[subset_name] = subset_list_name
with open(subset_list_path, "w", encoding="utf-8") as f:
- f.writelines("%s\n" % s for s in image_paths.values())
+ f.writelines("%s\n" % s.replace("\\", "/") for s in image_paths.values())
with open(osp.join(save_dir, "obj.data"), "w", encoding="utf-8") as f:
- f.write("classes = %s\n" % len(label_ids))
+ f.write(f"classes = {len(label_ids)}\n")
for subset_name, subset_list_name in subset_lists.items():
- f.write("%s = %s\n" % (subset_name, osp.join("data", subset_list_name)))
+ f.write(
+ "%s = %s\n"
+ % (subset_name, osp.join(self._prefix, subset_list_name).replace("\\", "/"))
+ )
- f.write("names = %s\n" % osp.join("data", "obj.names"))
+ f.write("names = %s\n" % osp.join(self._prefix, "obj.names"))
f.write("backup = backup/\n")
def _export_item_annotation(self, item):
|
{"golden_diff": "diff --git a/datumaro/plugins/yolo_format/converter.py b/datumaro/plugins/yolo_format/converter.py\n--- a/datumaro/plugins/yolo_format/converter.py\n+++ b/datumaro/plugins/yolo_format/converter.py\n@@ -11,8 +11,9 @@\n from datumaro.components.converter import Converter\n from datumaro.components.dataset import ItemStatus\n from datumaro.components.errors import MediaTypeError\n-from datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem\n+from datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem, IExtractor\n from datumaro.components.media import Image\n+from datumaro.util import str_to_bool\n \n from .format import YoloPath\n \n@@ -32,6 +33,24 @@\n # https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects\n DEFAULT_IMAGE_EXT = \".jpg\"\n \n+ @classmethod\n+ def build_cmdline_parser(cls, **kwargs):\n+ parser = super().build_cmdline_parser(**kwargs)\n+ parser.add_argument(\n+ \"--add-path-prefix\",\n+ default=True,\n+ type=str_to_bool,\n+ help=\"Add the 'data/' prefix for paths in the dataset info (default: %(default)s)\",\n+ )\n+ return parser\n+\n+ def __init__(\n+ self, extractor: IExtractor, save_dir: str, *, add_path_prefix: bool = True, **kwargs\n+ ) -> None:\n+ super().__init__(extractor, save_dir, **kwargs)\n+\n+ self._prefix = \"data\" if add_path_prefix else \"\"\n+\n def apply(self):\n extractor = self._extractor\n save_dir = self._save_dir\n@@ -81,7 +100,9 @@\n self._save_image(item, osp.join(subset_dir, image_name))\n else:\n log.warning(\"Item '%s' has no image\" % item.id)\n- image_paths[item.id] = osp.join(\"data\", osp.basename(subset_dir), image_name)\n+ image_paths[item.id] = osp.join(\n+ self._prefix, osp.basename(subset_dir), image_name\n+ )\n \n yolo_annotation = self._export_item_annotation(item)\n annotation_path = osp.join(subset_dir, \"%s.txt\" % item.id)\n@@ -91,7 +112,7 @@\n except Exception as e:\n self._report_item_error(e, item_id=(item.id, item.subset))\n \n- subset_list_name = \"%s.txt\" % subset_name\n+ subset_list_name = f\"{subset_name}.txt\"\n subset_list_path = osp.join(save_dir, subset_list_name)\n if self._patch and subset_name in self._patch.updated_subsets and not image_paths:\n if osp.isfile(subset_list_path):\n@@ -100,15 +121,18 @@\n \n subset_lists[subset_name] = subset_list_name\n with open(subset_list_path, \"w\", encoding=\"utf-8\") as f:\n- f.writelines(\"%s\\n\" % s for s in image_paths.values())\n+ f.writelines(\"%s\\n\" % s.replace(\"\\\\\", \"/\") for s in image_paths.values())\n \n with open(osp.join(save_dir, \"obj.data\"), \"w\", encoding=\"utf-8\") as f:\n- f.write(\"classes = %s\\n\" % len(label_ids))\n+ f.write(f\"classes = {len(label_ids)}\\n\")\n \n for subset_name, subset_list_name in subset_lists.items():\n- f.write(\"%s = %s\\n\" % (subset_name, osp.join(\"data\", subset_list_name)))\n+ f.write(\n+ \"%s = %s\\n\"\n+ % (subset_name, osp.join(self._prefix, subset_list_name).replace(\"\\\\\", \"/\"))\n+ )\n \n- f.write(\"names = %s\\n\" % osp.join(\"data\", \"obj.names\"))\n+ f.write(\"names = %s\\n\" % osp.join(self._prefix, \"obj.names\"))\n f.write(\"backup = backup/\\n\")\n \n def _export_item_annotation(self, item):\n", "issue": "[WIP]Fix yolo format output path\n\r\n\r\n### Summary\r\ndelete 'data' folder from path because it is not generated\r\n\r\n\r\n\r\n### Checklist\r\n<!-- Put an 'x' in all the boxes that apply -->\r\n- [ ] I submit my changes into the `develop` branch\r\n- [ ] I have added description of my changes into 
[CHANGELOG](https://github.com/openvinotoolkit/datumaro/blob/develop/CHANGELOG.md)\r\n- [ ] I have updated the [documentation](\r\n https://github.com/openvinotoolkit/datumaro/tree/develop/docs) accordingly\r\n- [ ] I have added tests to cover my changes\r\n- [ ] I have [linked related issues](\r\n https://help.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword)\r\n\r\n### License\r\n\r\n- [ ] I submit _my code changes_ under the same [MIT License](\r\n https://github.com/openvinotoolkit/datumaro/blob/develop/LICENSE) that covers the project.\r\n Feel free to contact the maintainers if that's a concern.\r\n- [ ] I have updated the license header for each file (see an example below)\r\n\r\n```python\r\n# Copyright (C) 2021 Intel Corporation\r\n#\r\n# SPDX-License-Identifier: MIT\r\n```\r\n\n", "before_files": [{"content": "# Copyright (C) 2019-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nfrom datumaro.components.annotation import AnnotationType, Bbox\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.dataset import ItemStatus\nfrom datumaro.components.errors import MediaTypeError\nfrom datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem\nfrom datumaro.components.media import Image\n\nfrom .format import YoloPath\n\n\ndef _make_yolo_bbox(img_size, box):\n # https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py\n # <x> <y> <width> <height> - values relative to width and height of image\n # <x> <y> - are center of rectangle\n x = (box[0] + box[2]) / 2 / img_size[0]\n y = (box[1] + box[3]) / 2 / img_size[1]\n w = (box[2] - box[0]) / img_size[0]\n h = (box[3] - box[1]) / img_size[1]\n return x, y, w, h\n\n\nclass YoloConverter(Converter):\n # https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects\n DEFAULT_IMAGE_EXT = \".jpg\"\n\n def apply(self):\n extractor = self._extractor\n save_dir = self._save_dir\n\n if self._extractor.media_type() and not issubclass(self._extractor.media_type(), Image):\n raise MediaTypeError(\"Media type is not an image\")\n\n os.makedirs(save_dir, exist_ok=True)\n\n if self._save_dataset_meta:\n self._save_meta_file(self._save_dir)\n\n label_categories = extractor.categories()[AnnotationType.label]\n label_ids = {label.name: idx for idx, label in enumerate(label_categories.items)}\n with open(osp.join(save_dir, \"obj.names\"), \"w\", encoding=\"utf-8\") as f:\n f.writelines(\"%s\\n\" % l[0] for l in sorted(label_ids.items(), key=lambda x: x[1]))\n\n subset_lists = OrderedDict()\n\n subsets = self._extractor.subsets()\n pbars = self._ctx.progress_reporter.split(len(subsets))\n for (subset_name, subset), pbar in zip(subsets.items(), pbars):\n if not subset_name or subset_name == DEFAULT_SUBSET_NAME:\n subset_name = YoloPath.DEFAULT_SUBSET_NAME\n elif subset_name not in YoloPath.SUBSET_NAMES:\n log.warning(\n \"Skipping subset export '%s'. 
\"\n \"If specified, the only valid names are %s\"\n % (subset_name, \", \".join(\"'%s'\" % s for s in YoloPath.SUBSET_NAMES))\n )\n continue\n\n subset_dir = osp.join(save_dir, \"obj_%s_data\" % subset_name)\n os.makedirs(subset_dir, exist_ok=True)\n\n image_paths = OrderedDict()\n for item in pbar.iter(subset, desc=f\"Exporting '{subset_name}'\"):\n try:\n if not item.media or not (item.media.has_data or item.media.has_size):\n raise Exception(\n \"Failed to export item '%s': \" \"item has no image info\" % item.id\n )\n\n image_name = self._make_image_filename(item)\n if self._save_media:\n if item.media:\n self._save_image(item, osp.join(subset_dir, image_name))\n else:\n log.warning(\"Item '%s' has no image\" % item.id)\n image_paths[item.id] = osp.join(\"data\", osp.basename(subset_dir), image_name)\n\n yolo_annotation = self._export_item_annotation(item)\n annotation_path = osp.join(subset_dir, \"%s.txt\" % item.id)\n os.makedirs(osp.dirname(annotation_path), exist_ok=True)\n with open(annotation_path, \"w\", encoding=\"utf-8\") as f:\n f.write(yolo_annotation)\n except Exception as e:\n self._report_item_error(e, item_id=(item.id, item.subset))\n\n subset_list_name = \"%s.txt\" % subset_name\n subset_list_path = osp.join(save_dir, subset_list_name)\n if self._patch and subset_name in self._patch.updated_subsets and not image_paths:\n if osp.isfile(subset_list_path):\n os.remove(subset_list_path)\n continue\n\n subset_lists[subset_name] = subset_list_name\n with open(subset_list_path, \"w\", encoding=\"utf-8\") as f:\n f.writelines(\"%s\\n\" % s for s in image_paths.values())\n\n with open(osp.join(save_dir, \"obj.data\"), \"w\", encoding=\"utf-8\") as f:\n f.write(\"classes = %s\\n\" % len(label_ids))\n\n for subset_name, subset_list_name in subset_lists.items():\n f.write(\"%s = %s\\n\" % (subset_name, osp.join(\"data\", subset_list_name)))\n\n f.write(\"names = %s\\n\" % osp.join(\"data\", \"obj.names\"))\n f.write(\"backup = backup/\\n\")\n\n def _export_item_annotation(self, item):\n height, width = item.media.size\n\n yolo_annotation = \"\"\n\n for bbox in item.annotations:\n if not isinstance(bbox, Bbox) or bbox.label is None:\n continue\n\n yolo_bb = _make_yolo_bbox((width, height), bbox.points)\n yolo_bb = \" \".join(\"%.6f\" % p for p in yolo_bb)\n yolo_annotation += \"%s %s\\n\" % (bbox.label, yolo_bb)\n\n return yolo_annotation\n\n @classmethod\n def patch(cls, dataset, patch, save_dir, **kwargs):\n conv = cls(dataset, save_dir=save_dir, **kwargs)\n conv._patch = patch\n conv.apply()\n\n for (item_id, subset), status in patch.updated_items.items():\n if status != ItemStatus.removed:\n item = patch.data.get(item_id, subset)\n else:\n item = DatasetItem(item_id, subset=subset)\n\n if not (status == ItemStatus.removed or not item.media):\n continue\n\n if subset == DEFAULT_SUBSET_NAME:\n subset = YoloPath.DEFAULT_SUBSET_NAME\n subset_dir = osp.join(save_dir, \"obj_%s_data\" % subset)\n\n image_path = osp.join(subset_dir, conv._make_image_filename(item))\n if osp.isfile(image_path):\n os.remove(image_path)\n\n ann_path = osp.join(subset_dir, \"%s.txt\" % item.id)\n if osp.isfile(ann_path):\n os.remove(ann_path)\n", "path": "datumaro/plugins/yolo_format/converter.py"}], "after_files": [{"content": "# Copyright (C) 2019-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nfrom datumaro.components.annotation import AnnotationType, Bbox\nfrom datumaro.components.converter 
import Converter\nfrom datumaro.components.dataset import ItemStatus\nfrom datumaro.components.errors import MediaTypeError\nfrom datumaro.components.extractor import DEFAULT_SUBSET_NAME, DatasetItem, IExtractor\nfrom datumaro.components.media import Image\nfrom datumaro.util import str_to_bool\n\nfrom .format import YoloPath\n\n\ndef _make_yolo_bbox(img_size, box):\n # https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py\n # <x> <y> <width> <height> - values relative to width and height of image\n # <x> <y> - are center of rectangle\n x = (box[0] + box[2]) / 2 / img_size[0]\n y = (box[1] + box[3]) / 2 / img_size[1]\n w = (box[2] - box[0]) / img_size[0]\n h = (box[3] - box[1]) / img_size[1]\n return x, y, w, h\n\n\nclass YoloConverter(Converter):\n # https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects\n DEFAULT_IMAGE_EXT = \".jpg\"\n\n @classmethod\n def build_cmdline_parser(cls, **kwargs):\n parser = super().build_cmdline_parser(**kwargs)\n parser.add_argument(\n \"--add-path-prefix\",\n default=True,\n type=str_to_bool,\n help=\"Add the 'data/' prefix for paths in the dataset info (default: %(default)s)\",\n )\n return parser\n\n def __init__(\n self, extractor: IExtractor, save_dir: str, *, add_path_prefix: bool = True, **kwargs\n ) -> None:\n super().__init__(extractor, save_dir, **kwargs)\n\n self._prefix = \"data\" if add_path_prefix else \"\"\n\n def apply(self):\n extractor = self._extractor\n save_dir = self._save_dir\n\n if self._extractor.media_type() and not issubclass(self._extractor.media_type(), Image):\n raise MediaTypeError(\"Media type is not an image\")\n\n os.makedirs(save_dir, exist_ok=True)\n\n if self._save_dataset_meta:\n self._save_meta_file(self._save_dir)\n\n label_categories = extractor.categories()[AnnotationType.label]\n label_ids = {label.name: idx for idx, label in enumerate(label_categories.items)}\n with open(osp.join(save_dir, \"obj.names\"), \"w\", encoding=\"utf-8\") as f:\n f.writelines(\"%s\\n\" % l[0] for l in sorted(label_ids.items(), key=lambda x: x[1]))\n\n subset_lists = OrderedDict()\n\n subsets = self._extractor.subsets()\n pbars = self._ctx.progress_reporter.split(len(subsets))\n for (subset_name, subset), pbar in zip(subsets.items(), pbars):\n if not subset_name or subset_name == DEFAULT_SUBSET_NAME:\n subset_name = YoloPath.DEFAULT_SUBSET_NAME\n elif subset_name not in YoloPath.SUBSET_NAMES:\n log.warning(\n \"Skipping subset export '%s'. 
\"\n \"If specified, the only valid names are %s\"\n % (subset_name, \", \".join(\"'%s'\" % s for s in YoloPath.SUBSET_NAMES))\n )\n continue\n\n subset_dir = osp.join(save_dir, \"obj_%s_data\" % subset_name)\n os.makedirs(subset_dir, exist_ok=True)\n\n image_paths = OrderedDict()\n for item in pbar.iter(subset, desc=f\"Exporting '{subset_name}'\"):\n try:\n if not item.media or not (item.media.has_data or item.media.has_size):\n raise Exception(\n \"Failed to export item '%s': \" \"item has no image info\" % item.id\n )\n\n image_name = self._make_image_filename(item)\n if self._save_media:\n if item.media:\n self._save_image(item, osp.join(subset_dir, image_name))\n else:\n log.warning(\"Item '%s' has no image\" % item.id)\n image_paths[item.id] = osp.join(\n self._prefix, osp.basename(subset_dir), image_name\n )\n\n yolo_annotation = self._export_item_annotation(item)\n annotation_path = osp.join(subset_dir, \"%s.txt\" % item.id)\n os.makedirs(osp.dirname(annotation_path), exist_ok=True)\n with open(annotation_path, \"w\", encoding=\"utf-8\") as f:\n f.write(yolo_annotation)\n except Exception as e:\n self._report_item_error(e, item_id=(item.id, item.subset))\n\n subset_list_name = f\"{subset_name}.txt\"\n subset_list_path = osp.join(save_dir, subset_list_name)\n if self._patch and subset_name in self._patch.updated_subsets and not image_paths:\n if osp.isfile(subset_list_path):\n os.remove(subset_list_path)\n continue\n\n subset_lists[subset_name] = subset_list_name\n with open(subset_list_path, \"w\", encoding=\"utf-8\") as f:\n f.writelines(\"%s\\n\" % s.replace(\"\\\\\", \"/\") for s in image_paths.values())\n\n with open(osp.join(save_dir, \"obj.data\"), \"w\", encoding=\"utf-8\") as f:\n f.write(f\"classes = {len(label_ids)}\\n\")\n\n for subset_name, subset_list_name in subset_lists.items():\n f.write(\n \"%s = %s\\n\"\n % (subset_name, osp.join(self._prefix, subset_list_name).replace(\"\\\\\", \"/\"))\n )\n\n f.write(\"names = %s\\n\" % osp.join(self._prefix, \"obj.names\"))\n f.write(\"backup = backup/\\n\")\n\n def _export_item_annotation(self, item):\n height, width = item.media.size\n\n yolo_annotation = \"\"\n\n for bbox in item.annotations:\n if not isinstance(bbox, Bbox) or bbox.label is None:\n continue\n\n yolo_bb = _make_yolo_bbox((width, height), bbox.points)\n yolo_bb = \" \".join(\"%.6f\" % p for p in yolo_bb)\n yolo_annotation += \"%s %s\\n\" % (bbox.label, yolo_bb)\n\n return yolo_annotation\n\n @classmethod\n def patch(cls, dataset, patch, save_dir, **kwargs):\n conv = cls(dataset, save_dir=save_dir, **kwargs)\n conv._patch = patch\n conv.apply()\n\n for (item_id, subset), status in patch.updated_items.items():\n if status != ItemStatus.removed:\n item = patch.data.get(item_id, subset)\n else:\n item = DatasetItem(item_id, subset=subset)\n\n if not (status == ItemStatus.removed or not item.media):\n continue\n\n if subset == DEFAULT_SUBSET_NAME:\n subset = YoloPath.DEFAULT_SUBSET_NAME\n subset_dir = osp.join(save_dir, \"obj_%s_data\" % subset)\n\n image_path = osp.join(subset_dir, conv._make_image_filename(item))\n if osp.isfile(image_path):\n os.remove(image_path)\n\n ann_path = osp.join(subset_dir, \"%s.txt\" % item.id)\n if osp.isfile(ann_path):\n os.remove(ann_path)\n", "path": "datumaro/plugins/yolo_format/converter.py"}]}
| 2,400 | 906 |
gh_patches_debug_18222 | rasdani/github-patches | git_diff | streamlit__streamlit-1908 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
st.map() fails with zoom error for small number of points
# Summary
In this [forum post](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950), the user highlights that `st.map()` does not plot a single point for the map. To troubleshoot, I opened up the Chrome console, saw a "Zoom must be set error" message in regards to the map.
# Solution
Adding a non `None` zoom value in the `st.map()` call [resolves the issue](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950/3?u=randyzwitch). Need a heuristic for a "small" number of points where `st.map()` automatically adds an integer value for the `zoom` parameter. At larger volumes of data, `zoom =None` seems to be ok, and deck.gl infers the proper zoom level based on the bounding box of data points.
st.map() fails with zoom error for small number of points
# Summary
In this [forum post](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950), the user highlights that `st.map()` does not plot a single point for the map. To troubleshoot, I opened up the Chrome console, saw a "Zoom must be set error" message in regards to the map.
# Solution
Adding a non `None` zoom value in the `st.map()` call [resolves the issue](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950/3?u=randyzwitch). Need a heuristic for a "small" number of points where `st.map()` automatically adds an integer value for the `zoom` parameter. At larger volumes of data, `zoom =None` seems to be ok, and deck.gl infers the proper zoom level based on the bounding box of data points.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/elements/map.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """A wrapper for simple PyDeck scatter charts."""
16
17 import copy
18 import json
19 from typing import Any, Dict
20
21 import pandas as pd
22
23 from streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto
24 import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
25 from streamlit.errors import StreamlitAPIException
26
27
28 class MapMixin:
29 def map(dg, data=None, zoom=None, use_container_width=True):
30 """Display a map with points on it.
31
32 This is a wrapper around st.pydeck_chart to quickly create scatterplot
33 charts on top of a map, with auto-centering and auto-zoom.
34
35 When using this command, we advise all users to use a personal Mapbox
36 token. This ensures the map tiles used in this chart are more
37 robust. You can do this with the mapbox.token config option.
38
39 To get a token for yourself, create an account at
40 https://mapbox.com. It's free! (for moderate usage levels) See
41 https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more
42 info on how to set config options.
43
44 Parameters
45 ----------
46 data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,
47 or None
48 The data to be plotted. Must have columns called 'lat', 'lon',
49 'latitude', or 'longitude'.
50 zoom : int
51 Zoom level as specified in
52 https://wiki.openstreetmap.org/wiki/Zoom_levels
53
54 Example
55 -------
56 >>> import pandas as pd
57 >>> import numpy as np
58 >>>
59 >>> df = pd.DataFrame(
60 ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
61 ... columns=['lat', 'lon'])
62 >>>
63 >>> st.map(df)
64
65 .. output::
66 https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH
67 height: 600px
68
69 """
70 map_proto = DeckGlJsonChartProto()
71 map_proto.json = to_deckgl_json(data, zoom)
72 map_proto.use_container_width = use_container_width
73 return dg._enqueue("deck_gl_json_chart", map_proto) # type: ignore
74
75
76 # Map used as the basis for st.map.
77 _DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]
78 _DEFAULT_MAP["mapStyle"] = "mapbox://styles/mapbox/light-v10"
79
80 # Other default parameters for st.map.
81 _DEFAULT_COLOR = [200, 30, 0, 160]
82 _ZOOM_LEVELS = [
83 360,
84 180,
85 90,
86 45,
87 22.5,
88 11.25,
89 5.625,
90 2.813,
91 1.406,
92 0.703,
93 0.352,
94 0.176,
95 0.088,
96 0.044,
97 0.022,
98 0.011,
99 0.005,
100 0.003,
101 0.001,
102 0.0005,
103 ]
104
105
106 def _get_zoom_level(distance):
107 """Get the zoom level for a given distance in degrees.
108
109 See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.
110
111 Parameters
112 ----------
113 distance : float
114 How many degrees of longitude should fit in the map.
115
116 Returns
117 -------
118 int
119 The zoom level, from 0 to 29.
120
121 """
122
123 for i in range(len(_ZOOM_LEVELS) - 1):
124 if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:
125 return i
126
127
128 def to_deckgl_json(data, zoom):
129
130 if data is None or data.empty:
131 return json.dumps(_DEFAULT_MAP)
132
133 if "lat" in data:
134 lat = "lat"
135 elif "latitude" in data:
136 lat = "latitude"
137 else:
138 raise StreamlitAPIException(
139 'Map data must contain a column named "latitude" or "lat".'
140 )
141
142 if "lon" in data:
143 lon = "lon"
144 elif "longitude" in data:
145 lon = "longitude"
146 else:
147 raise StreamlitAPIException(
148 'Map data must contain a column called "longitude" or "lon".'
149 )
150
151 if data[lon].isnull().values.any() or data[lat].isnull().values.any():
152 raise StreamlitAPIException("Latitude and longitude data must be numeric.")
153
154 data = pd.DataFrame(data)
155
156 min_lat = data[lat].min()
157 max_lat = data[lat].max()
158 min_lon = data[lon].min()
159 max_lon = data[lon].max()
160 center_lat = (max_lat + min_lat) / 2.0
161 center_lon = (max_lon + min_lon) / 2.0
162 range_lon = abs(max_lon - min_lon)
163 range_lat = abs(max_lat - min_lat)
164
165 if zoom == None:
166 if range_lon > range_lat:
167 longitude_distance = range_lon
168 else:
169 longitude_distance = range_lat
170 zoom = _get_zoom_level(longitude_distance)
171
172 # "+1" because itertuples includes the row index.
173 lon_col_index = data.columns.get_loc(lon) + 1
174 lat_col_index = data.columns.get_loc(lat) + 1
175 final_data = []
176 for row in data.itertuples():
177 final_data.append(
178 {"lon": float(row[lon_col_index]), "lat": float(row[lat_col_index])}
179 )
180
181 default = copy.deepcopy(_DEFAULT_MAP)
182 default["initialViewState"]["latitude"] = center_lat
183 default["initialViewState"]["longitude"] = center_lon
184 default["initialViewState"]["zoom"] = zoom
185 default["layers"] = [
186 {
187 "@@type": "ScatterplotLayer",
188 "getPosition": "@@=[lon, lat]",
189 "getRadius": 10,
190 "radiusScale": 10,
191 "radiusMinPixels": 3,
192 "getFillColor": _DEFAULT_COLOR,
193 "data": final_data,
194 }
195 ]
196 return json.dumps(default)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/streamlit/elements/map.py b/lib/streamlit/elements/map.py
--- a/lib/streamlit/elements/map.py
+++ b/lib/streamlit/elements/map.py
@@ -79,6 +79,7 @@
# Other default parameters for st.map.
_DEFAULT_COLOR = [200, 30, 0, 160]
+_DEFAULT_ZOOM_LEVEL = 12
_ZOOM_LEVELS = [
360,
180,
@@ -100,6 +101,7 @@
0.003,
0.001,
0.0005,
+ 0.00025,
]
@@ -116,10 +118,14 @@
Returns
-------
int
- The zoom level, from 0 to 29.
+ The zoom level, from 0 to 20.
"""
+ # For small number of points the default zoom level will be used.
+ if distance < _ZOOM_LEVELS[-1]:
+ return _DEFAULT_ZOOM_LEVEL
+
for i in range(len(_ZOOM_LEVELS) - 1):
if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:
return i
|
{"golden_diff": "diff --git a/lib/streamlit/elements/map.py b/lib/streamlit/elements/map.py\n--- a/lib/streamlit/elements/map.py\n+++ b/lib/streamlit/elements/map.py\n@@ -79,6 +79,7 @@\n \n # Other default parameters for st.map.\n _DEFAULT_COLOR = [200, 30, 0, 160]\n+_DEFAULT_ZOOM_LEVEL = 12\n _ZOOM_LEVELS = [\n 360,\n 180,\n@@ -100,6 +101,7 @@\n 0.003,\n 0.001,\n 0.0005,\n+ 0.00025,\n ]\n \n \n@@ -116,10 +118,14 @@\n Returns\n -------\n int\n- The zoom level, from 0 to 29.\n+ The zoom level, from 0 to 20.\n \n \"\"\"\n \n+ # For small number of points the default zoom level will be used.\n+ if distance < _ZOOM_LEVELS[-1]:\n+ return _DEFAULT_ZOOM_LEVEL\n+\n for i in range(len(_ZOOM_LEVELS) - 1):\n if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:\n return i\n", "issue": "st.map() fails with zoom error for small number of points\n# Summary\r\n\r\nIn this [forum post](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950), the user highlights that `st.map()` does not plot a single point for the map. To troubleshoot, I opened up the Chrome console, saw a \"Zoom must be set error\" message in regards to the map.\r\n\r\n# Solution\r\n\r\nAdding a non `None` zoom value in the `st.map()` call [resolves the issue](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950/3?u=randyzwitch). Need a heuristic for a \"small\" number of points where `st.map()` automatically adds an integer value for the `zoom` parameter. At larger volumes of data, `zoom =None` seems to be ok, and deck.gl infers the proper zoom level based on the bounding box of data points.\nst.map() fails with zoom error for small number of points\n# Summary\r\n\r\nIn this [forum post](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950), the user highlights that `st.map()` does not plot a single point for the map. To troubleshoot, I opened up the Chrome console, saw a \"Zoom must be set error\" message in regards to the map.\r\n\r\n# Solution\r\n\r\nAdding a non `None` zoom value in the `st.map()` call [resolves the issue](https://discuss.streamlit.io/t/plot-map-with-address-from-user-input/2950/3?u=randyzwitch). Need a heuristic for a \"small\" number of points where `st.map()` automatically adds an integer value for the `zoom` parameter. 
At larger volumes of data, `zoom =None` seems to be ok, and deck.gl infers the proper zoom level based on the bounding box of data points.\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A wrapper for simple PyDeck scatter charts.\"\"\"\n\nimport copy\nimport json\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nfrom streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto\nimport streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\nfrom streamlit.errors import StreamlitAPIException\n\n\nclass MapMixin:\n def map(dg, data=None, zoom=None, use_container_width=True):\n \"\"\"Display a map with points on it.\n\n This is a wrapper around st.pydeck_chart to quickly create scatterplot\n charts on top of a map, with auto-centering and auto-zoom.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to be plotted. Must have columns called 'lat', 'lon',\n 'latitude', or 'longitude'.\n zoom : int\n Zoom level as specified in\n https://wiki.openstreetmap.org/wiki/Zoom_levels\n\n Example\n -------\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.map(df)\n\n .. 
output::\n https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH\n height: 600px\n\n \"\"\"\n map_proto = DeckGlJsonChartProto()\n map_proto.json = to_deckgl_json(data, zoom)\n map_proto.use_container_width = use_container_width\n return dg._enqueue(\"deck_gl_json_chart\", map_proto) # type: ignore\n\n\n# Map used as the basis for st.map.\n_DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]\n_DEFAULT_MAP[\"mapStyle\"] = \"mapbox://styles/mapbox/light-v10\"\n\n# Other default parameters for st.map.\n_DEFAULT_COLOR = [200, 30, 0, 160]\n_ZOOM_LEVELS = [\n 360,\n 180,\n 90,\n 45,\n 22.5,\n 11.25,\n 5.625,\n 2.813,\n 1.406,\n 0.703,\n 0.352,\n 0.176,\n 0.088,\n 0.044,\n 0.022,\n 0.011,\n 0.005,\n 0.003,\n 0.001,\n 0.0005,\n]\n\n\ndef _get_zoom_level(distance):\n \"\"\"Get the zoom level for a given distance in degrees.\n\n See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.\n\n Parameters\n ----------\n distance : float\n How many degrees of longitude should fit in the map.\n\n Returns\n -------\n int\n The zoom level, from 0 to 29.\n\n \"\"\"\n\n for i in range(len(_ZOOM_LEVELS) - 1):\n if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:\n return i\n\n\ndef to_deckgl_json(data, zoom):\n\n if data is None or data.empty:\n return json.dumps(_DEFAULT_MAP)\n\n if \"lat\" in data:\n lat = \"lat\"\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n raise StreamlitAPIException(\n 'Map data must contain a column named \"latitude\" or \"lat\".'\n )\n\n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n raise StreamlitAPIException(\n 'Map data must contain a column called \"longitude\" or \"lon\".'\n )\n\n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n raise StreamlitAPIException(\"Latitude and longitude data must be numeric.\")\n\n data = pd.DataFrame(data)\n\n min_lat = data[lat].min()\n max_lat = data[lat].max()\n min_lon = data[lon].min()\n max_lon = data[lon].max()\n center_lat = (max_lat + min_lat) / 2.0\n center_lon = (max_lon + min_lon) / 2.0\n range_lon = abs(max_lon - min_lon)\n range_lat = abs(max_lat - min_lat)\n\n if zoom == None:\n if range_lon > range_lat:\n longitude_distance = range_lon\n else:\n longitude_distance = range_lat\n zoom = _get_zoom_level(longitude_distance)\n\n # \"+1\" because itertuples includes the row index.\n lon_col_index = data.columns.get_loc(lon) + 1\n lat_col_index = data.columns.get_loc(lat) + 1\n final_data = []\n for row in data.itertuples():\n final_data.append(\n {\"lon\": float(row[lon_col_index]), \"lat\": float(row[lat_col_index])}\n )\n\n default = copy.deepcopy(_DEFAULT_MAP)\n default[\"initialViewState\"][\"latitude\"] = center_lat\n default[\"initialViewState\"][\"longitude\"] = center_lon\n default[\"initialViewState\"][\"zoom\"] = zoom\n default[\"layers\"] = [\n {\n \"@@type\": \"ScatterplotLayer\",\n \"getPosition\": \"@@=[lon, lat]\",\n \"getRadius\": 10,\n \"radiusScale\": 10,\n \"radiusMinPixels\": 3,\n \"getFillColor\": _DEFAULT_COLOR,\n \"data\": final_data,\n }\n ]\n return json.dumps(default)\n", "path": "lib/streamlit/elements/map.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A wrapper for simple PyDeck scatter charts.\"\"\"\n\nimport copy\nimport json\nfrom typing import Any, Dict\n\nimport pandas as pd\n\nfrom streamlit.proto.DeckGlJsonChart_pb2 import DeckGlJsonChart as DeckGlJsonChartProto\nimport streamlit.elements.deck_gl_json_chart as deck_gl_json_chart\nfrom streamlit.errors import StreamlitAPIException\n\n\nclass MapMixin:\n def map(dg, data=None, zoom=None, use_container_width=True):\n \"\"\"Display a map with points on it.\n\n This is a wrapper around st.pydeck_chart to quickly create scatterplot\n charts on top of a map, with auto-centering and auto-zoom.\n\n When using this command, we advise all users to use a personal Mapbox\n token. This ensures the map tiles used in this chart are more\n robust. You can do this with the mapbox.token config option.\n\n To get a token for yourself, create an account at\n https://mapbox.com. It's free! (for moderate usage levels) See\n https://docs.streamlit.io/en/latest/cli.html#view-all-config-options for more\n info on how to set config options.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, numpy.ndarray, Iterable, dict,\n or None\n The data to be plotted. Must have columns called 'lat', 'lon',\n 'latitude', or 'longitude'.\n zoom : int\n Zoom level as specified in\n https://wiki.openstreetmap.org/wiki/Zoom_levels\n\n Example\n -------\n >>> import pandas as pd\n >>> import numpy as np\n >>>\n >>> df = pd.DataFrame(\n ... np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n ... columns=['lat', 'lon'])\n >>>\n >>> st.map(df)\n\n .. 
output::\n https://share.streamlit.io/0.53.0-SULT/index.html?id=9gTiomqPEbvHY2huTLoQtH\n height: 600px\n\n \"\"\"\n map_proto = DeckGlJsonChartProto()\n map_proto.json = to_deckgl_json(data, zoom)\n map_proto.use_container_width = use_container_width\n return dg._enqueue(\"deck_gl_json_chart\", map_proto) # type: ignore\n\n\n# Map used as the basis for st.map.\n_DEFAULT_MAP = dict(deck_gl_json_chart.EMPTY_MAP) # type: Dict[str, Any]\n_DEFAULT_MAP[\"mapStyle\"] = \"mapbox://styles/mapbox/light-v10\"\n\n# Other default parameters for st.map.\n_DEFAULT_COLOR = [200, 30, 0, 160]\n_DEFAULT_ZOOM_LEVEL = 12\n_ZOOM_LEVELS = [\n 360,\n 180,\n 90,\n 45,\n 22.5,\n 11.25,\n 5.625,\n 2.813,\n 1.406,\n 0.703,\n 0.352,\n 0.176,\n 0.088,\n 0.044,\n 0.022,\n 0.011,\n 0.005,\n 0.003,\n 0.001,\n 0.0005,\n 0.00025,\n]\n\n\ndef _get_zoom_level(distance):\n \"\"\"Get the zoom level for a given distance in degrees.\n\n See https://wiki.openstreetmap.org/wiki/Zoom_levels for reference.\n\n Parameters\n ----------\n distance : float\n How many degrees of longitude should fit in the map.\n\n Returns\n -------\n int\n The zoom level, from 0 to 20.\n\n \"\"\"\n\n # For small number of points the default zoom level will be used.\n if distance < _ZOOM_LEVELS[-1]:\n return _DEFAULT_ZOOM_LEVEL\n\n for i in range(len(_ZOOM_LEVELS) - 1):\n if _ZOOM_LEVELS[i + 1] < distance <= _ZOOM_LEVELS[i]:\n return i\n\n\ndef to_deckgl_json(data, zoom):\n\n if data is None or data.empty:\n return json.dumps(_DEFAULT_MAP)\n\n if \"lat\" in data:\n lat = \"lat\"\n elif \"latitude\" in data:\n lat = \"latitude\"\n else:\n raise StreamlitAPIException(\n 'Map data must contain a column named \"latitude\" or \"lat\".'\n )\n\n if \"lon\" in data:\n lon = \"lon\"\n elif \"longitude\" in data:\n lon = \"longitude\"\n else:\n raise StreamlitAPIException(\n 'Map data must contain a column called \"longitude\" or \"lon\".'\n )\n\n if data[lon].isnull().values.any() or data[lat].isnull().values.any():\n raise StreamlitAPIException(\"Latitude and longitude data must be numeric.\")\n\n data = pd.DataFrame(data)\n\n min_lat = data[lat].min()\n max_lat = data[lat].max()\n min_lon = data[lon].min()\n max_lon = data[lon].max()\n center_lat = (max_lat + min_lat) / 2.0\n center_lon = (max_lon + min_lon) / 2.0\n range_lon = abs(max_lon - min_lon)\n range_lat = abs(max_lat - min_lat)\n\n if zoom == None:\n if range_lon > range_lat:\n longitude_distance = range_lon\n else:\n longitude_distance = range_lat\n zoom = _get_zoom_level(longitude_distance)\n\n # \"+1\" because itertuples includes the row index.\n lon_col_index = data.columns.get_loc(lon) + 1\n lat_col_index = data.columns.get_loc(lat) + 1\n final_data = []\n for row in data.itertuples():\n final_data.append(\n {\"lon\": float(row[lon_col_index]), \"lat\": float(row[lat_col_index])}\n )\n\n default = copy.deepcopy(_DEFAULT_MAP)\n default[\"initialViewState\"][\"latitude\"] = center_lat\n default[\"initialViewState\"][\"longitude\"] = center_lon\n default[\"initialViewState\"][\"zoom\"] = zoom\n default[\"layers\"] = [\n {\n \"@@type\": \"ScatterplotLayer\",\n \"getPosition\": \"@@=[lon, lat]\",\n \"getRadius\": 10,\n \"radiusScale\": 10,\n \"radiusMinPixels\": 3,\n \"getFillColor\": _DEFAULT_COLOR,\n \"data\": final_data,\n }\n ]\n return json.dumps(default)\n", "path": "lib/streamlit/elements/map.py"}]}
| 2,735 | 300 |
gh_patches_debug_21737
|
rasdani/github-patches
|
git_diff
|
openmc-dev__openmc-1254
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add method to get total material density
For multiphysics coupling, convergence between successive iterations of OpenMC with a T/H solver may be based on the densities, in which case we will need to make the initial density used in OpenMC available to a multiphysics driver.
There is an `openmc_material_get_densities()` method to get the density of each nuclide in a material, but it would be cleaner for a multiphysics driver to avoid doing a summation on this density and instead directly get the density in g/cm^3.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openmc/capi/material.py`
Content:
```
1 from collections.abc import Mapping
2 from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t
3 from weakref import WeakValueDictionary
4
5 import numpy as np
6 from numpy.ctypeslib import as_array
7
8 from openmc.exceptions import AllocationError, InvalidIDError, OpenMCError
9 from . import _dll, Nuclide
10 from .core import _FortranObjectWithID
11 from .error import _error_handler
12
13
14 __all__ = ['Material', 'materials']
15
16 # Material functions
17 _dll.openmc_extend_materials.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
18 _dll.openmc_extend_materials.restype = c_int
19 _dll.openmc_extend_materials.errcheck = _error_handler
20 _dll.openmc_get_material_index.argtypes = [c_int32, POINTER(c_int32)]
21 _dll.openmc_get_material_index.restype = c_int
22 _dll.openmc_get_material_index.errcheck = _error_handler
23 _dll.openmc_material_add_nuclide.argtypes = [
24 c_int32, c_char_p, c_double]
25 _dll.openmc_material_add_nuclide.restype = c_int
26 _dll.openmc_material_add_nuclide.errcheck = _error_handler
27 _dll.openmc_material_get_id.argtypes = [c_int32, POINTER(c_int32)]
28 _dll.openmc_material_get_id.restype = c_int
29 _dll.openmc_material_get_id.errcheck = _error_handler
30 _dll.openmc_material_get_densities.argtypes = [
31 c_int32, POINTER(POINTER(c_int)), POINTER(POINTER(c_double)),
32 POINTER(c_int)]
33 _dll.openmc_material_get_densities.restype = c_int
34 _dll.openmc_material_get_densities.errcheck = _error_handler
35 _dll.openmc_material_get_volume.argtypes = [c_int32, POINTER(c_double)]
36 _dll.openmc_material_get_volume.restype = c_int
37 _dll.openmc_material_get_volume.errcheck = _error_handler
38 _dll.openmc_material_set_density.argtypes = [c_int32, c_double, c_char_p]
39 _dll.openmc_material_set_density.restype = c_int
40 _dll.openmc_material_set_density.errcheck = _error_handler
41 _dll.openmc_material_set_densities.argtypes = [
42 c_int32, c_int, POINTER(c_char_p), POINTER(c_double)]
43 _dll.openmc_material_set_densities.restype = c_int
44 _dll.openmc_material_set_densities.errcheck = _error_handler
45 _dll.openmc_material_set_id.argtypes = [c_int32, c_int32]
46 _dll.openmc_material_set_id.restype = c_int
47 _dll.openmc_material_set_id.errcheck = _error_handler
48 _dll.openmc_material_set_volume.argtypes = [c_int32, c_double]
49 _dll.openmc_material_set_volume.restype = c_int
50 _dll.openmc_material_set_volume.errcheck = _error_handler
51 _dll.n_materials.argtypes = []
52 _dll.n_materials.restype = c_size_t
53
54
55 class Material(_FortranObjectWithID):
56 """Material stored internally.
57
58 This class exposes a material that is stored internally in the OpenMC
59 library. To obtain a view of a material with a given ID, use the
60 :data:`openmc.capi.materials` mapping.
61
62 Parameters
63 ----------
64 uid : int or None
65 Unique ID of the tally
66 new : bool
67 When `index` is None, this argument controls whether a new object is
68 created or a view to an existing object is returned.
69 index : int or None
70 Index in the `materials` array.
71
72 Attributes
73 ----------
74 id : int
75 ID of the material
76 nuclides : list of str
77 List of nuclides in the material
78 densities : numpy.ndarray
79 Array of densities in atom/b-cm
80
81 """
82 __instances = WeakValueDictionary()
83
84 def __new__(cls, uid=None, new=True, index=None):
85 mapping = materials
86 if index is None:
87 if new:
88 # Determine ID to assign
89 if uid is None:
90 uid = max(mapping, default=0) + 1
91 else:
92 if uid in mapping:
93 raise AllocationError('A material with ID={} has already '
94 'been allocated.'.format(uid))
95
96 index = c_int32()
97 _dll.openmc_extend_materials(1, index, None)
98 index = index.value
99 else:
100 index = mapping[uid]._index
101 elif index == -1:
102 # Special value indicates void material
103 return None
104
105 if index not in cls.__instances:
106 instance = super(Material, cls).__new__(cls)
107 instance._index = index
108 if uid is not None:
109 instance.id = uid
110 cls.__instances[index] = instance
111
112 return cls.__instances[index]
113
114 @property
115 def id(self):
116 mat_id = c_int32()
117 _dll.openmc_material_get_id(self._index, mat_id)
118 return mat_id.value
119
120 @id.setter
121 def id(self, mat_id):
122 _dll.openmc_material_set_id(self._index, mat_id)
123
124 @property
125 def volume(self):
126 volume = c_double()
127 try:
128 _dll.openmc_material_get_volume(self._index, volume)
129 except OpenMCError:
130 return None
131 return volume.value
132
133 @volume.setter
134 def volume(self, volume):
135 _dll.openmc_material_set_volume(self._index, volume)
136
137 @property
138 def nuclides(self):
139 return self._get_densities()[0]
140 return nuclides
141
142 @property
143 def densities(self):
144 return self._get_densities()[1]
145
146 def _get_densities(self):
147 """Get atom densities in a material.
148
149 Returns
150 -------
151 list of string
152 List of nuclide names
153 numpy.ndarray
154 Array of densities in atom/b-cm
155
156 """
157 # Allocate memory for arguments that are written to
158 nuclides = POINTER(c_int)()
159 densities = POINTER(c_double)()
160 n = c_int()
161
162 # Get nuclide names and densities
163 _dll.openmc_material_get_densities(self._index, nuclides, densities, n)
164
165 # Convert to appropriate types and return
166 nuclide_list = [Nuclide(nuclides[i]).name for i in range(n.value)]
167 density_array = as_array(densities, (n.value,))
168 return nuclide_list, density_array
169
170 def add_nuclide(self, name, density):
171 """Add a nuclide to a material.
172
173 Parameters
174 ----------
175 name : str
176 Name of nuclide, e.g. 'U235'
177 density : float
178 Density in atom/b-cm
179
180 """
181 _dll.openmc_material_add_nuclide(self._index, name.encode(), density)
182
183 def set_density(self, density, units='atom/b-cm'):
184 """Set density of a material.
185
186 Parameters
187 ----------
188 density : float
189 Density
190 units : {'atom/b-cm', 'g/cm3'}
191 Units for density
192
193 """
194 _dll.openmc_material_set_density(self._index, density, units.encode())
195
196 def set_densities(self, nuclides, densities):
197 """Set the densities of a list of nuclides in a material
198
199 Parameters
200 ----------
201 nuclides : iterable of str
202 Nuclide names
203 densities : iterable of float
204 Corresponding densities in atom/b-cm
205
206 """
207 # Convert strings to an array of char*
208 nucs = (c_char_p * len(nuclides))()
209 nucs[:] = [x.encode() for x in nuclides]
210
211 # Get numpy array as a double*
212 d = np.asarray(densities)
213 dp = d.ctypes.data_as(POINTER(c_double))
214
215 _dll.openmc_material_set_densities(self._index, len(nuclides), nucs, dp)
216
217
218 class _MaterialMapping(Mapping):
219 def __getitem__(self, key):
220 index = c_int32()
221 try:
222 _dll.openmc_get_material_index(key, index)
223 except (AllocationError, InvalidIDError) as e:
224 # __contains__ expects a KeyError to work correctly
225 raise KeyError(str(e))
226 return Material(index=index.value)
227
228 def __iter__(self):
229 for i in range(len(self)):
230 yield Material(index=i).id
231
232 def __len__(self):
233 return _dll.n_materials()
234
235 def __repr__(self):
236 return repr(dict(self))
237
238 materials = _MaterialMapping()
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openmc/capi/material.py b/openmc/capi/material.py
--- a/openmc/capi/material.py
+++ b/openmc/capi/material.py
@@ -32,6 +32,9 @@
POINTER(c_int)]
_dll.openmc_material_get_densities.restype = c_int
_dll.openmc_material_get_densities.errcheck = _error_handler
+_dll.openmc_material_get_density.argtypes = [c_int32, POINTER(c_double)]
+_dll.openmc_material_get_density.restype = c_int
+_dll.openmc_material_get_density.errcheck = _error_handler
_dll.openmc_material_get_volume.argtypes = [c_int32, POINTER(c_double)]
_dll.openmc_material_get_volume.restype = c_int
_dll.openmc_material_get_volume.errcheck = _error_handler
@@ -139,6 +142,15 @@
return self._get_densities()[0]
return nuclides
+ @property
+ def density(self):
+ density = c_double()
+ try:
+ _dll.openmc_material_get_density(self._index, density)
+ except OpenMCError:
+ return None
+ return density.value
+
@property
def densities(self):
return self._get_densities()[1]
|
{"golden_diff": "diff --git a/openmc/capi/material.py b/openmc/capi/material.py\n--- a/openmc/capi/material.py\n+++ b/openmc/capi/material.py\n@@ -32,6 +32,9 @@\n POINTER(c_int)]\n _dll.openmc_material_get_densities.restype = c_int\n _dll.openmc_material_get_densities.errcheck = _error_handler\n+_dll.openmc_material_get_density.argtypes = [c_int32, POINTER(c_double)]\n+_dll.openmc_material_get_density.restype = c_int\n+_dll.openmc_material_get_density.errcheck = _error_handler\n _dll.openmc_material_get_volume.argtypes = [c_int32, POINTER(c_double)]\n _dll.openmc_material_get_volume.restype = c_int\n _dll.openmc_material_get_volume.errcheck = _error_handler\n@@ -139,6 +142,15 @@\n return self._get_densities()[0]\n return nuclides\n \n+ @property\n+ def density(self):\n+ density = c_double()\n+ try:\n+ _dll.openmc_material_get_density(self._index, density)\n+ except OpenMCError:\n+ return None\n+ return density.value\n+\n @property\n def densities(self):\n return self._get_densities()[1]\n", "issue": "Add method to get total material density\nFor multiphysics coupling, convergence between successive iterations of OpenMC with a T/H solver may be based on the densities, in which case we will need to make the initial density used in OpenMC available to a multiphysics driver. \r\n\r\nThere is an `openmc_material_get_densities()` method to get the density of each nuclide in a material, but it would be cleaner for a multiphysics driver to avoid doing a summation on this density and instead directly get the density in g/cm^3.\n", "before_files": [{"content": "from collections.abc import Mapping\nfrom ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\nfrom weakref import WeakValueDictionary\n\nimport numpy as np\nfrom numpy.ctypeslib import as_array\n\nfrom openmc.exceptions import AllocationError, InvalidIDError, OpenMCError\nfrom . 
import _dll, Nuclide\nfrom .core import _FortranObjectWithID\nfrom .error import _error_handler\n\n\n__all__ = ['Material', 'materials']\n\n# Material functions\n_dll.openmc_extend_materials.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]\n_dll.openmc_extend_materials.restype = c_int\n_dll.openmc_extend_materials.errcheck = _error_handler\n_dll.openmc_get_material_index.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_get_material_index.restype = c_int\n_dll.openmc_get_material_index.errcheck = _error_handler\n_dll.openmc_material_add_nuclide.argtypes = [\n c_int32, c_char_p, c_double]\n_dll.openmc_material_add_nuclide.restype = c_int\n_dll.openmc_material_add_nuclide.errcheck = _error_handler\n_dll.openmc_material_get_id.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_material_get_id.restype = c_int\n_dll.openmc_material_get_id.errcheck = _error_handler\n_dll.openmc_material_get_densities.argtypes = [\n c_int32, POINTER(POINTER(c_int)), POINTER(POINTER(c_double)),\n POINTER(c_int)]\n_dll.openmc_material_get_densities.restype = c_int\n_dll.openmc_material_get_densities.errcheck = _error_handler\n_dll.openmc_material_get_volume.argtypes = [c_int32, POINTER(c_double)]\n_dll.openmc_material_get_volume.restype = c_int\n_dll.openmc_material_get_volume.errcheck = _error_handler\n_dll.openmc_material_set_density.argtypes = [c_int32, c_double, c_char_p]\n_dll.openmc_material_set_density.restype = c_int\n_dll.openmc_material_set_density.errcheck = _error_handler\n_dll.openmc_material_set_densities.argtypes = [\n c_int32, c_int, POINTER(c_char_p), POINTER(c_double)]\n_dll.openmc_material_set_densities.restype = c_int\n_dll.openmc_material_set_densities.errcheck = _error_handler\n_dll.openmc_material_set_id.argtypes = [c_int32, c_int32]\n_dll.openmc_material_set_id.restype = c_int\n_dll.openmc_material_set_id.errcheck = _error_handler\n_dll.openmc_material_set_volume.argtypes = [c_int32, c_double]\n_dll.openmc_material_set_volume.restype = c_int\n_dll.openmc_material_set_volume.errcheck = _error_handler\n_dll.n_materials.argtypes = []\n_dll.n_materials.restype = c_size_t\n\n\nclass Material(_FortranObjectWithID):\n \"\"\"Material stored internally.\n\n This class exposes a material that is stored internally in the OpenMC\n library. 
To obtain a view of a material with a given ID, use the\n :data:`openmc.capi.materials` mapping.\n\n Parameters\n ----------\n uid : int or None\n Unique ID of the tally\n new : bool\n When `index` is None, this argument controls whether a new object is\n created or a view to an existing object is returned.\n index : int or None\n Index in the `materials` array.\n\n Attributes\n ----------\n id : int\n ID of the material\n nuclides : list of str\n List of nuclides in the material\n densities : numpy.ndarray\n Array of densities in atom/b-cm\n\n \"\"\"\n __instances = WeakValueDictionary()\n\n def __new__(cls, uid=None, new=True, index=None):\n mapping = materials\n if index is None:\n if new:\n # Determine ID to assign\n if uid is None:\n uid = max(mapping, default=0) + 1\n else:\n if uid in mapping:\n raise AllocationError('A material with ID={} has already '\n 'been allocated.'.format(uid))\n\n index = c_int32()\n _dll.openmc_extend_materials(1, index, None)\n index = index.value\n else:\n index = mapping[uid]._index\n elif index == -1:\n # Special value indicates void material\n return None\n\n if index not in cls.__instances:\n instance = super(Material, cls).__new__(cls)\n instance._index = index\n if uid is not None:\n instance.id = uid\n cls.__instances[index] = instance\n\n return cls.__instances[index]\n\n @property\n def id(self):\n mat_id = c_int32()\n _dll.openmc_material_get_id(self._index, mat_id)\n return mat_id.value\n\n @id.setter\n def id(self, mat_id):\n _dll.openmc_material_set_id(self._index, mat_id)\n\n @property\n def volume(self):\n volume = c_double()\n try:\n _dll.openmc_material_get_volume(self._index, volume)\n except OpenMCError:\n return None\n return volume.value\n\n @volume.setter\n def volume(self, volume):\n _dll.openmc_material_set_volume(self._index, volume)\n\n @property\n def nuclides(self):\n return self._get_densities()[0]\n return nuclides\n\n @property\n def densities(self):\n return self._get_densities()[1]\n\n def _get_densities(self):\n \"\"\"Get atom densities in a material.\n\n Returns\n -------\n list of string\n List of nuclide names\n numpy.ndarray\n Array of densities in atom/b-cm\n\n \"\"\"\n # Allocate memory for arguments that are written to\n nuclides = POINTER(c_int)()\n densities = POINTER(c_double)()\n n = c_int()\n\n # Get nuclide names and densities\n _dll.openmc_material_get_densities(self._index, nuclides, densities, n)\n\n # Convert to appropriate types and return\n nuclide_list = [Nuclide(nuclides[i]).name for i in range(n.value)]\n density_array = as_array(densities, (n.value,))\n return nuclide_list, density_array\n\n def add_nuclide(self, name, density):\n \"\"\"Add a nuclide to a material.\n\n Parameters\n ----------\n name : str\n Name of nuclide, e.g. 
'U235'\n density : float\n Density in atom/b-cm\n\n \"\"\"\n _dll.openmc_material_add_nuclide(self._index, name.encode(), density)\n\n def set_density(self, density, units='atom/b-cm'):\n \"\"\"Set density of a material.\n\n Parameters\n ----------\n density : float\n Density\n units : {'atom/b-cm', 'g/cm3'}\n Units for density\n\n \"\"\"\n _dll.openmc_material_set_density(self._index, density, units.encode())\n\n def set_densities(self, nuclides, densities):\n \"\"\"Set the densities of a list of nuclides in a material\n\n Parameters\n ----------\n nuclides : iterable of str\n Nuclide names\n densities : iterable of float\n Corresponding densities in atom/b-cm\n\n \"\"\"\n # Convert strings to an array of char*\n nucs = (c_char_p * len(nuclides))()\n nucs[:] = [x.encode() for x in nuclides]\n\n # Get numpy array as a double*\n d = np.asarray(densities)\n dp = d.ctypes.data_as(POINTER(c_double))\n\n _dll.openmc_material_set_densities(self._index, len(nuclides), nucs, dp)\n\n\nclass _MaterialMapping(Mapping):\n def __getitem__(self, key):\n index = c_int32()\n try:\n _dll.openmc_get_material_index(key, index)\n except (AllocationError, InvalidIDError) as e:\n # __contains__ expects a KeyError to work correctly\n raise KeyError(str(e))\n return Material(index=index.value)\n\n def __iter__(self):\n for i in range(len(self)):\n yield Material(index=i).id\n\n def __len__(self):\n return _dll.n_materials()\n\n def __repr__(self):\n return repr(dict(self))\n\nmaterials = _MaterialMapping()\n", "path": "openmc/capi/material.py"}], "after_files": [{"content": "from collections.abc import Mapping\nfrom ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_size_t\nfrom weakref import WeakValueDictionary\n\nimport numpy as np\nfrom numpy.ctypeslib import as_array\n\nfrom openmc.exceptions import AllocationError, InvalidIDError, OpenMCError\nfrom . 
import _dll, Nuclide\nfrom .core import _FortranObjectWithID\nfrom .error import _error_handler\n\n\n__all__ = ['Material', 'materials']\n\n# Material functions\n_dll.openmc_extend_materials.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]\n_dll.openmc_extend_materials.restype = c_int\n_dll.openmc_extend_materials.errcheck = _error_handler\n_dll.openmc_get_material_index.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_get_material_index.restype = c_int\n_dll.openmc_get_material_index.errcheck = _error_handler\n_dll.openmc_material_add_nuclide.argtypes = [\n c_int32, c_char_p, c_double]\n_dll.openmc_material_add_nuclide.restype = c_int\n_dll.openmc_material_add_nuclide.errcheck = _error_handler\n_dll.openmc_material_get_id.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_material_get_id.restype = c_int\n_dll.openmc_material_get_id.errcheck = _error_handler\n_dll.openmc_material_get_densities.argtypes = [\n c_int32, POINTER(POINTER(c_int)), POINTER(POINTER(c_double)),\n POINTER(c_int)]\n_dll.openmc_material_get_densities.restype = c_int\n_dll.openmc_material_get_densities.errcheck = _error_handler\n_dll.openmc_material_get_density.argtypes = [c_int32, POINTER(c_double)]\n_dll.openmc_material_get_density.restype = c_int\n_dll.openmc_material_get_density.errcheck = _error_handler\n_dll.openmc_material_get_volume.argtypes = [c_int32, POINTER(c_double)]\n_dll.openmc_material_get_volume.restype = c_int\n_dll.openmc_material_get_volume.errcheck = _error_handler\n_dll.openmc_material_set_density.argtypes = [c_int32, c_double, c_char_p]\n_dll.openmc_material_set_density.restype = c_int\n_dll.openmc_material_set_density.errcheck = _error_handler\n_dll.openmc_material_set_densities.argtypes = [\n c_int32, c_int, POINTER(c_char_p), POINTER(c_double)]\n_dll.openmc_material_set_densities.restype = c_int\n_dll.openmc_material_set_densities.errcheck = _error_handler\n_dll.openmc_material_set_id.argtypes = [c_int32, c_int32]\n_dll.openmc_material_set_id.restype = c_int\n_dll.openmc_material_set_id.errcheck = _error_handler\n_dll.openmc_material_set_volume.argtypes = [c_int32, c_double]\n_dll.openmc_material_set_volume.restype = c_int\n_dll.openmc_material_set_volume.errcheck = _error_handler\n_dll.n_materials.argtypes = []\n_dll.n_materials.restype = c_size_t\n\n\nclass Material(_FortranObjectWithID):\n \"\"\"Material stored internally.\n\n This class exposes a material that is stored internally in the OpenMC\n library. 
To obtain a view of a material with a given ID, use the\n :data:`openmc.capi.materials` mapping.\n\n Parameters\n ----------\n uid : int or None\n Unique ID of the tally\n new : bool\n When `index` is None, this argument controls whether a new object is\n created or a view to an existing object is returned.\n index : int or None\n Index in the `materials` array.\n\n Attributes\n ----------\n id : int\n ID of the material\n nuclides : list of str\n List of nuclides in the material\n densities : numpy.ndarray\n Array of densities in atom/b-cm\n\n \"\"\"\n __instances = WeakValueDictionary()\n\n def __new__(cls, uid=None, new=True, index=None):\n mapping = materials\n if index is None:\n if new:\n # Determine ID to assign\n if uid is None:\n uid = max(mapping, default=0) + 1\n else:\n if uid in mapping:\n raise AllocationError('A material with ID={} has already '\n 'been allocated.'.format(uid))\n\n index = c_int32()\n _dll.openmc_extend_materials(1, index, None)\n index = index.value\n else:\n index = mapping[uid]._index\n elif index == -1:\n # Special value indicates void material\n return None\n\n if index not in cls.__instances:\n instance = super(Material, cls).__new__(cls)\n instance._index = index\n if uid is not None:\n instance.id = uid\n cls.__instances[index] = instance\n\n return cls.__instances[index]\n\n @property\n def id(self):\n mat_id = c_int32()\n _dll.openmc_material_get_id(self._index, mat_id)\n return mat_id.value\n\n @id.setter\n def id(self, mat_id):\n _dll.openmc_material_set_id(self._index, mat_id)\n\n @property\n def volume(self):\n volume = c_double()\n try:\n _dll.openmc_material_get_volume(self._index, volume)\n except OpenMCError:\n return None\n return volume.value\n\n @volume.setter\n def volume(self, volume):\n _dll.openmc_material_set_volume(self._index, volume)\n\n @property\n def nuclides(self):\n return self._get_densities()[0]\n return nuclides\n\n @property\n def density(self):\n density = c_double()\n try:\n _dll.openmc_material_get_density(self._index, density)\n except OpenMCError:\n return None\n return density.value\n\n @property\n def densities(self):\n return self._get_densities()[1]\n\n def _get_densities(self):\n \"\"\"Get atom densities in a material.\n\n Returns\n -------\n list of string\n List of nuclide names\n numpy.ndarray\n Array of densities in atom/b-cm\n\n \"\"\"\n # Allocate memory for arguments that are written to\n nuclides = POINTER(c_int)()\n densities = POINTER(c_double)()\n n = c_int()\n\n # Get nuclide names and densities\n _dll.openmc_material_get_densities(self._index, nuclides, densities, n)\n\n # Convert to appropriate types and return\n nuclide_list = [Nuclide(nuclides[i]).name for i in range(n.value)]\n density_array = as_array(densities, (n.value,))\n return nuclide_list, density_array\n\n def add_nuclide(self, name, density):\n \"\"\"Add a nuclide to a material.\n\n Parameters\n ----------\n name : str\n Name of nuclide, e.g. 
'U235'\n density : float\n Density in atom/b-cm\n\n \"\"\"\n _dll.openmc_material_add_nuclide(self._index, name.encode(), density)\n\n def set_density(self, density, units='atom/b-cm'):\n \"\"\"Set density of a material.\n\n Parameters\n ----------\n density : float\n Density\n units : {'atom/b-cm', 'g/cm3'}\n Units for density\n\n \"\"\"\n _dll.openmc_material_set_density(self._index, density, units.encode())\n\n def set_densities(self, nuclides, densities):\n \"\"\"Set the densities of a list of nuclides in a material\n\n Parameters\n ----------\n nuclides : iterable of str\n Nuclide names\n densities : iterable of float\n Corresponding densities in atom/b-cm\n\n \"\"\"\n # Convert strings to an array of char*\n nucs = (c_char_p * len(nuclides))()\n nucs[:] = [x.encode() for x in nuclides]\n\n # Get numpy array as a double*\n d = np.asarray(densities)\n dp = d.ctypes.data_as(POINTER(c_double))\n\n _dll.openmc_material_set_densities(self._index, len(nuclides), nucs, dp)\n\n\nclass _MaterialMapping(Mapping):\n def __getitem__(self, key):\n index = c_int32()\n try:\n _dll.openmc_get_material_index(key, index)\n except (AllocationError, InvalidIDError) as e:\n # __contains__ expects a KeyError to work correctly\n raise KeyError(str(e))\n return Material(index=index.value)\n\n def __iter__(self):\n for i in range(len(self)):\n yield Material(index=i).id\n\n def __len__(self):\n return _dll.n_materials()\n\n def __repr__(self):\n return repr(dict(self))\n\nmaterials = _MaterialMapping()\n", "path": "openmc/capi/material.py"}]}
| 2,901 | 286 |
gh_patches_debug_1015
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-2366
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
build --no-directory-urls feature not working
Hello.
I created new project using
```
mkdocs new .
mkdocs build --no-directory-urls
```
Still when I try to visit home page I am brought to . directory instead of .\index.html
Sorry for some russian text :D

I also tried to modife .yml file like this
`use_directory_urls: false`
Still not working in "offline" mode
```
pip show mkdocs
Name: mkdocs
Version: 1.1.2
Summary: Project documentation with Markdown.
Home-page: https://www.mkdocs.org
Author: Tom Christie
Author-email: [email protected]
License: BSD
Location: /home/demaunt/.local/lib/python3.8/site-packages
Requires: Markdown, lunr, Jinja2, PyYAML, click, livereload, tornado
Required-by: mkdocs-redirects, mkdocs-minify-plugin, mkdocs-material
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/structure/pages.py`
Content:
```
1 import os
2 import logging
3 from urllib.parse import urlparse, urlunparse, urljoin
4 from urllib.parse import unquote as urlunquote
5
6 import markdown
7 from markdown.extensions import Extension
8 from markdown.treeprocessors import Treeprocessor
9 from markdown.util import AMP_SUBSTITUTE
10
11 from mkdocs.structure.toc import get_toc
12 from mkdocs.utils import meta, get_build_date, get_markdown_title
13
14 log = logging.getLogger(__name__)
15
16
17 class Page:
18 def __init__(self, title, file, config):
19 file.page = self
20 self.file = file
21 self.title = title
22
23 # Navigation attributes
24 self.parent = None
25 self.children = None
26 self.previous_page = None
27 self.next_page = None
28 self.active = False
29
30 self.is_section = False
31 self.is_page = True
32 self.is_link = False
33
34 self.update_date = get_build_date()
35
36 self._set_canonical_url(config.get('site_url', None))
37 self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))
38
39 # Placeholders to be filled in later in the build process.
40 self.markdown = None
41 self.content = None
42 self.toc = []
43 self.meta = {}
44
45 def __eq__(self, other):
46 return (
47 isinstance(other, self.__class__) and
48 self.title == other.title and
49 self.file == other.file
50 )
51
52 def __ne__(self, other):
53 return not self.__eq__(other)
54
55 def __repr__(self):
56 title = "'{}'".format(self.title) if (self.title is not None) else '[blank]'
57 return "Page(title={}, url='{}')".format(title, self.abs_url or self.file.url)
58
59 def _indent_print(self, depth=0):
60 return '{}{}'.format(' ' * depth, repr(self))
61
62 def _get_active(self):
63 """ Return active status of page. """
64 return self.__active
65
66 def _set_active(self, value):
67 """ Set active status of page and ancestors. """
68 self.__active = bool(value)
69 if self.parent is not None:
70 self.parent.active = bool(value)
71
72 active = property(_get_active, _set_active)
73
74 @property
75 def is_index(self):
76 return self.file.name == 'index'
77
78 @property
79 def is_top_level(self):
80 return self.parent is None
81
82 @property
83 def is_homepage(self):
84 return self.is_top_level and self.is_index and self.file.url == '.'
85
86 @property
87 def url(self):
88 return '' if self.file.url == '.' else self.file.url
89
90 @property
91 def ancestors(self):
92 if self.parent is None:
93 return []
94 return [self.parent] + self.parent.ancestors
95
96 def _set_canonical_url(self, base):
97 if base:
98 if not base.endswith('/'):
99 base += '/'
100 self.canonical_url = urljoin(base, self.url)
101 self.abs_url = urlparse(self.canonical_url).path
102 else:
103 self.canonical_url = None
104 self.abs_url = None
105
106 def _set_edit_url(self, repo_url, edit_uri):
107 if repo_url and edit_uri:
108 src_path = self.file.src_path.replace('\\', '/')
109 self.edit_url = urljoin(repo_url, edit_uri + src_path)
110 else:
111 self.edit_url = None
112
113 def read_source(self, config):
114 source = config['plugins'].run_event(
115 'page_read_source', page=self, config=config
116 )
117 if source is None:
118 try:
119 with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:
120 source = f.read()
121 except OSError:
122 log.error('File not found: {}'.format(self.file.src_path))
123 raise
124 except ValueError:
125 log.error('Encoding error reading file: {}'.format(self.file.src_path))
126 raise
127
128 self.markdown, self.meta = meta.get_data(source)
129 self._set_title()
130
131 def _set_title(self):
132 """
133 Set the title for a Markdown document.
134
135 Check these in order and use the first that returns a valid title:
136 - value provided on init (passed in from config)
137 - value of metadata 'title'
138 - content of the first H1 in Markdown content
139 - convert filename to title
140 """
141 if self.title is not None:
142 return
143
144 if 'title' in self.meta:
145 self.title = self.meta['title']
146 return
147
148 title = get_markdown_title(self.markdown)
149
150 if title is None:
151 if self.is_homepage:
152 title = 'Home'
153 else:
154 title = self.file.name.replace('-', ' ').replace('_', ' ')
155 # Capitalize if the filename was all lowercase, otherwise leave it as-is.
156 if title.lower() == title:
157 title = title.capitalize()
158
159 self.title = title
160
161 def render(self, config, files):
162 """
163 Convert the Markdown source file to HTML as per the config.
164 """
165
166 extensions = [
167 _RelativePathExtension(self.file, files)
168 ] + config['markdown_extensions']
169
170 md = markdown.Markdown(
171 extensions=extensions,
172 extension_configs=config['mdx_configs'] or {}
173 )
174 self.content = md.convert(self.markdown)
175 self.toc = get_toc(getattr(md, 'toc_tokens', []))
176
177
178 class _RelativePathTreeprocessor(Treeprocessor):
179 def __init__(self, file, files):
180 self.file = file
181 self.files = files
182
183 def run(self, root):
184 """
185 Update urls on anchors and images to make them relative
186
187 Iterates through the full document tree looking for specific
188 tags and then makes them relative based on the site navigation
189 """
190 for element in root.iter():
191 if element.tag == 'a':
192 key = 'href'
193 elif element.tag == 'img':
194 key = 'src'
195 else:
196 continue
197
198 url = element.get(key)
199 new_url = self.path_to_url(url)
200 element.set(key, new_url)
201
202 return root
203
204 def path_to_url(self, url):
205 scheme, netloc, path, params, query, fragment = urlparse(url)
206
207 if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\')
208 or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):
209 # Ignore URLs unless they are a relative link to a source file.
210 # AMP_SUBSTITUTE is used internally by Markdown only for email.
211 # No '.' in the last part of a path indicates path does not point to a file.
212 return url
213
214 # Determine the filepath of the target.
215 target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))
216 target_path = os.path.normpath(target_path).lstrip(os.sep)
217
218 # Validate that the target exists in files collection.
219 if target_path not in self.files:
220 log.warning(
221 "Documentation file '{}' contains a link to '{}' which is not found "
222 "in the documentation files.".format(self.file.src_path, target_path)
223 )
224 return url
225 target_file = self.files.get_file_from_path(target_path)
226 path = target_file.url_relative_to(self.file)
227 components = (scheme, netloc, path, params, query, fragment)
228 return urlunparse(components)
229
230
231 class _RelativePathExtension(Extension):
232 """
233 The Extension class is what we pass to markdown, it then
234 registers the Treeprocessor.
235 """
236
237 def __init__(self, file, files):
238 self.file = file
239 self.files = files
240
241 def extendMarkdown(self, md):
242 relpath = _RelativePathTreeprocessor(self.file, self.files)
243 md.treeprocessors.register(relpath, "relpath", 0)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py
--- a/mkdocs/structure/pages.py
+++ b/mkdocs/structure/pages.py
@@ -81,7 +81,7 @@
@property
def is_homepage(self):
- return self.is_top_level and self.is_index and self.file.url == '.'
+ return self.is_top_level and self.is_index and self.file.url in ['.', 'index.html']
@property
def url(self):
|
{"golden_diff": "diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py\n--- a/mkdocs/structure/pages.py\n+++ b/mkdocs/structure/pages.py\n@@ -81,7 +81,7 @@\n \n @property\n def is_homepage(self):\n- return self.is_top_level and self.is_index and self.file.url == '.'\n+ return self.is_top_level and self.is_index and self.file.url in ['.', 'index.html']\n \n @property\n def url(self):\n", "issue": "build --no-directory-urls feature not working\nHello. \r\nI created new project using \r\n```\r\nmkdocs new .\r\nmkdocs build --no-directory-urls\r\n```\r\n\r\nStill when I try to visit home page I am brought to . directory instead of .\\index.html\r\n\r\nSorry for some russian text :D\r\n\r\n\r\nI also tried to modife .yml file like this \r\n`use_directory_urls: false`\r\n\r\nStill not working in \"offline\" mode\r\n\r\n```\r\npip show mkdocs\r\nName: mkdocs\r\nVersion: 1.1.2\r\nSummary: Project documentation with Markdown.\r\nHome-page: https://www.mkdocs.org\r\nAuthor: Tom Christie\r\nAuthor-email: [email protected]\r\nLicense: BSD\r\nLocation: /home/demaunt/.local/lib/python3.8/site-packages\r\nRequires: Markdown, lunr, Jinja2, PyYAML, click, livereload, tornado\r\nRequired-by: mkdocs-redirects, mkdocs-minify-plugin, mkdocs-material\r\n```\r\n\r\n\n", "before_files": [{"content": "import os\nimport logging\nfrom urllib.parse import urlparse, urlunparse, urljoin\nfrom urllib.parse import unquote as urlunquote\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs.structure.toc import get_toc\nfrom mkdocs.utils import meta, get_build_date, get_markdown_title\n\nlog = logging.getLogger(__name__)\n\n\nclass Page:\n def __init__(self, title, file, config):\n file.page = self\n self.file = file\n self.title = title\n\n # Navigation attributes\n self.parent = None\n self.children = None\n self.previous_page = None\n self.next_page = None\n self.active = False\n\n self.is_section = False\n self.is_page = True\n self.is_link = False\n\n self.update_date = get_build_date()\n\n self._set_canonical_url(config.get('site_url', None))\n self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))\n\n # Placeholders to be filled in later in the build process.\n self.markdown = None\n self.content = None\n self.toc = []\n self.meta = {}\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.title == other.title and\n self.file == other.file\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n title = \"'{}'\".format(self.title) if (self.title is not None) else '[blank]'\n return \"Page(title={}, url='{}')\".format(title, self.abs_url or self.file.url)\n\n def _indent_print(self, depth=0):\n return '{}{}'.format(' ' * depth, repr(self))\n\n def _get_active(self):\n \"\"\" Return active status of page. \"\"\"\n return self.__active\n\n def _set_active(self, value):\n \"\"\" Set active status of page and ancestors. \"\"\"\n self.__active = bool(value)\n if self.parent is not None:\n self.parent.active = bool(value)\n\n active = property(_get_active, _set_active)\n\n @property\n def is_index(self):\n return self.file.name == 'index'\n\n @property\n def is_top_level(self):\n return self.parent is None\n\n @property\n def is_homepage(self):\n return self.is_top_level and self.is_index and self.file.url == '.'\n\n @property\n def url(self):\n return '' if self.file.url == '.' 
else self.file.url\n\n @property\n def ancestors(self):\n if self.parent is None:\n return []\n return [self.parent] + self.parent.ancestors\n\n def _set_canonical_url(self, base):\n if base:\n if not base.endswith('/'):\n base += '/'\n self.canonical_url = urljoin(base, self.url)\n self.abs_url = urlparse(self.canonical_url).path\n else:\n self.canonical_url = None\n self.abs_url = None\n\n def _set_edit_url(self, repo_url, edit_uri):\n if repo_url and edit_uri:\n src_path = self.file.src_path.replace('\\\\', '/')\n self.edit_url = urljoin(repo_url, edit_uri + src_path)\n else:\n self.edit_url = None\n\n def read_source(self, config):\n source = config['plugins'].run_event(\n 'page_read_source', page=self, config=config\n )\n if source is None:\n try:\n with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:\n source = f.read()\n except OSError:\n log.error('File not found: {}'.format(self.file.src_path))\n raise\n except ValueError:\n log.error('Encoding error reading file: {}'.format(self.file.src_path))\n raise\n\n self.markdown, self.meta = meta.get_data(source)\n self._set_title()\n\n def _set_title(self):\n \"\"\"\n Set the title for a Markdown document.\n\n Check these in order and use the first that returns a valid title:\n - value provided on init (passed in from config)\n - value of metadata 'title'\n - content of the first H1 in Markdown content\n - convert filename to title\n \"\"\"\n if self.title is not None:\n return\n\n if 'title' in self.meta:\n self.title = self.meta['title']\n return\n\n title = get_markdown_title(self.markdown)\n\n if title is None:\n if self.is_homepage:\n title = 'Home'\n else:\n title = self.file.name.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n self.title = title\n\n def render(self, config, files):\n \"\"\"\n Convert the Markdown source file to HTML as per the config.\n \"\"\"\n\n extensions = [\n _RelativePathExtension(self.file, files)\n ] + config['markdown_extensions']\n\n md = markdown.Markdown(\n extensions=extensions,\n extension_configs=config['mdx_configs'] or {}\n )\n self.content = md.convert(self.markdown)\n self.toc = get_toc(getattr(md, 'toc_tokens', []))\n\n\nclass _RelativePathTreeprocessor(Treeprocessor):\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def run(self, root):\n \"\"\"\n Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n for element in root.iter():\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = self.path_to_url(url)\n element.set(key, new_url)\n\n return root\n\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\\\')\n or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' 
in the last part of a path indicates path does not point to a file.\n return url\n\n # Determine the filepath of the target.\n target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))\n target_path = os.path.normpath(target_path).lstrip(os.sep)\n\n # Validate that the target exists in files collection.\n if target_path not in self.files:\n log.warning(\n \"Documentation file '{}' contains a link to '{}' which is not found \"\n \"in the documentation files.\".format(self.file.src_path, target_path)\n )\n return url\n target_file = self.files.get_file_from_path(target_path)\n path = target_file.url_relative_to(self.file)\n components = (scheme, netloc, path, params, query, fragment)\n return urlunparse(components)\n\n\nclass _RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def extendMarkdown(self, md):\n relpath = _RelativePathTreeprocessor(self.file, self.files)\n md.treeprocessors.register(relpath, \"relpath\", 0)\n", "path": "mkdocs/structure/pages.py"}], "after_files": [{"content": "import os\nimport logging\nfrom urllib.parse import urlparse, urlunparse, urljoin\nfrom urllib.parse import unquote as urlunquote\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs.structure.toc import get_toc\nfrom mkdocs.utils import meta, get_build_date, get_markdown_title\n\nlog = logging.getLogger(__name__)\n\n\nclass Page:\n def __init__(self, title, file, config):\n file.page = self\n self.file = file\n self.title = title\n\n # Navigation attributes\n self.parent = None\n self.children = None\n self.previous_page = None\n self.next_page = None\n self.active = False\n\n self.is_section = False\n self.is_page = True\n self.is_link = False\n\n self.update_date = get_build_date()\n\n self._set_canonical_url(config.get('site_url', None))\n self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))\n\n # Placeholders to be filled in later in the build process.\n self.markdown = None\n self.content = None\n self.toc = []\n self.meta = {}\n\n def __eq__(self, other):\n return (\n isinstance(other, self.__class__) and\n self.title == other.title and\n self.file == other.file\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n title = \"'{}'\".format(self.title) if (self.title is not None) else '[blank]'\n return \"Page(title={}, url='{}')\".format(title, self.abs_url or self.file.url)\n\n def _indent_print(self, depth=0):\n return '{}{}'.format(' ' * depth, repr(self))\n\n def _get_active(self):\n \"\"\" Return active status of page. \"\"\"\n return self.__active\n\n def _set_active(self, value):\n \"\"\" Set active status of page and ancestors. \"\"\"\n self.__active = bool(value)\n if self.parent is not None:\n self.parent.active = bool(value)\n\n active = property(_get_active, _set_active)\n\n @property\n def is_index(self):\n return self.file.name == 'index'\n\n @property\n def is_top_level(self):\n return self.parent is None\n\n @property\n def is_homepage(self):\n return self.is_top_level and self.is_index and self.file.url in ['.', 'index.html']\n\n @property\n def url(self):\n return '' if self.file.url == '.' 
else self.file.url\n\n @property\n def ancestors(self):\n if self.parent is None:\n return []\n return [self.parent] + self.parent.ancestors\n\n def _set_canonical_url(self, base):\n if base:\n if not base.endswith('/'):\n base += '/'\n self.canonical_url = urljoin(base, self.url)\n self.abs_url = urlparse(self.canonical_url).path\n else:\n self.canonical_url = None\n self.abs_url = None\n\n def _set_edit_url(self, repo_url, edit_uri):\n if repo_url and edit_uri:\n src_path = self.file.src_path.replace('\\\\', '/')\n self.edit_url = urljoin(repo_url, edit_uri + src_path)\n else:\n self.edit_url = None\n\n def read_source(self, config):\n source = config['plugins'].run_event(\n 'page_read_source', page=self, config=config\n )\n if source is None:\n try:\n with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:\n source = f.read()\n except OSError:\n log.error('File not found: {}'.format(self.file.src_path))\n raise\n except ValueError:\n log.error('Encoding error reading file: {}'.format(self.file.src_path))\n raise\n\n self.markdown, self.meta = meta.get_data(source)\n self._set_title()\n\n def _set_title(self):\n \"\"\"\n Set the title for a Markdown document.\n\n Check these in order and use the first that returns a valid title:\n - value provided on init (passed in from config)\n - value of metadata 'title'\n - content of the first H1 in Markdown content\n - convert filename to title\n \"\"\"\n if self.title is not None:\n return\n\n if 'title' in self.meta:\n self.title = self.meta['title']\n return\n\n title = get_markdown_title(self.markdown)\n\n if title is None:\n if self.is_homepage:\n title = 'Home'\n else:\n title = self.file.name.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n self.title = title\n\n def render(self, config, files):\n \"\"\"\n Convert the Markdown source file to HTML as per the config.\n \"\"\"\n\n extensions = [\n _RelativePathExtension(self.file, files)\n ] + config['markdown_extensions']\n\n md = markdown.Markdown(\n extensions=extensions,\n extension_configs=config['mdx_configs'] or {}\n )\n self.content = md.convert(self.markdown)\n self.toc = get_toc(getattr(md, 'toc_tokens', []))\n\n\nclass _RelativePathTreeprocessor(Treeprocessor):\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def run(self, root):\n \"\"\"\n Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n for element in root.iter():\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = self.path_to_url(url)\n element.set(key, new_url)\n\n return root\n\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\\\')\n or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' 
in the last part of a path indicates path does not point to a file.\n return url\n\n # Determine the filepath of the target.\n target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))\n target_path = os.path.normpath(target_path).lstrip(os.sep)\n\n # Validate that the target exists in files collection.\n if target_path not in self.files:\n log.warning(\n \"Documentation file '{}' contains a link to '{}' which is not found \"\n \"in the documentation files.\".format(self.file.src_path, target_path)\n )\n return url\n target_file = self.files.get_file_from_path(target_path)\n path = target_file.url_relative_to(self.file)\n components = (scheme, netloc, path, params, query, fragment)\n return urlunparse(components)\n\n\nclass _RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def extendMarkdown(self, md):\n relpath = _RelativePathTreeprocessor(self.file, self.files)\n md.treeprocessors.register(relpath, \"relpath\", 0)\n", "path": "mkdocs/structure/pages.py"}]}
| 2,893 | 114 |
gh_patches_debug_28233
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-8069
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`dvc.api.params_show()`: returns all params for stage with no params
# Bug Report
## Description
When using `dvc.api.params_show(stages="train")`, if there are no `params` for stage `train`, all of `params.yaml` will be returned.
### Reproduce
Use the following files:
```yaml
# dvc.yaml
stages:
train:
cmd: python train.py
deps:
- train.py
```
```yaml
#params.yaml
batch_size: 64
lr: 0.2
```
```python
# train.py
import dvc.api
print(dvc.api.params_show(stages="train"))
```
Running `python train.py` or `dvc repro/exp run` outputs `{'batch_size': 64, 'lr': 0.2}`.
### Expected
An error for no params found or an empty dict.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/repo/params/show.py`
Content:
```
1 import logging
2 import os
3 from collections import defaultdict
4 from copy import copy
5 from typing import (
6 TYPE_CHECKING,
7 Callable,
8 Dict,
9 Iterable,
10 List,
11 Optional,
12 Tuple,
13 )
14
15 from scmrepo.exceptions import SCMError
16
17 from dvc.dependency.param import ParamsDependency
18 from dvc.repo import locked
19 from dvc.repo.collect import collect
20 from dvc.scm import NoSCMError
21 from dvc.stage import PipelineStage
22 from dvc.ui import ui
23 from dvc.utils import error_handler, errored_revisions, onerror_collect
24 from dvc.utils.collections import ensure_list
25 from dvc.utils.serialize import LOADERS
26
27 if TYPE_CHECKING:
28 from dvc.output import Output
29 from dvc.repo import Repo
30
31 logger = logging.getLogger(__name__)
32
33
34 def _is_params(dep: "Output"):
35 return isinstance(dep, ParamsDependency)
36
37
38 def _collect_configs(
39 repo: "Repo", rev, targets=None, duplicates=False
40 ) -> Tuple[List["Output"], List[str]]:
41
42 params, fs_paths = collect(
43 repo,
44 targets=targets or [],
45 deps=True,
46 output_filter=_is_params,
47 rev=rev,
48 duplicates=duplicates,
49 )
50 all_fs_paths = fs_paths + [p.fs_path for p in params]
51 if not targets:
52 default_params = repo.fs.path.join(
53 repo.root_dir, ParamsDependency.DEFAULT_PARAMS_FILE
54 )
55 if default_params not in all_fs_paths and repo.fs.exists(
56 default_params
57 ):
58 fs_paths.append(default_params)
59 return params, fs_paths
60
61
62 @error_handler
63 def _read_fs_path(fs, fs_path, **kwargs):
64 suffix = fs.path.suffix(fs_path).lower()
65 loader = LOADERS[suffix]
66 return loader(fs_path, fs=fs)
67
68
69 def _read_params(
70 repo,
71 params,
72 params_fs_paths,
73 deps=False,
74 onerror: Optional[Callable] = None,
75 stages: Optional[Iterable[str]] = None,
76 ):
77 res: Dict[str, Dict] = defaultdict(lambda: defaultdict(dict))
78 fs_paths = copy(params_fs_paths)
79
80 if deps or stages:
81 for param in params:
82 if stages and param.stage.addressing not in stages:
83 continue
84 params_dict = error_handler(param.read_params)(
85 onerror=onerror, flatten=False
86 )
87 if params_dict:
88 name = os.sep.join(repo.fs.path.relparts(param.fs_path))
89 res[name]["data"].update(params_dict["data"])
90 if name in fs_paths:
91 fs_paths.remove(name)
92 else:
93 fs_paths += [param.fs_path for param in params]
94
95 for fs_path in fs_paths:
96 from_path = _read_fs_path(repo.fs, fs_path, onerror=onerror)
97 if from_path:
98 name = os.sep.join(repo.fs.path.relparts(fs_path))
99 res[name] = from_path
100
101 return res
102
103
104 def _collect_vars(repo, params, stages=None) -> Dict:
105 vars_params: Dict[str, Dict] = defaultdict(dict)
106
107 for stage in repo.index.stages:
108 if isinstance(stage, PipelineStage) and stage.tracked_vars:
109 if stages and stage.addressing not in stages:
110 continue
111 for file, vars_ in stage.tracked_vars.items():
112 # `params` file are shown regardless of `tracked` or not
113 # to reduce noise and duplication, they are skipped
114 if file in params:
115 continue
116
117 name = os.sep.join(repo.fs.path.parts(file))
118 vars_params[name].update(vars_)
119 return vars_params
120
121
122 @locked
123 def show(
124 repo,
125 revs=None,
126 targets=None,
127 deps=False,
128 onerror: Callable = None,
129 stages=None,
130 ):
131 if onerror is None:
132 onerror = onerror_collect
133 res = {}
134
135 targets = ensure_list(targets)
136 targets = [repo.dvcfs.from_os_path(target) for target in targets]
137
138 for branch in repo.brancher(revs=revs):
139 params = error_handler(_gather_params)(
140 repo=repo,
141 rev=branch,
142 targets=targets,
143 deps=deps,
144 onerror=onerror,
145 stages=stages,
146 )
147
148 if params:
149 res[branch] = params
150
151 # Hide workspace params if they are the same as in the active branch
152 try:
153 active_branch = repo.scm.active_branch()
154 except (SCMError, NoSCMError):
155 # SCMError - detached head
156 # NoSCMError - no repo case
157 pass
158 else:
159 if res.get("workspace") == res.get(active_branch):
160 res.pop("workspace", None)
161
162 errored = errored_revisions(res)
163 if errored:
164 ui.error_write(
165 "DVC failed to load some parameters for following revisions:"
166 f" '{', '.join(errored)}'."
167 )
168
169 return res
170
171
172 def _gather_params(
173 repo, rev, targets=None, deps=False, onerror=None, stages=None
174 ):
175 param_outs, params_fs_paths = _collect_configs(
176 repo, rev, targets=targets, duplicates=deps or stages
177 )
178 params = _read_params(
179 repo,
180 params=param_outs,
181 params_fs_paths=params_fs_paths,
182 deps=deps,
183 onerror=onerror,
184 stages=stages,
185 )
186 vars_params = _collect_vars(repo, params, stages=stages)
187
188 # NOTE: only those that are not added as a ParamDependency are
189 # included so we don't need to recursively merge them yet.
190 for key, vals in vars_params.items():
191 params[key]["data"] = vals
192 return params
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/repo/params/show.py b/dvc/repo/params/show.py
--- a/dvc/repo/params/show.py
+++ b/dvc/repo/params/show.py
@@ -36,7 +36,7 @@
def _collect_configs(
- repo: "Repo", rev, targets=None, duplicates=False
+ repo: "Repo", rev, targets=None, deps=False, stages=None
) -> Tuple[List["Output"], List[str]]:
params, fs_paths = collect(
@@ -45,10 +45,10 @@
deps=True,
output_filter=_is_params,
rev=rev,
- duplicates=duplicates,
+ duplicates=deps or stages is not None,
)
all_fs_paths = fs_paths + [p.fs_path for p in params]
- if not targets:
+ if not any([deps, targets, stages]):
default_params = repo.fs.path.join(
repo.root_dir, ParamsDependency.DEFAULT_PARAMS_FILE
)
@@ -56,6 +56,7 @@
default_params
):
fs_paths.append(default_params)
+
return params, fs_paths
@@ -173,7 +174,7 @@
repo, rev, targets=None, deps=False, onerror=None, stages=None
):
param_outs, params_fs_paths = _collect_configs(
- repo, rev, targets=targets, duplicates=deps or stages
+ repo, rev, targets=targets, deps=deps, stages=stages
)
params = _read_params(
repo,
|
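A hedged reading of the patch above: pre-patch, `_collect_configs` fell back to the default `params.yaml` whenever no `targets` were passed, so `params_show(stages="train")` returned the whole file even though the `train` stage declares no params; post-patch, the fallback only fires when `deps`, `targets` and `stages` are all unset. A minimal standalone sketch of that guard (not the real dvc code):

```python
# Standalone sketch of the post-patch guard in _collect_configs (not the real dvc code).
def should_add_default_params(targets, deps, stages):
    # fall back to the default params.yaml only when nothing narrower was requested
    return not any([deps, targets, stages])

print(should_add_default_params(targets=[], deps=False, stages=None))       # True  -> params.yaml is collected
print(should_add_default_params(targets=[], deps=False, stages=["train"]))  # False -> a stage with no params stays empty
```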
{"golden_diff": "diff --git a/dvc/repo/params/show.py b/dvc/repo/params/show.py\n--- a/dvc/repo/params/show.py\n+++ b/dvc/repo/params/show.py\n@@ -36,7 +36,7 @@\n \n \n def _collect_configs(\n- repo: \"Repo\", rev, targets=None, duplicates=False\n+ repo: \"Repo\", rev, targets=None, deps=False, stages=None\n ) -> Tuple[List[\"Output\"], List[str]]:\n \n params, fs_paths = collect(\n@@ -45,10 +45,10 @@\n deps=True,\n output_filter=_is_params,\n rev=rev,\n- duplicates=duplicates,\n+ duplicates=deps or stages is not None,\n )\n all_fs_paths = fs_paths + [p.fs_path for p in params]\n- if not targets:\n+ if not any([deps, targets, stages]):\n default_params = repo.fs.path.join(\n repo.root_dir, ParamsDependency.DEFAULT_PARAMS_FILE\n )\n@@ -56,6 +56,7 @@\n default_params\n ):\n fs_paths.append(default_params)\n+\n return params, fs_paths\n \n \n@@ -173,7 +174,7 @@\n repo, rev, targets=None, deps=False, onerror=None, stages=None\n ):\n param_outs, params_fs_paths = _collect_configs(\n- repo, rev, targets=targets, duplicates=deps or stages\n+ repo, rev, targets=targets, deps=deps, stages=stages\n )\n params = _read_params(\n repo,\n", "issue": "`dvc.api.params_show()`: returns all params for stage with no params\n# Bug Report\r\n\r\n## Description\r\n\r\nWhen using `dvc.api.params_show(stages=\"train\")`, if there are no `params` for stage `train`, all of `params.yaml` will be returned.\r\n\r\n### Reproduce\r\n\r\nUse the following files:\r\n\r\n```yaml\r\n# dvc.yaml\r\nstages:\r\n train:\r\n cmd: python train.py\r\n deps:\r\n - train.py\r\n```\r\n\r\n```yaml\r\n#params.yaml\r\nbatch_size: 64\r\nlr: 0.2\r\n```\r\n\r\n```python\r\n# train.py\r\nimport dvc.api\r\n\r\nprint(dvc.api.params_show(stages=\"train\"))\r\n```\r\n\r\nRunning `python train.py` or `dvc repro/exp run` outputs `{'batch_size': 64, 'lr': 0.2}`.\r\n\r\n### Expected\r\n\r\nAn error for no params found or an empty dict.\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom collections import defaultdict\nfrom copy import copy\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Dict,\n Iterable,\n List,\n Optional,\n Tuple,\n)\n\nfrom scmrepo.exceptions import SCMError\n\nfrom dvc.dependency.param import ParamsDependency\nfrom dvc.repo import locked\nfrom dvc.repo.collect import collect\nfrom dvc.scm import NoSCMError\nfrom dvc.stage import PipelineStage\nfrom dvc.ui import ui\nfrom dvc.utils import error_handler, errored_revisions, onerror_collect\nfrom dvc.utils.collections import ensure_list\nfrom dvc.utils.serialize import LOADERS\n\nif TYPE_CHECKING:\n from dvc.output import Output\n from dvc.repo import Repo\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_params(dep: \"Output\"):\n return isinstance(dep, ParamsDependency)\n\n\ndef _collect_configs(\n repo: \"Repo\", rev, targets=None, duplicates=False\n) -> Tuple[List[\"Output\"], List[str]]:\n\n params, fs_paths = collect(\n repo,\n targets=targets or [],\n deps=True,\n output_filter=_is_params,\n rev=rev,\n duplicates=duplicates,\n )\n all_fs_paths = fs_paths + [p.fs_path for p in params]\n if not targets:\n default_params = repo.fs.path.join(\n repo.root_dir, ParamsDependency.DEFAULT_PARAMS_FILE\n )\n if default_params not in all_fs_paths and repo.fs.exists(\n default_params\n ):\n fs_paths.append(default_params)\n return params, fs_paths\n\n\n@error_handler\ndef _read_fs_path(fs, fs_path, **kwargs):\n suffix = fs.path.suffix(fs_path).lower()\n loader = LOADERS[suffix]\n return loader(fs_path, fs=fs)\n\n\ndef _read_params(\n repo,\n params,\n 
params_fs_paths,\n deps=False,\n onerror: Optional[Callable] = None,\n stages: Optional[Iterable[str]] = None,\n):\n res: Dict[str, Dict] = defaultdict(lambda: defaultdict(dict))\n fs_paths = copy(params_fs_paths)\n\n if deps or stages:\n for param in params:\n if stages and param.stage.addressing not in stages:\n continue\n params_dict = error_handler(param.read_params)(\n onerror=onerror, flatten=False\n )\n if params_dict:\n name = os.sep.join(repo.fs.path.relparts(param.fs_path))\n res[name][\"data\"].update(params_dict[\"data\"])\n if name in fs_paths:\n fs_paths.remove(name)\n else:\n fs_paths += [param.fs_path for param in params]\n\n for fs_path in fs_paths:\n from_path = _read_fs_path(repo.fs, fs_path, onerror=onerror)\n if from_path:\n name = os.sep.join(repo.fs.path.relparts(fs_path))\n res[name] = from_path\n\n return res\n\n\ndef _collect_vars(repo, params, stages=None) -> Dict:\n vars_params: Dict[str, Dict] = defaultdict(dict)\n\n for stage in repo.index.stages:\n if isinstance(stage, PipelineStage) and stage.tracked_vars:\n if stages and stage.addressing not in stages:\n continue\n for file, vars_ in stage.tracked_vars.items():\n # `params` file are shown regardless of `tracked` or not\n # to reduce noise and duplication, they are skipped\n if file in params:\n continue\n\n name = os.sep.join(repo.fs.path.parts(file))\n vars_params[name].update(vars_)\n return vars_params\n\n\n@locked\ndef show(\n repo,\n revs=None,\n targets=None,\n deps=False,\n onerror: Callable = None,\n stages=None,\n):\n if onerror is None:\n onerror = onerror_collect\n res = {}\n\n targets = ensure_list(targets)\n targets = [repo.dvcfs.from_os_path(target) for target in targets]\n\n for branch in repo.brancher(revs=revs):\n params = error_handler(_gather_params)(\n repo=repo,\n rev=branch,\n targets=targets,\n deps=deps,\n onerror=onerror,\n stages=stages,\n )\n\n if params:\n res[branch] = params\n\n # Hide workspace params if they are the same as in the active branch\n try:\n active_branch = repo.scm.active_branch()\n except (SCMError, NoSCMError):\n # SCMError - detached head\n # NoSCMError - no repo case\n pass\n else:\n if res.get(\"workspace\") == res.get(active_branch):\n res.pop(\"workspace\", None)\n\n errored = errored_revisions(res)\n if errored:\n ui.error_write(\n \"DVC failed to load some parameters for following revisions:\"\n f\" '{', '.join(errored)}'.\"\n )\n\n return res\n\n\ndef _gather_params(\n repo, rev, targets=None, deps=False, onerror=None, stages=None\n):\n param_outs, params_fs_paths = _collect_configs(\n repo, rev, targets=targets, duplicates=deps or stages\n )\n params = _read_params(\n repo,\n params=param_outs,\n params_fs_paths=params_fs_paths,\n deps=deps,\n onerror=onerror,\n stages=stages,\n )\n vars_params = _collect_vars(repo, params, stages=stages)\n\n # NOTE: only those that are not added as a ParamDependency are\n # included so we don't need to recursively merge them yet.\n for key, vals in vars_params.items():\n params[key][\"data\"] = vals\n return params\n", "path": "dvc/repo/params/show.py"}], "after_files": [{"content": "import logging\nimport os\nfrom collections import defaultdict\nfrom copy import copy\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Dict,\n Iterable,\n List,\n Optional,\n Tuple,\n)\n\nfrom scmrepo.exceptions import SCMError\n\nfrom dvc.dependency.param import ParamsDependency\nfrom dvc.repo import locked\nfrom dvc.repo.collect import collect\nfrom dvc.scm import NoSCMError\nfrom dvc.stage import PipelineStage\nfrom dvc.ui import 
ui\nfrom dvc.utils import error_handler, errored_revisions, onerror_collect\nfrom dvc.utils.collections import ensure_list\nfrom dvc.utils.serialize import LOADERS\n\nif TYPE_CHECKING:\n from dvc.output import Output\n from dvc.repo import Repo\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_params(dep: \"Output\"):\n return isinstance(dep, ParamsDependency)\n\n\ndef _collect_configs(\n repo: \"Repo\", rev, targets=None, deps=False, stages=None\n) -> Tuple[List[\"Output\"], List[str]]:\n\n params, fs_paths = collect(\n repo,\n targets=targets or [],\n deps=True,\n output_filter=_is_params,\n rev=rev,\n duplicates=deps or stages is not None,\n )\n all_fs_paths = fs_paths + [p.fs_path for p in params]\n if not any([deps, targets, stages]):\n default_params = repo.fs.path.join(\n repo.root_dir, ParamsDependency.DEFAULT_PARAMS_FILE\n )\n if default_params not in all_fs_paths and repo.fs.exists(\n default_params\n ):\n fs_paths.append(default_params)\n\n return params, fs_paths\n\n\n@error_handler\ndef _read_fs_path(fs, fs_path, **kwargs):\n suffix = fs.path.suffix(fs_path).lower()\n loader = LOADERS[suffix]\n return loader(fs_path, fs=fs)\n\n\ndef _read_params(\n repo,\n params,\n params_fs_paths,\n deps=False,\n onerror: Optional[Callable] = None,\n stages: Optional[Iterable[str]] = None,\n):\n res: Dict[str, Dict] = defaultdict(lambda: defaultdict(dict))\n fs_paths = copy(params_fs_paths)\n\n if deps or stages:\n for param in params:\n if stages and param.stage.addressing not in stages:\n continue\n params_dict = error_handler(param.read_params)(\n onerror=onerror, flatten=False\n )\n if params_dict:\n name = os.sep.join(repo.fs.path.relparts(param.fs_path))\n res[name][\"data\"].update(params_dict[\"data\"])\n if name in fs_paths:\n fs_paths.remove(name)\n else:\n fs_paths += [param.fs_path for param in params]\n\n for fs_path in fs_paths:\n from_path = _read_fs_path(repo.fs, fs_path, onerror=onerror)\n if from_path:\n name = os.sep.join(repo.fs.path.relparts(fs_path))\n res[name] = from_path\n\n return res\n\n\ndef _collect_vars(repo, params, stages=None) -> Dict:\n vars_params: Dict[str, Dict] = defaultdict(dict)\n\n for stage in repo.index.stages:\n if isinstance(stage, PipelineStage) and stage.tracked_vars:\n if stages and stage.addressing not in stages:\n continue\n for file, vars_ in stage.tracked_vars.items():\n # `params` file are shown regardless of `tracked` or not\n # to reduce noise and duplication, they are skipped\n if file in params:\n continue\n\n name = os.sep.join(repo.fs.path.parts(file))\n vars_params[name].update(vars_)\n return vars_params\n\n\n@locked\ndef show(\n repo,\n revs=None,\n targets=None,\n deps=False,\n onerror: Callable = None,\n stages=None,\n):\n if onerror is None:\n onerror = onerror_collect\n res = {}\n\n targets = ensure_list(targets)\n targets = [repo.dvcfs.from_os_path(target) for target in targets]\n\n for branch in repo.brancher(revs=revs):\n params = error_handler(_gather_params)(\n repo=repo,\n rev=branch,\n targets=targets,\n deps=deps,\n onerror=onerror,\n stages=stages,\n )\n\n if params:\n res[branch] = params\n\n # Hide workspace params if they are the same as in the active branch\n try:\n active_branch = repo.scm.active_branch()\n except (SCMError, NoSCMError):\n # SCMError - detached head\n # NoSCMError - no repo case\n pass\n else:\n if res.get(\"workspace\") == res.get(active_branch):\n res.pop(\"workspace\", None)\n\n errored = errored_revisions(res)\n if errored:\n ui.error_write(\n \"DVC failed to load some parameters for 
following revisions:\"\n f\" '{', '.join(errored)}'.\"\n )\n\n return res\n\n\ndef _gather_params(\n repo, rev, targets=None, deps=False, onerror=None, stages=None\n):\n param_outs, params_fs_paths = _collect_configs(\n repo, rev, targets=targets, deps=deps, stages=stages\n )\n params = _read_params(\n repo,\n params=param_outs,\n params_fs_paths=params_fs_paths,\n deps=deps,\n onerror=onerror,\n stages=stages,\n )\n vars_params = _collect_vars(repo, params, stages=stages)\n\n # NOTE: only those that are not added as a ParamDependency are\n # included so we don't need to recursively merge them yet.\n for key, vals in vars_params.items():\n params[key][\"data\"] = vals\n return params\n", "path": "dvc/repo/params/show.py"}]}
| 2,168 | 347 |
gh_patches_debug_9551
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-539
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EfficientDet images_size has to be divisible by 128
## 📓 Documentation Update
**What part of documentation was unclear or wrong?**
It has to be clear that the image size for the efficientdet model has to be divisible by 128
**Describe the solution you'd like**
Add this information to the `efficientdet/model.py` docstring and also make it explicit in the tutorials that use efficientdet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/efficientdet/model.py`
Content:
```
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5 from effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench
6 from effdet import create_model_from_config
7 from effdet.efficientdet import HeadNet
8
9
10 def model(
11 model_name: str, num_classes: int, img_size: int, pretrained: bool = True
12 ) -> nn.Module:
13 """Creates the efficientdet model specified by `model_name`.
14
15 The model implementation is by Ross Wightman, original repo
16 [here](https://github.com/rwightman/efficientdet-pytorch).
17
18 # Arguments
19 model_name: Specifies the model to create. For pretrained models, check
20 [this](https://github.com/rwightman/efficientdet-pytorch#models) table.
21 num_classes: Number of classes of your dataset (including background).
22 img_size: Image size that will be fed to the model. Must be squared and
23 divisible by 64.
24 pretrained: If True, use a pretrained backbone (on COCO).
25
26 # Returns
27 A PyTorch model.
28 """
29 config = get_efficientdet_config(model_name=model_name)
30 config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size
31
32 model_bench = create_model_from_config(
33 config,
34 bench_task="train",
35 bench_labeler=True,
36 num_classes=num_classes,
37 pretrained=pretrained,
38 )
39
40 # TODO: Break down param groups for backbone
41 def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:
42 unwrapped = unwrap_bench(model)
43
44 layers = [
45 unwrapped.backbone,
46 unwrapped.fpn,
47 nn.Sequential(unwrapped.class_net, unwrapped.box_net),
48 ]
49 param_groups = [list(layer.parameters()) for layer in layers]
50 check_all_model_params_in_groups2(model, param_groups)
51
52 return param_groups
53
54 model_bench.param_groups = MethodType(param_groups_fn, model_bench)
55
56 return model_bench
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/efficientdet/model.py b/icevision/models/efficientdet/model.py
--- a/icevision/models/efficientdet/model.py
+++ b/icevision/models/efficientdet/model.py
@@ -20,7 +20,7 @@
[this](https://github.com/rwightman/efficientdet-pytorch#models) table.
num_classes: Number of classes of your dataset (including background).
img_size: Image size that will be fed to the model. Must be squared and
- divisible by 64.
+ divisible by 128.
pretrained: If True, use a pretrained backbone (on COCO).
# Returns
|
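The golden diff above is a pure docstring fix. As a hedged illustration of the constraint it documents, a caller-side guard could look like the sketch below; the helper name and error message are assumptions, not icevision API:

```python
# Hypothetical pre-flight check, not part of icevision: make the
# "divisible by 128" requirement explicit before building the model.
def check_img_size(img_size: int, divisor: int = 128) -> int:
    if img_size % divisor != 0:
        raise ValueError(
            f"img_size={img_size} must be squared and divisible by {divisor} for EfficientDet"
        )
    return img_size

print(check_img_size(512))  # ok: 512 = 4 * 128
# check_img_size(448)       # would raise: divisible by 64 but not by 128
```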
{"golden_diff": "diff --git a/icevision/models/efficientdet/model.py b/icevision/models/efficientdet/model.py\n--- a/icevision/models/efficientdet/model.py\n+++ b/icevision/models/efficientdet/model.py\n@@ -20,7 +20,7 @@\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n- divisible by 64.\n+ divisible by 128.\n pretrained: If True, use a pretrained backbone (on COCO).\n \n # Returns\n", "issue": "EfficientDet images_size has to be divisible by 128\n## \ud83d\udcd3 Documentation Update\r\n**What part of documentation was unclear or wrong?**\r\nIt has to be clear that the image size for the efficientdet model has to be divisible by 128\r\n\r\n**Describe the solution you'd like**\r\nAdd this information to the `efficientdet/model.py` docstring and also make it explicit in the tutorials that use efficientdet.\r\n\r\n\r\n\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench\nfrom effdet import create_model_from_config\nfrom effdet.efficientdet import HeadNet\n\n\ndef model(\n model_name: str, num_classes: int, img_size: int, pretrained: bool = True\n) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n\n The model implementation is by Ross Wightman, original repo\n [here](https://github.com/rwightman/efficientdet-pytorch).\n\n # Arguments\n model_name: Specifies the model to create. For pretrained models, check\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n divisible by 64.\n pretrained: If True, use a pretrained backbone (on COCO).\n\n # Returns\n A PyTorch model.\n \"\"\"\n config = get_efficientdet_config(model_name=model_name)\n config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size\n\n model_bench = create_model_from_config(\n config,\n bench_task=\"train\",\n bench_labeler=True,\n num_classes=num_classes,\n pretrained=pretrained,\n )\n\n # TODO: Break down param groups for backbone\n def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:\n unwrapped = unwrap_bench(model)\n\n layers = [\n unwrapped.backbone,\n unwrapped.fpn,\n nn.Sequential(unwrapped.class_net, unwrapped.box_net),\n ]\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n model_bench.param_groups = MethodType(param_groups_fn, model_bench)\n\n return model_bench\n", "path": "icevision/models/efficientdet/model.py"}], "after_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench\nfrom effdet import create_model_from_config\nfrom effdet.efficientdet import HeadNet\n\n\ndef model(\n model_name: str, num_classes: int, img_size: int, pretrained: bool = True\n) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n\n The model implementation is by Ross Wightman, original repo\n [here](https://github.com/rwightman/efficientdet-pytorch).\n\n # Arguments\n model_name: Specifies the model to create. 
For pretrained models, check\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n divisible by 128.\n pretrained: If True, use a pretrained backbone (on COCO).\n\n # Returns\n A PyTorch model.\n \"\"\"\n config = get_efficientdet_config(model_name=model_name)\n config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size\n\n model_bench = create_model_from_config(\n config,\n bench_task=\"train\",\n bench_labeler=True,\n num_classes=num_classes,\n pretrained=pretrained,\n )\n\n # TODO: Break down param groups for backbone\n def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:\n unwrapped = unwrap_bench(model)\n\n layers = [\n unwrapped.backbone,\n unwrapped.fpn,\n nn.Sequential(unwrapped.class_net, unwrapped.box_net),\n ]\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n model_bench.param_groups = MethodType(param_groups_fn, model_bench)\n\n return model_bench\n", "path": "icevision/models/efficientdet/model.py"}]}
| 919 | 153 |
gh_patches_debug_22145
|
rasdani/github-patches
|
git_diff
|
huggingface__transformers-7035
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
non-interactive transformers-cli upload?
# 🚀 Feature request
Currently, `transformers-cli upload` works only interactively due to its prompt:
`Proceed? [Y/n]`
After running the updated model conversion, I would like to be able to do:
```
cd data
transformers-cli upload fsmt-wmt19-ru-en
transformers-cli upload fsmt-wmt19-en-ru
transformers-cli upload fsmt-wmt19-de-en
transformers-cli upload fsmt-wmt19-en-de
cd -
```
But this won't work unattended, because each `upload` stops and waits at the `Proceed? [Y/n]` prompt.
Would it be possible to add a `-y` override?
Alternatively, would it be possible to give it all dirs to upload in one command?
```
transformers-cli upload fsmt-wmt19-ru-en fsmt-wmt19-en-ru fsmt-wmt19-de-en fsmt-wmt19-en-de
```
## Motivation
I have been re-uploading 4 x 1.1GB models on a relatively slow connection, and I have to be around to hit Y for each one of them, which is very counter-productive, as I have to go back and re-check whether each upload has been completed. I can probably code some shell expect script to feed it automatically, but this defeats the purpose.
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/transformers/commands/user.py`
Content:
```
1 import os
2 import sys
3 from argparse import ArgumentParser
4 from getpass import getpass
5 from typing import List, Union
6
7 from requests.exceptions import HTTPError
8 from transformers.commands import BaseTransformersCLICommand
9 from transformers.hf_api import HfApi, HfFolder
10
11
12 UPLOAD_MAX_FILES = 15
13
14
15 class UserCommands(BaseTransformersCLICommand):
16 @staticmethod
17 def register_subcommand(parser: ArgumentParser):
18 login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
19 login_parser.set_defaults(func=lambda args: LoginCommand(args))
20 whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
21 whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
22 logout_parser = parser.add_parser("logout", help="Log out")
23 logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
24 # s3
25 s3_parser = parser.add_parser("s3", help="{ls, rm} Commands to interact with the files you upload on S3.")
26 s3_subparsers = s3_parser.add_subparsers(help="s3 related commands")
27 ls_parser = s3_subparsers.add_parser("ls")
28 ls_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
29 ls_parser.set_defaults(func=lambda args: ListObjsCommand(args))
30 rm_parser = s3_subparsers.add_parser("rm")
31 rm_parser.add_argument("filename", type=str, help="individual object filename to delete from S3.")
32 rm_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
33 rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args))
34 # upload
35 upload_parser = parser.add_parser("upload", help="Upload a model to S3.")
36 upload_parser.add_argument(
37 "path", type=str, help="Local path of the model folder or individual file to upload."
38 )
39 upload_parser.add_argument("--organization", type=str, help="Optional: organization namespace.")
40 upload_parser.add_argument(
41 "--filename", type=str, default=None, help="Optional: override individual object filename on S3."
42 )
43 upload_parser.set_defaults(func=lambda args: UploadCommand(args))
44
45
46 class ANSI:
47 """
48 Helper for en.wikipedia.org/wiki/ANSI_escape_code
49 """
50
51 _bold = "\u001b[1m"
52 _red = "\u001b[31m"
53 _reset = "\u001b[0m"
54
55 @classmethod
56 def bold(cls, s):
57 return "{}{}{}".format(cls._bold, s, cls._reset)
58
59 @classmethod
60 def red(cls, s):
61 return "{}{}{}".format(cls._bold + cls._red, s, cls._reset)
62
63
64 class BaseUserCommand:
65 def __init__(self, args):
66 self.args = args
67 self._api = HfApi()
68
69
70 class LoginCommand(BaseUserCommand):
71 def run(self):
72 print(
73 """
74 _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
75 _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
76 _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
77 _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
78 _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
79
80 """
81 )
82 username = input("Username: ")
83 password = getpass()
84 try:
85 token = self._api.login(username, password)
86 except HTTPError as e:
87 # probably invalid credentials, display error message.
88 print(e)
89 print(ANSI.red(e.response.text))
90 exit(1)
91 HfFolder.save_token(token)
92 print("Login successful")
93 print("Your token:", token, "\n")
94 print("Your token has been saved to", HfFolder.path_token)
95
96
97 class WhoamiCommand(BaseUserCommand):
98 def run(self):
99 token = HfFolder.get_token()
100 if token is None:
101 print("Not logged in")
102 exit()
103 try:
104 user, orgs = self._api.whoami(token)
105 print(user)
106 if orgs:
107 print(ANSI.bold("orgs: "), ",".join(orgs))
108 except HTTPError as e:
109 print(e)
110 print(ANSI.red(e.response.text))
111 exit(1)
112
113
114 class LogoutCommand(BaseUserCommand):
115 def run(self):
116 token = HfFolder.get_token()
117 if token is None:
118 print("Not logged in")
119 exit()
120 HfFolder.delete_token()
121 self._api.logout(token)
122 print("Successfully logged out.")
123
124
125 class ListObjsCommand(BaseUserCommand):
126 def tabulate(self, rows: List[List[Union[str, int]]], headers: List[str]) -> str:
127 """
128 Inspired by:
129 stackoverflow.com/a/8356620/593036
130 stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
131 """
132 col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
133 row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
134 lines = []
135 lines.append(row_format.format(*headers))
136 lines.append(row_format.format(*["-" * w for w in col_widths]))
137 for row in rows:
138 lines.append(row_format.format(*row))
139 return "\n".join(lines)
140
141 def run(self):
142 token = HfFolder.get_token()
143 if token is None:
144 print("Not logged in")
145 exit(1)
146 try:
147 objs = self._api.list_objs(token, organization=self.args.organization)
148 except HTTPError as e:
149 print(e)
150 print(ANSI.red(e.response.text))
151 exit(1)
152 if len(objs) == 0:
153 print("No shared file yet")
154 exit()
155 rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]
156 print(self.tabulate(rows, headers=["Filename", "LastModified", "ETag", "Size"]))
157
158
159 class DeleteObjCommand(BaseUserCommand):
160 def run(self):
161 token = HfFolder.get_token()
162 if token is None:
163 print("Not logged in")
164 exit(1)
165 try:
166 self._api.delete_obj(token, filename=self.args.filename, organization=self.args.organization)
167 except HTTPError as e:
168 print(e)
169 print(ANSI.red(e.response.text))
170 exit(1)
171 print("Done")
172
173
174 class UploadCommand(BaseUserCommand):
175 def walk_dir(self, rel_path):
176 """
177 Recursively list all files in a folder.
178 """
179 entries: List[os.DirEntry] = list(os.scandir(rel_path))
180 files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()] # (filepath, filename)
181 for f in entries:
182 if f.is_dir():
183 files += self.walk_dir(f.path)
184 return files
185
186 def run(self):
187 token = HfFolder.get_token()
188 if token is None:
189 print("Not logged in")
190 exit(1)
191 local_path = os.path.abspath(self.args.path)
192 if os.path.isdir(local_path):
193 if self.args.filename is not None:
194 raise ValueError("Cannot specify a filename override when uploading a folder.")
195 rel_path = os.path.basename(local_path)
196 files = self.walk_dir(rel_path)
197 elif os.path.isfile(local_path):
198 filename = self.args.filename if self.args.filename is not None else os.path.basename(local_path)
199 files = [(local_path, filename)]
200 else:
201 raise ValueError("Not a valid file or directory: {}".format(local_path))
202
203 if sys.platform == "win32":
204 files = [(filepath, filename.replace(os.sep, "/")) for filepath, filename in files]
205
206 if len(files) > UPLOAD_MAX_FILES:
207 print(
208 "About to upload {} files to S3. This is probably wrong. Please filter files before uploading.".format(
209 ANSI.bold(len(files))
210 )
211 )
212 exit(1)
213
214 user, _ = self._api.whoami(token)
215 namespace = self.args.organization if self.args.organization is not None else user
216
217 for filepath, filename in files:
218 print(
219 "About to upload file {} to S3 under filename {} and namespace {}".format(
220 ANSI.bold(filepath), ANSI.bold(filename), ANSI.bold(namespace)
221 )
222 )
223
224 choice = input("Proceed? [Y/n] ").lower()
225 if not (choice == "" or choice == "y" or choice == "yes"):
226 print("Abort")
227 exit()
228 print(ANSI.bold("Uploading... This might take a while if files are large"))
229 for filepath, filename in files:
230 try:
231 access_url = self._api.presign_and_upload(
232 token=token, filename=filename, filepath=filepath, organization=self.args.organization
233 )
234 except HTTPError as e:
235 print(e)
236 print(ANSI.red(e.response.text))
237 exit(1)
238 print("Your file now lives at:")
239 print(access_url)
240
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/transformers/commands/user.py b/src/transformers/commands/user.py
--- a/src/transformers/commands/user.py
+++ b/src/transformers/commands/user.py
@@ -40,6 +40,7 @@
upload_parser.add_argument(
"--filename", type=str, default=None, help="Optional: override individual object filename on S3."
)
+ upload_parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
upload_parser.set_defaults(func=lambda args: UploadCommand(args))
@@ -221,10 +222,11 @@
)
)
- choice = input("Proceed? [Y/n] ").lower()
- if not (choice == "" or choice == "y" or choice == "yes"):
- print("Abort")
- exit()
+ if not self.args.yes:
+ choice = input("Proceed? [Y/n] ").lower()
+ if not (choice == "" or choice == "y" or choice == "yes"):
+ print("Abort")
+ exit()
print(ANSI.bold("Uploading... This might take a while if files are large"))
for filepath, filename in files:
try:
|
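As a hedged sketch of the behaviour the patch above enables: the new `-y/--yes` flag lets the confirmation prompt be skipped, which is what makes the batch uploads from the issue practical. The argparse stand-in below is not the real `transformers-cli` plumbing, and the multi-path loop is an assumption taken from the issue rather than something the patch adds:

```python
# argparse stand-in for the prompt gate added by the patch; not the real
# transformers-cli plumbing, and the multi-path handling is assumed.
import argparse

parser = argparse.ArgumentParser(prog="upload-sketch")
parser.add_argument("paths", nargs="+")
parser.add_argument("-y", "--yes", action="store_true", help="Optional: answer Yes to the prompt")
args = parser.parse_args(["fsmt-wmt19-ru-en", "fsmt-wmt19-en-ru", "-y"])

for path in args.paths:
    if not args.yes:
        choice = input(f"Upload {path}? [Y/n] ").lower()
        if choice not in ("", "y", "yes"):
            print("Abort")
            break
    print(f"uploading {path} ...")  # placeholder for the real upload call
```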
{"golden_diff": "diff --git a/src/transformers/commands/user.py b/src/transformers/commands/user.py\n--- a/src/transformers/commands/user.py\n+++ b/src/transformers/commands/user.py\n@@ -40,6 +40,7 @@\n upload_parser.add_argument(\n \"--filename\", type=str, default=None, help=\"Optional: override individual object filename on S3.\"\n )\n+ upload_parser.add_argument(\"-y\", \"--yes\", action=\"store_true\", help=\"Optional: answer Yes to the prompt\")\n upload_parser.set_defaults(func=lambda args: UploadCommand(args))\n \n \n@@ -221,10 +222,11 @@\n )\n )\n \n- choice = input(\"Proceed? [Y/n] \").lower()\n- if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n- print(\"Abort\")\n- exit()\n+ if not self.args.yes:\n+ choice = input(\"Proceed? [Y/n] \").lower()\n+ if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n+ print(\"Abort\")\n+ exit()\n print(ANSI.bold(\"Uploading... This might take a while if files are large\"))\n for filepath, filename in files:\n try:\n", "issue": "non-interactive transformers-cli upload?\n# \ud83d\ude80 Feature request\r\n\r\nCurrently, `transformers-cli upload` works only interactively due to its prompt:\r\n`Proceed? [Y/n]`\r\n\r\nAfter running the updated model conversion, I would like to be able to do:\r\n```\r\ncd data\r\ntransformers-cli upload fsmt-wmt19-ru-en\r\ntransformers-cli upload fsmt-wmt19-en-ru\r\ntransformers-cli upload fsmt-wmt19-de-en\r\ntransformers-cli upload fsmt-wmt19-en-de\r\ncd -\r\n```\r\nBut this won't work:\r\n\r\nWould it be possible to add a `-y` override?\r\n\r\nAlternatively, would it be possible to give it all dirs to upload in one command?\r\n```\r\ntransformers-cli upload fsmt-wmt19-ru-en fsmt-wmt19-en-ru fsmt-wmt19-de-en fsmt-wmt19-en-de\r\n```\r\n\r\n## Motivation\r\n\r\nI have been re-uploading 4 x 1.1GB models on a relatively slow connection, and I have to be around to hit Y for each one of them, which is very counter-productive, as I have to go back and re-check whether each upload has been completed. 
I can probably code some shell expect script to feed it automatically, but this defeats the purpose.\r\n\r\nThank you!\r\n\n", "before_files": [{"content": "import os\nimport sys\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom requests.exceptions import HTTPError\nfrom transformers.commands import BaseTransformersCLICommand\nfrom transformers.hf_api import HfApi, HfFolder\n\n\nUPLOAD_MAX_FILES = 15\n\n\nclass UserCommands(BaseTransformersCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\"login\", help=\"Log in using the same credentials as on huggingface.co\")\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\"whoami\", help=\"Find out which huggingface.co account you are logged in as.\")\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n # s3\n s3_parser = parser.add_parser(\"s3\", help=\"{ls, rm} Commands to interact with the files you upload on S3.\")\n s3_subparsers = s3_parser.add_subparsers(help=\"s3 related commands\")\n ls_parser = s3_subparsers.add_parser(\"ls\")\n ls_parser.add_argument(\"--organization\", type=str, help=\"Optional: organization namespace.\")\n ls_parser.set_defaults(func=lambda args: ListObjsCommand(args))\n rm_parser = s3_subparsers.add_parser(\"rm\")\n rm_parser.add_argument(\"filename\", type=str, help=\"individual object filename to delete from S3.\")\n rm_parser.add_argument(\"--organization\", type=str, help=\"Optional: organization namespace.\")\n rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args))\n # upload\n upload_parser = parser.add_parser(\"upload\", help=\"Upload a model to S3.\")\n upload_parser.add_argument(\n \"path\", type=str, help=\"Local path of the model folder or individual file to upload.\"\n )\n upload_parser.add_argument(\"--organization\", type=str, help=\"Optional: organization namespace.\")\n upload_parser.add_argument(\n \"--filename\", type=str, default=None, help=\"Optional: override individual object filename on S3.\"\n )\n upload_parser.set_defaults(func=lambda args: UploadCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return \"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print(\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token:\", token, 
\"\\n\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n user, orgs = self._api.whoami(token)\n print(user)\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListObjsCommand(BaseUserCommand):\n def tabulate(self, rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n stackoverflow.com/a/8356620/593036\n stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_objs(token, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n exit()\n rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]\n print(self.tabulate(rows, headers=[\"Filename\", \"LastModified\", \"ETag\", \"Size\"]))\n\n\nclass DeleteObjCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n self._api.delete_obj(token, filename=self.args.filename, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"Done\")\n\n\nclass UploadCommand(BaseUserCommand):\n def walk_dir(self, rel_path):\n \"\"\"\n Recursively list all files in a folder.\n \"\"\"\n entries: List[os.DirEntry] = list(os.scandir(rel_path))\n files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()] # (filepath, filename)\n for f in entries:\n if f.is_dir():\n files += self.walk_dir(f.path)\n return files\n\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n local_path = os.path.abspath(self.args.path)\n if os.path.isdir(local_path):\n if self.args.filename is not None:\n raise ValueError(\"Cannot specify a filename override when uploading a folder.\")\n rel_path = os.path.basename(local_path)\n files = self.walk_dir(rel_path)\n elif os.path.isfile(local_path):\n filename = self.args.filename if self.args.filename is not None else os.path.basename(local_path)\n files = [(local_path, filename)]\n else:\n raise ValueError(\"Not a valid file or directory: {}\".format(local_path))\n\n if sys.platform == \"win32\":\n files = [(filepath, filename.replace(os.sep, \"/\")) for filepath, filename in files]\n\n if len(files) > UPLOAD_MAX_FILES:\n print(\n \"About to upload {} files to S3. This is probably wrong. 
Please filter files before uploading.\".format(\n ANSI.bold(len(files))\n )\n )\n exit(1)\n\n user, _ = self._api.whoami(token)\n namespace = self.args.organization if self.args.organization is not None else user\n\n for filepath, filename in files:\n print(\n \"About to upload file {} to S3 under filename {} and namespace {}\".format(\n ANSI.bold(filepath), ANSI.bold(filename), ANSI.bold(namespace)\n )\n )\n\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n print(ANSI.bold(\"Uploading... This might take a while if files are large\"))\n for filepath, filename in files:\n try:\n access_url = self._api.presign_and_upload(\n token=token, filename=filename, filepath=filepath, organization=self.args.organization\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"Your file now lives at:\")\n print(access_url)\n", "path": "src/transformers/commands/user.py"}], "after_files": [{"content": "import os\nimport sys\nfrom argparse import ArgumentParser\nfrom getpass import getpass\nfrom typing import List, Union\n\nfrom requests.exceptions import HTTPError\nfrom transformers.commands import BaseTransformersCLICommand\nfrom transformers.hf_api import HfApi, HfFolder\n\n\nUPLOAD_MAX_FILES = 15\n\n\nclass UserCommands(BaseTransformersCLICommand):\n @staticmethod\n def register_subcommand(parser: ArgumentParser):\n login_parser = parser.add_parser(\"login\", help=\"Log in using the same credentials as on huggingface.co\")\n login_parser.set_defaults(func=lambda args: LoginCommand(args))\n whoami_parser = parser.add_parser(\"whoami\", help=\"Find out which huggingface.co account you are logged in as.\")\n whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))\n logout_parser = parser.add_parser(\"logout\", help=\"Log out\")\n logout_parser.set_defaults(func=lambda args: LogoutCommand(args))\n # s3\n s3_parser = parser.add_parser(\"s3\", help=\"{ls, rm} Commands to interact with the files you upload on S3.\")\n s3_subparsers = s3_parser.add_subparsers(help=\"s3 related commands\")\n ls_parser = s3_subparsers.add_parser(\"ls\")\n ls_parser.add_argument(\"--organization\", type=str, help=\"Optional: organization namespace.\")\n ls_parser.set_defaults(func=lambda args: ListObjsCommand(args))\n rm_parser = s3_subparsers.add_parser(\"rm\")\n rm_parser.add_argument(\"filename\", type=str, help=\"individual object filename to delete from S3.\")\n rm_parser.add_argument(\"--organization\", type=str, help=\"Optional: organization namespace.\")\n rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args))\n # upload\n upload_parser = parser.add_parser(\"upload\", help=\"Upload a model to S3.\")\n upload_parser.add_argument(\n \"path\", type=str, help=\"Local path of the model folder or individual file to upload.\"\n )\n upload_parser.add_argument(\"--organization\", type=str, help=\"Optional: organization namespace.\")\n upload_parser.add_argument(\n \"--filename\", type=str, default=None, help=\"Optional: override individual object filename on S3.\"\n )\n upload_parser.add_argument(\"-y\", \"--yes\", action=\"store_true\", help=\"Optional: answer Yes to the prompt\")\n upload_parser.set_defaults(func=lambda args: UploadCommand(args))\n\n\nclass ANSI:\n \"\"\"\n Helper for en.wikipedia.org/wiki/ANSI_escape_code\n \"\"\"\n\n _bold = \"\\u001b[1m\"\n _red = \"\\u001b[31m\"\n _reset = \"\\u001b[0m\"\n\n @classmethod\n def bold(cls, s):\n return 
\"{}{}{}\".format(cls._bold, s, cls._reset)\n\n @classmethod\n def red(cls, s):\n return \"{}{}{}\".format(cls._bold + cls._red, s, cls._reset)\n\n\nclass BaseUserCommand:\n def __init__(self, args):\n self.args = args\n self._api = HfApi()\n\n\nclass LoginCommand(BaseUserCommand):\n def run(self):\n print(\n \"\"\"\n _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n\n \"\"\"\n )\n username = input(\"Username: \")\n password = getpass()\n try:\n token = self._api.login(username, password)\n except HTTPError as e:\n # probably invalid credentials, display error message.\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n HfFolder.save_token(token)\n print(\"Login successful\")\n print(\"Your token:\", token, \"\\n\")\n print(\"Your token has been saved to\", HfFolder.path_token)\n\n\nclass WhoamiCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n try:\n user, orgs = self._api.whoami(token)\n print(user)\n if orgs:\n print(ANSI.bold(\"orgs: \"), \",\".join(orgs))\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n\n\nclass LogoutCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit()\n HfFolder.delete_token()\n self._api.logout(token)\n print(\"Successfully logged out.\")\n\n\nclass ListObjsCommand(BaseUserCommand):\n def tabulate(self, rows: List[List[Union[str, int]]], headers: List[str]) -> str:\n \"\"\"\n Inspired by:\n stackoverflow.com/a/8356620/593036\n stackoverflow.com/questions/9535954/printing-lists-as-tabular-data\n \"\"\"\n col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]\n row_format = (\"{{:{}}} \" * len(headers)).format(*col_widths)\n lines = []\n lines.append(row_format.format(*headers))\n lines.append(row_format.format(*[\"-\" * w for w in col_widths]))\n for row in rows:\n lines.append(row_format.format(*row))\n return \"\\n\".join(lines)\n\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n objs = self._api.list_objs(token, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n if len(objs) == 0:\n print(\"No shared file yet\")\n exit()\n rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]\n print(self.tabulate(rows, headers=[\"Filename\", \"LastModified\", \"ETag\", \"Size\"]))\n\n\nclass DeleteObjCommand(BaseUserCommand):\n def run(self):\n token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n try:\n self._api.delete_obj(token, filename=self.args.filename, organization=self.args.organization)\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"Done\")\n\n\nclass UploadCommand(BaseUserCommand):\n def walk_dir(self, rel_path):\n \"\"\"\n Recursively list all files in a folder.\n \"\"\"\n entries: List[os.DirEntry] = list(os.scandir(rel_path))\n files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()] # (filepath, filename)\n for f in entries:\n if f.is_dir():\n files += self.walk_dir(f.path)\n return files\n\n def run(self):\n 
token = HfFolder.get_token()\n if token is None:\n print(\"Not logged in\")\n exit(1)\n local_path = os.path.abspath(self.args.path)\n if os.path.isdir(local_path):\n if self.args.filename is not None:\n raise ValueError(\"Cannot specify a filename override when uploading a folder.\")\n rel_path = os.path.basename(local_path)\n files = self.walk_dir(rel_path)\n elif os.path.isfile(local_path):\n filename = self.args.filename if self.args.filename is not None else os.path.basename(local_path)\n files = [(local_path, filename)]\n else:\n raise ValueError(\"Not a valid file or directory: {}\".format(local_path))\n\n if sys.platform == \"win32\":\n files = [(filepath, filename.replace(os.sep, \"/\")) for filepath, filename in files]\n\n if len(files) > UPLOAD_MAX_FILES:\n print(\n \"About to upload {} files to S3. This is probably wrong. Please filter files before uploading.\".format(\n ANSI.bold(len(files))\n )\n )\n exit(1)\n\n user, _ = self._api.whoami(token)\n namespace = self.args.organization if self.args.organization is not None else user\n\n for filepath, filename in files:\n print(\n \"About to upload file {} to S3 under filename {} and namespace {}\".format(\n ANSI.bold(filepath), ANSI.bold(filename), ANSI.bold(namespace)\n )\n )\n\n if not self.args.yes:\n choice = input(\"Proceed? [Y/n] \").lower()\n if not (choice == \"\" or choice == \"y\" or choice == \"yes\"):\n print(\"Abort\")\n exit()\n print(ANSI.bold(\"Uploading... This might take a while if files are large\"))\n for filepath, filename in files:\n try:\n access_url = self._api.presign_and_upload(\n token=token, filename=filename, filepath=filepath, organization=self.args.organization\n )\n except HTTPError as e:\n print(e)\n print(ANSI.red(e.response.text))\n exit(1)\n print(\"Your file now lives at:\")\n print(access_url)\n", "path": "src/transformers/commands/user.py"}]}
| 3,290 | 278 |
gh_patches_debug_18239
|
rasdani/github-patches
|
git_diff
|
pymodbus-dev__pymodbus-413
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pymodbus 2.2.0rc2 failing to read registers/coils
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow (tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus)) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
-->
### Versions
* Python: 2.7.12
* OS: OSX
* Pymodbus: 2.2.0rc2
* Modbus Hardware (if used): NA
### Pymodbus Specific
* Server: tcp/rtu/ascii - sync/async --> all
* Client: tcp/rtu/ascii - sync/async --> all
### Description
Basic operations on TCP to read coils/registers failing with `[Input/Output] No Response received from the remote unit/Unable to decode response`.
The same code works just fine in earlier versions of pymodbus (2.2.0rc1 and below)
### Code and Logs
```python
# code and logs here.
```
--- END ISSUE ---
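The report's "Code and Logs" block above was left empty. As an editorial illustration only — not code from the reporter — the following hypothetical sketch shows the kind of basic synchronous TCP read described as failing under 2.2.0rc2; the host, port, address, count, and unit id are all placeholder assumptions.

```python
# Hypothetical minimal sketch of a synchronous TCP coil read (placeholder endpoint).
from pymodbus.client.sync import ModbusTcpClient

client = ModbusTcpClient("127.0.0.1", port=502)  # placeholder host/port
client.connect()
result = client.read_coils(0, 8, unit=1)  # placeholder address/count/unit id
if result.isError():
    print("Read failed:", result)  # the reported "No Response received" surfaces as an error response here
else:
    print("Coil states:", result.bits)
client.close()
```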
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # PyModbus documentation build configuration file, created by
4 # sphinx-quickstart on Wed Dec 20 12:31:10 2017.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 import os
20 import sys
21 import recommonmark
22 from recommonmark.parser import CommonMarkParser
23 from recommonmark.transform import AutoStructify
24 from pymodbus import __version__
25 parent_dir = os.path.abspath(os.pardir)
26 # examples = os.path.join(parent_dir, "examples")
27 example_contrib = os.path.join(parent_dir, "examples/contrib")
28 example_common = os.path.join(parent_dir, "examples/common")
29 example_gui = os.path.join(parent_dir, "examples/gui")
30
31 sys.path.insert(0, os.path.abspath(os.pardir))
32 sys.path.append(example_common)
33 sys.path.append(example_contrib)
34 sys.path.append(example_gui)
35 # sys.path.extend([examples, example_common, example_contrib, example_gui])
36 # sys.path.insert(0, os.path.abspath('../'))
37
38 github_doc_root = 'https://github.com/riptideio/pymodbus/tree/master/doc/'
39 # -- General configuration ------------------------------------------------
40
41 # If your documentation needs a minimal Sphinx version, state it here.
42 #
43 # needs_sphinx = '1.0'
44
45 # Add any Sphinx extension module names here, as strings. They can be
46 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
47 # ones.
48
49 #extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']
50 extensions = ['sphinx.ext.autodoc', 'm2r']
51
52 # Add any paths that contain templates here, relative to this directory.
53 templates_path = ['_templates']
54
55 # The suffix(es) of source filenames.
56 # You can specify multiple suffix as a list of string:
57 #
58 #source_parsers = {
59 # '.md': CommonMarkParser,
60 #}
61
62 source_suffix = ['.rst', '.md']
63 # source_suffix = '.rst'
64
65 # The master toctree document.
66 master_doc = 'index'
67
68 # General information about the project.
69 project = u'PyModbus'
70 copyright = u'2017, Sanjay'
71 author = u'Sanjay'
72
73 # The version info for the project you're documenting, acts as replacement for
74 # |version| and |release|, also used in various other places throughout the
75 # built documents.
76 #
77 # The short X.Y version.
78 version = __version__
79 # The full version, including alpha/beta/rc tags.
80 release = __version__
81
82 # The language for content autogenerated by Sphinx. Refer to documentation
83 # for a list of supported languages.
84 #
85 # This is also used if you do content translation via gettext catalogs.
86 # Usually you set "language" from the command line for these cases.
87 language = None
88
89 # List of patterns, relative to source directory, that match files and
90 # directories to ignore when looking for source files.
91 # This patterns also effect to html_static_path and html_extra_path
92 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
93
94 # The name of the Pygments (syntax highlighting) style to use.
95 pygments_style = 'sphinx'
96
97 # If true, `todo` and `todoList` produce output, else they produce nothing.
98 todo_include_todos = False
99
100
101 # -- Options for HTML output ----------------------------------------------
102
103 # The theme to use for HTML and HTML Help pages. See the documentation for
104 # a list of builtin themes.
105 #
106 html_theme = 'sphinx_rtd_theme'
107
108 # Theme options are theme-specific and customize the look and feel of a theme
109 # further. For a list of options available for each theme, see the
110 # documentation.
111 #
112 # html_theme_options = {}
113
114 # Add any paths that contain custom static files (such as style sheets) here,
115 # relative to this directory. They are copied after the builtin static files,
116 # so a file named "default.css" will overwrite the builtin "default.css".
117 # html_static_path = ['_static']
118 html_static_path = []
119
120 # Custom sidebar templates, must be a dictionary that maps document names
121 # to template names.
122 #
123 # This is required for the alabaster theme
124 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
125 html_sidebars = {
126 '**': [
127 'relations.html', # needs 'show_related': True theme option to display
128 'searchbox.html',
129 ]
130 }
131
132
133 # -- Options for HTMLHelp output ------------------------------------------
134
135 # Output file base name for HTML help builder.
136 htmlhelp_basename = 'PyModbusdoc'
137
138
139 # -- Options for LaTeX output ---------------------------------------------
140
141 latex_elements = {
142 # The paper size ('letterpaper' or 'a4paper').
143 #
144 # 'papersize': 'letterpaper',
145
146 # The font size ('10pt', '11pt' or '12pt').
147 #
148 # 'pointsize': '10pt',
149
150 # Additional stuff for the LaTeX preamble.
151 #
152 # 'preamble': '',
153
154 # Latex figure (float) alignment
155 #
156 # 'figure_align': 'htbp',
157 }
158
159 # Grouping the document tree into LaTeX files. List of tuples
160 # (source start file, target name, title,
161 # author, documentclass [howto, manual, or own class]).
162 latex_documents = [
163 (master_doc, 'PyModbus.tex', u'PyModbus Documentation',
164 u'Sanjay', 'manual'),
165 ]
166
167
168 # -- Options for manual page output ---------------------------------------
169
170 # One entry per manual page. List of tuples
171 # (source start file, name, description, authors, manual section).
172 man_pages = [
173 (master_doc, 'pymodbus', u'PyModbus Documentation',
174 [author], 1)
175 ]
176
177
178 # -- Options for Texinfo output -------------------------------------------
179
180 # Grouping the document tree into Texinfo files. List of tuples
181 # (source start file, target name, title, author,
182 # dir menu entry, description, category)
183 texinfo_documents = [
184 (master_doc, 'PyModbus', u'PyModbus Documentation',
185 author, 'PyModbus', 'One line description of project.',
186 'Miscellaneous'),
187 ]
188
189
190 def setup(app):
191 app.add_config_value('recommonmark_config', {
192 'url_resolver': lambda url: github_doc_root + url,
193 'auto_toc_tree_section': 'Contents',
194 }, True)
195 app.add_transform(AutoStructify)
196
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -45,7 +45,9 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']
+
+#extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']
+extensions = ['sphinx.ext.autodoc', 'm2r']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -53,9 +55,9 @@
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
-source_parsers = {
- '.md': CommonMarkParser,
-}
+#source_parsers = {
+# '.md': CommonMarkParser,
+#}
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
|
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -45,7 +45,9 @@\n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n-extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\n+\n+#extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\n+extensions = ['sphinx.ext.autodoc', 'm2r']\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = ['_templates']\n@@ -53,9 +55,9 @@\n # The suffix(es) of source filenames.\n # You can specify multiple suffix as a list of string:\n #\n-source_parsers = {\n- '.md': CommonMarkParser,\n-}\n+#source_parsers = {\n+# '.md': CommonMarkParser,\n+#}\n \n source_suffix = ['.rst', '.md']\n # source_suffix = '.rst'\n", "issue": "Pymodbus 2.2.0rc2 failing to read registers/coils\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: OSX\r\n* Pymodbus: 2.2.0rc2\r\n* Modbus Hardware (if used): NA\r\n\r\n### Pymodbus Specific\r\n* Server: tcp/rtu/ascii - sync/async --> all\r\n* Client: tcp/rtu/ascii - sync/async --> all\r\n\r\n### Description\r\nBasic operations on TCP to read coils/registers failing with `[Input/Output] No Response received from the remote unit/Unable to decode response`.\r\n The same code works just fine in earlier versions of pymodbus (2.2.0rc1 and below)\r\n### Code and Logs\r\n\r\n```python\r\n # code and logs here.\r\n\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# PyModbus documentation build configuration file, created by\n# sphinx-quickstart on Wed Dec 20 12:31:10 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport recommonmark\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom pymodbus import __version__\nparent_dir = os.path.abspath(os.pardir)\n# examples = os.path.join(parent_dir, \"examples\")\nexample_contrib = os.path.join(parent_dir, \"examples/contrib\")\nexample_common = os.path.join(parent_dir, \"examples/common\")\nexample_gui = os.path.join(parent_dir, \"examples/gui\")\n\nsys.path.insert(0, os.path.abspath(os.pardir))\nsys.path.append(example_common)\nsys.path.append(example_contrib)\nsys.path.append(example_gui)\n# sys.path.extend([examples, example_common, example_contrib, example_gui])\n# sys.path.insert(0, os.path.abspath('../'))\n\ngithub_doc_root = 'https://github.com/riptideio/pymodbus/tree/master/doc/'\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n\n#extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\nextensions = ['sphinx.ext.autodoc', 'm2r']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n#source_parsers = {\n# '.md': CommonMarkParser,\n#}\n\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'PyModbus'\ncopyright = u'2017, Sanjay'\nauthor = u'Sanjay'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\nhtml_static_path = []\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'PyModbusdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'PyModbus.tex', u'PyModbus Documentation',\n u'Sanjay', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'pymodbus', u'PyModbus Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'PyModbus', u'PyModbus Documentation',\n author, 'PyModbus', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {\n 'url_resolver': lambda url: github_doc_root + url,\n 'auto_toc_tree_section': 'Contents',\n }, True)\n app.add_transform(AutoStructify)\n\n", "path": "doc/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# PyModbus documentation build configuration file, created by\n# sphinx-quickstart on Wed Dec 20 12:31:10 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport recommonmark\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom pymodbus import __version__\nparent_dir = os.path.abspath(os.pardir)\n# examples = os.path.join(parent_dir, \"examples\")\nexample_contrib = os.path.join(parent_dir, \"examples/contrib\")\nexample_common = os.path.join(parent_dir, \"examples/common\")\nexample_gui = os.path.join(parent_dir, \"examples/gui\")\n\nsys.path.insert(0, os.path.abspath(os.pardir))\nsys.path.append(example_common)\nsys.path.append(example_contrib)\nsys.path.append(example_gui)\n# sys.path.extend([examples, example_common, example_contrib, example_gui])\n# sys.path.insert(0, os.path.abspath('../'))\n\ngithub_doc_root = 'https://github.com/riptideio/pymodbus/tree/master/doc/'\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\n\n#extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\nextensions = ['sphinx.ext.autodoc', 'm2r']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n#source_parsers = {\n# '.md': CommonMarkParser,\n#}\n\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'PyModbus'\ncopyright = u'2017, Sanjay'\nauthor = u'Sanjay'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\nhtml_static_path = []\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'PyModbusdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'PyModbus.tex', u'PyModbus Documentation',\n u'Sanjay', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'pymodbus', u'PyModbus Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'PyModbus', u'PyModbus Documentation',\n author, 'PyModbus', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {\n 'url_resolver': lambda url: github_doc_root + url,\n 'auto_toc_tree_section': 'Contents',\n }, True)\n app.add_transform(AutoStructify)\n\n", "path": "doc/conf.py"}]}
| 2,537 | 237 |
gh_patches_debug_17672
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-234
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better error message if config structure passed to instantiate is invalid.
# 🐛 Bug
In [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if `params` is mistakenly given a single string value, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exits with the error mentioned in "Stack trace/error messages":
This is fine to some extent, but is this the kind of output message you (the Hydra team) aim to provide?
<!-- A clear and concise description of what the bug is. -->
## To reproduce
** Minimal Code/Config snippet to reproduce **
Set `params` to `bug_report` as in:
```YAML
class: ...
params: bug_report
```
** Stack trace/error message **
```
> AttributeError: 'str' object has no attribute 'merge_with'
[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'
```
**Optional video**
If applicable, record a short [video](https://asciinema.org) to help explain your problem.
You can also
## Expected Behavior
<!-- A clear and concise description of what you expected to happen. -->
I expect some sort of error message explaining that `params` should be a collection of values, whereas [[TYPE]] was provided.
## System information
- <!-- Hydra Version --> Hydra Version: 0.9.0
- <!-- Operating system --> Operating System: Arch Linux
## Additional context
If you confirm that this should be fixed, I've already implemented the solution (IMO).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/utils.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging.config
3
4 from omegaconf import OmegaConf
5
6 # pylint: disable=C0103
7 log = logging.getLogger(__name__)
8
9
10 def get_method(path):
11 return get_class(path)
12
13
14 def get_class(path):
15 try:
16 from importlib import import_module
17
18 module_path, _, class_name = path.rpartition(".")
19 mod = import_module(module_path)
20 try:
21 klass = getattr(mod, class_name)
22 except AttributeError:
23 raise ImportError(
24 "Class {} is not in module {}".format(class_name, module_path)
25 )
26 return klass
27 except ValueError as e:
28 log.error("Error initializing class " + path)
29 raise e
30
31
32 def get_static_method(full_method_name):
33 try:
34 spl = full_method_name.split(".")
35 method_name = spl.pop()
36 class_name = ".".join(spl)
37 clz = get_class(class_name)
38 return getattr(clz, method_name)
39 except Exception as e:
40 log.error("Error getting static method {} : {}".format(full_method_name, e))
41 raise e
42
43
44 def instantiate(config, *args, **kwargs):
45 assert config is not None, "Input config is None"
46 try:
47 clazz = get_class(config["class"])
48 params = config.params if "params" in config else OmegaConf.create()
49 params.merge_with(OmegaConf.create(kwargs))
50 return clazz(*args, **params)
51 except Exception as e:
52 log.error("Error instantiating {} : {}".format(config["class"], e))
53 raise e
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hydra/utils.py b/hydra/utils.py
--- a/hydra/utils.py
+++ b/hydra/utils.py
@@ -1,7 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging.config
-from omegaconf import OmegaConf
+from omegaconf import OmegaConf, DictConfig
# pylint: disable=C0103
log = logging.getLogger(__name__)
@@ -46,6 +46,11 @@
try:
clazz = get_class(config["class"])
params = config.params if "params" in config else OmegaConf.create()
+ assert isinstance(
+ params, DictConfig
+ ), "Input config params are expected to be a mapping, found {}".format(
+ type(config.params)
+ )
params.merge_with(OmegaConf.create(kwargs))
return clazz(*args, **params)
except Exception as e:
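
As an editorial aside (not part of the record), the sketch below shows the failing config shape from the issue — `params` given a bare string — being caught by the assertion added in the patch instead of failing later inside `merge_with`; the `class` value and the string `bug_report` are placeholders.

```python
# Illustrative sketch: a scalar `params` now trips the isinstance check up front
# rather than failing later with "'str' object has no attribute 'merge_with'".
from omegaconf import OmegaConf, DictConfig

config = OmegaConf.create({"class": "collections.OrderedDict", "params": "bug_report"})

params = config.params if "params" in config else OmegaConf.create()
try:
    assert isinstance(params, DictConfig), (
        "Input config params are expected to be a mapping, found {}".format(type(params))
    )
except AssertionError as err:
    print(err)  # -> Input config params are expected to be a mapping, found <class 'str'>
```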
|
{"golden_diff": "diff --git a/hydra/utils.py b/hydra/utils.py\n--- a/hydra/utils.py\n+++ b/hydra/utils.py\n@@ -1,7 +1,7 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import logging.config\n \n-from omegaconf import OmegaConf\n+from omegaconf import OmegaConf, DictConfig\n \n # pylint: disable=C0103\n log = logging.getLogger(__name__)\n@@ -46,6 +46,11 @@\n try:\n clazz = get_class(config[\"class\"])\n params = config.params if \"params\" in config else OmegaConf.create()\n+ assert isinstance(\n+ params, DictConfig\n+ ), \"Input config params are expected to be a mapping, found {}\".format(\n+ type(config.params)\n+ )\n params.merge_with(OmegaConf.create(kwargs))\n return clazz(*args, **params)\n except Exception as e:\n", "issue": "Better error message if config structure passed to instantiate is invalid.\n# \ud83d\udc1b Bug\r\n\r\nIn [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if the `params` are mistakenly valued with a single string, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exists with the error mentioned in \"Stack trace/error messages\":\r\n\r\nThis is fine to some extent, which is do you (Hydra team) aim at providing this kind of output messages? \r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\nSet `params` to `bug_report` as in:\r\n\r\n```YAML\r\nclass: ...\r\nparams: bug_report\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\n> AttributeError: 'str' object has no attribute 'merge_with'\r\n[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'\r\n```\r\n**Optional video**\r\nIf applicable, record a short [video](https://asciinema.org) with to help explain your problem.\r\nYou can also \r\n\r\n## Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nI expect to have some sort of an error message explaining that `params` should be a collection of values where [[TYPE]] was provided. \r\n \r\n## System information\r\n- <!-- Hydra Version --> Hydra Version: 0.9.0\r\n\r\n- <!-- Operating system --> Operating System: Arch Linux\r\n\r\n## Additional context\r\nIf you confirm that this should be fixed, I've already implemented the solution (IMO). \r\n\nBetter error message if config structure passed to instantiate is invalid.\n# \ud83d\udc1b Bug\r\n\r\nIn [common patterns#creating objects](https://cli.dev/docs/patterns/objects), if the `params` are mistakenly valued with a single string, the [`instantiate`](https://github.com/facebookresearch/hydra/blob/cb7904c666b34a53e09194993a0dd7b10fd97c99/hydra/utils.py#L44-L53) function exists with the error mentioned in \"Stack trace/error messages\":\r\n\r\nThis is fine to some extent, which is do you (Hydra team) aim at providing this kind of output messages? \r\n\r\n<!-- A clear and concise description of what the bug is. 
-->\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\nSet `params` to `bug_report` as in:\r\n\r\n```YAML\r\nclass: ...\r\nparams: bug_report\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\n> AttributeError: 'str' object has no attribute 'merge_with'\r\n[2019-10-16 02:25:21,592][hydra.utils][ERROR] - Error instantiating [[OBJECT_NAME]] : 'str' object has no attribute 'merge_with'\r\n```\r\n**Optional video**\r\nIf applicable, record a short [video](https://asciinema.org) with to help explain your problem.\r\nYou can also \r\n\r\n## Expected Behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nI expect to have some sort of an error message explaining that `params` should be a collection of values where [[TYPE]] was provided. \r\n \r\n## System information\r\n- <!-- Hydra Version --> Hydra Version: 0.9.0\r\n\r\n- <!-- Operating system --> Operating System: Arch Linux\r\n\r\n## Additional context\r\nIf you confirm that this should be fixed, I've already implemented the solution (IMO). \r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging.config\n\nfrom omegaconf import OmegaConf\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef get_method(path):\n return get_class(path)\n\n\ndef get_class(path):\n try:\n from importlib import import_module\n\n module_path, _, class_name = path.rpartition(\".\")\n mod = import_module(module_path)\n try:\n klass = getattr(mod, class_name)\n except AttributeError:\n raise ImportError(\n \"Class {} is not in module {}\".format(class_name, module_path)\n )\n return klass\n except ValueError as e:\n log.error(\"Error initializing class \" + path)\n raise e\n\n\ndef get_static_method(full_method_name):\n try:\n spl = full_method_name.split(\".\")\n method_name = spl.pop()\n class_name = \".\".join(spl)\n clz = get_class(class_name)\n return getattr(clz, method_name)\n except Exception as e:\n log.error(\"Error getting static method {} : {}\".format(full_method_name, e))\n raise e\n\n\ndef instantiate(config, *args, **kwargs):\n assert config is not None, \"Input config is None\"\n try:\n clazz = get_class(config[\"class\"])\n params = config.params if \"params\" in config else OmegaConf.create()\n params.merge_with(OmegaConf.create(kwargs))\n return clazz(*args, **params)\n except Exception as e:\n log.error(\"Error instantiating {} : {}\".format(config[\"class\"], e))\n raise e\n", "path": "hydra/utils.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging.config\n\nfrom omegaconf import OmegaConf, DictConfig\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef get_method(path):\n return get_class(path)\n\n\ndef get_class(path):\n try:\n from importlib import import_module\n\n module_path, _, class_name = path.rpartition(\".\")\n mod = import_module(module_path)\n try:\n klass = getattr(mod, class_name)\n except AttributeError:\n raise ImportError(\n \"Class {} is not in module {}\".format(class_name, module_path)\n )\n return klass\n except ValueError as e:\n log.error(\"Error initializing class \" + path)\n raise e\n\n\ndef get_static_method(full_method_name):\n try:\n spl = full_method_name.split(\".\")\n method_name = spl.pop()\n class_name = \".\".join(spl)\n clz = get_class(class_name)\n return getattr(clz, method_name)\n except Exception as e:\n log.error(\"Error getting static method {} : {}\".format(full_method_name, e))\n raise e\n\n\ndef instantiate(config, *args, **kwargs):\n assert config is not None, \"Input config is None\"\n try:\n clazz = get_class(config[\"class\"])\n params = config.params if \"params\" in config else OmegaConf.create()\n assert isinstance(\n params, DictConfig\n ), \"Input config params are expected to be a mapping, found {}\".format(\n type(config.params)\n )\n params.merge_with(OmegaConf.create(kwargs))\n return clazz(*args, **params)\n except Exception as e:\n log.error(\"Error instantiating {} : {}\".format(config[\"class\"], e))\n raise e\n", "path": "hydra/utils.py"}]}
| 1,539 | 203 |
gh_patches_debug_64358
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-1922
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hashable classes
Hello people!
I wonder if entities like Repository, Issue, PullRequest, etc. could be made hashable in the future, i.e. so that using them in structures like dictionaries or sets would be possible?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `github/GithubObject.py`
Content:
```
1 ############################ Copyrights and license ############################
2 # #
3 # Copyright 2012 Vincent Jacques <[email protected]> #
4 # Copyright 2012 Zearin <[email protected]> #
5 # Copyright 2013 AKFish <[email protected]> #
6 # Copyright 2013 Vincent Jacques <[email protected]> #
7 # Copyright 2014 Andrew Scheller <[email protected]> #
8 # Copyright 2014 Vincent Jacques <[email protected]> #
9 # Copyright 2016 Jakub Wilk <[email protected]> #
10 # Copyright 2016 Jannis Gebauer <[email protected]> #
11 # Copyright 2016 Peter Buckley <[email protected]> #
12 # Copyright 2016 Sam Corbett <[email protected]> #
13 # Copyright 2018 sfdye <[email protected]> #
14 # #
15 # This file is part of PyGithub. #
16 # http://pygithub.readthedocs.io/ #
17 # #
18 # PyGithub is free software: you can redistribute it and/or modify it under #
19 # the terms of the GNU Lesser General Public License as published by the Free #
20 # Software Foundation, either version 3 of the License, or (at your option) #
21 # any later version. #
22 # #
23 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
24 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
25 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
26 # details. #
27 # #
28 # You should have received a copy of the GNU Lesser General Public License #
29 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
30 # #
31 ################################################################################
32
33 import datetime
34 from operator import itemgetter
35
36 from . import Consts, GithubException
37
38
39 class _NotSetType:
40 def __repr__(self):
41 return "NotSet"
42
43 value = None
44
45
46 NotSet = _NotSetType()
47
48
49 class _ValuedAttribute:
50 def __init__(self, value):
51 self.value = value
52
53
54 class _BadAttribute:
55 def __init__(self, value, expectedType, exception=None):
56 self.__value = value
57 self.__expectedType = expectedType
58 self.__exception = exception
59
60 @property
61 def value(self):
62 raise GithubException.BadAttributeException(
63 self.__value, self.__expectedType, self.__exception
64 )
65
66
67 class GithubObject:
68 """
69 Base class for all classes representing objects returned by the API.
70 """
71
72 """
73 A global debug flag to enable header validation by requester for all objects
74 """
75 CHECK_AFTER_INIT_FLAG = False
76
77 @classmethod
78 def setCheckAfterInitFlag(cls, flag):
79 cls.CHECK_AFTER_INIT_FLAG = flag
80
81 def __init__(self, requester, headers, attributes, completed):
82 self._requester = requester
83 self._initAttributes()
84 self._storeAndUseAttributes(headers, attributes)
85
86 # Ask requester to do some checking, for debug and test purpose
87 # Since it's most handy to access and kinda all-knowing
88 if self.CHECK_AFTER_INIT_FLAG: # pragma no branch (Flag always set in tests)
89 requester.check_me(self)
90
91 def _storeAndUseAttributes(self, headers, attributes):
92 # Make sure headers are assigned before calling _useAttributes
93 # (Some derived classes will use headers in _useAttributes)
94 self._headers = headers
95 self._rawData = attributes
96 self._useAttributes(attributes)
97
98 @property
99 def raw_data(self):
100 """
101 :type: dict
102 """
103 self._completeIfNeeded()
104 return self._rawData
105
106 @property
107 def raw_headers(self):
108 """
109 :type: dict
110 """
111 self._completeIfNeeded()
112 return self._headers
113
114 @staticmethod
115 def _parentUrl(url):
116 return "/".join(url.split("/")[:-1])
117
118 @staticmethod
119 def __makeSimpleAttribute(value, type):
120 if value is None or isinstance(value, type):
121 return _ValuedAttribute(value)
122 else:
123 return _BadAttribute(value, type)
124
125 @staticmethod
126 def __makeSimpleListAttribute(value, type):
127 if isinstance(value, list) and all(
128 isinstance(element, type) for element in value
129 ):
130 return _ValuedAttribute(value)
131 else:
132 return _BadAttribute(value, [type])
133
134 @staticmethod
135 def __makeTransformedAttribute(value, type, transform):
136 if value is None:
137 return _ValuedAttribute(None)
138 elif isinstance(value, type):
139 try:
140 return _ValuedAttribute(transform(value))
141 except Exception as e:
142 return _BadAttribute(value, type, e)
143 else:
144 return _BadAttribute(value, type)
145
146 @staticmethod
147 def _makeStringAttribute(value):
148 return GithubObject.__makeSimpleAttribute(value, str)
149
150 @staticmethod
151 def _makeIntAttribute(value):
152 return GithubObject.__makeSimpleAttribute(value, int)
153
154 @staticmethod
155 def _makeFloatAttribute(value):
156 return GithubObject.__makeSimpleAttribute(value, float)
157
158 @staticmethod
159 def _makeBoolAttribute(value):
160 return GithubObject.__makeSimpleAttribute(value, bool)
161
162 @staticmethod
163 def _makeDictAttribute(value):
164 return GithubObject.__makeSimpleAttribute(value, dict)
165
166 @staticmethod
167 def _makeTimestampAttribute(value):
168 return GithubObject.__makeTransformedAttribute(
169 value, int, datetime.datetime.utcfromtimestamp
170 )
171
172 @staticmethod
173 def _makeDatetimeAttribute(value):
174 def parseDatetime(s):
175 if (
176 len(s) == 24
177 ): # pragma no branch (This branch was used only when creating a download)
178 # The Downloads API has been removed. I'm keeping this branch because I have no mean
179 # to check if it's really useless now.
180 return datetime.datetime.strptime(
181 s, "%Y-%m-%dT%H:%M:%S.000Z"
182 ) # pragma no cover (This branch was used only when creating a download)
183 elif len(s) >= 25:
184 return datetime.datetime.strptime(s[:19], "%Y-%m-%dT%H:%M:%S") + (
185 1 if s[19] == "-" else -1
186 ) * datetime.timedelta(hours=int(s[20:22]), minutes=int(s[23:25]))
187 else:
188 return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
189
190 return GithubObject.__makeTransformedAttribute(value, str, parseDatetime)
191
192 def _makeClassAttribute(self, klass, value):
193 return GithubObject.__makeTransformedAttribute(
194 value,
195 dict,
196 lambda value: klass(self._requester, self._headers, value, completed=False),
197 )
198
199 @staticmethod
200 def _makeListOfStringsAttribute(value):
201 return GithubObject.__makeSimpleListAttribute(value, str)
202
203 @staticmethod
204 def _makeListOfIntsAttribute(value):
205 return GithubObject.__makeSimpleListAttribute(value, int)
206
207 @staticmethod
208 def _makeListOfDictsAttribute(value):
209 return GithubObject.__makeSimpleListAttribute(value, dict)
210
211 @staticmethod
212 def _makeListOfListOfStringsAttribute(value):
213 return GithubObject.__makeSimpleListAttribute(value, list)
214
215 def _makeListOfClassesAttribute(self, klass, value):
216 if isinstance(value, list) and all(
217 isinstance(element, dict) for element in value
218 ):
219 return _ValuedAttribute(
220 [
221 klass(self._requester, self._headers, element, completed=False)
222 for element in value
223 ]
224 )
225 else:
226 return _BadAttribute(value, [dict])
227
228 def _makeDictOfStringsToClassesAttribute(self, klass, value):
229 if isinstance(value, dict) and all(
230 isinstance(key, str) and isinstance(element, dict)
231 for key, element in value.items()
232 ):
233 return _ValuedAttribute(
234 {
235 key: klass(self._requester, self._headers, element, completed=False)
236 for key, element in value.items()
237 }
238 )
239 else:
240 return _BadAttribute(value, {str: dict})
241
242 @property
243 def etag(self):
244 """
245 :type: str
246 """
247 return self._headers.get(Consts.RES_ETAG)
248
249 @property
250 def last_modified(self):
251 """
252 :type: str
253 """
254 return self._headers.get(Consts.RES_LAST_MODIFIED)
255
256 def get__repr__(self, params):
257 """
258 Converts the object to a nicely printable string.
259 """
260
261 def format_params(params):
262 items = list(params.items())
263 for k, v in sorted(items, key=itemgetter(0), reverse=True):
264 if isinstance(v, bytes):
265 v = v.decode("utf-8")
266 if isinstance(v, str):
267 v = f'"{v}"'
268 yield f"{k}={v}"
269
270 return "{class_name}({params})".format(
271 class_name=self.__class__.__name__,
272 params=", ".join(list(format_params(params))),
273 )
274
275
276 class NonCompletableGithubObject(GithubObject):
277 def _completeIfNeeded(self):
278 pass
279
280
281 class CompletableGithubObject(GithubObject):
282 def __init__(self, requester, headers, attributes, completed):
283 super().__init__(requester, headers, attributes, completed)
284 self.__completed = completed
285
286 def __eq__(self, other):
287 return other.__class__ is self.__class__ and other._url.value == self._url.value
288
289 def __ne__(self, other):
290 return not self == other
291
292 def _completeIfNotSet(self, value):
293 if value is NotSet:
294 self._completeIfNeeded()
295
296 def _completeIfNeeded(self):
297 if not self.__completed:
298 self.__complete()
299
300 def __complete(self):
301 if self._url.value is None:
302 raise GithubException.IncompletableObject(
303 400, "Returned object contains no URL", None
304 )
305 headers, data = self._requester.requestJsonAndCheck("GET", self._url.value)
306 self._storeAndUseAttributes(headers, data)
307 self.__completed = True
308
309 def update(self, additional_headers=None):
310 """
311 Check and update the object with conditional request
312 :rtype: Boolean value indicating whether the object is changed
313 """
314 conditionalRequestHeader = dict()
315 if self.etag is not None:
316 conditionalRequestHeader[Consts.REQ_IF_NONE_MATCH] = self.etag
317 if self.last_modified is not None:
318 conditionalRequestHeader[Consts.REQ_IF_MODIFIED_SINCE] = self.last_modified
319 if additional_headers is not None:
320 conditionalRequestHeader.update(additional_headers)
321
322 status, responseHeaders, output = self._requester.requestJson(
323 "GET", self._url.value, headers=conditionalRequestHeader
324 )
325 if status == 304:
326 return False
327 else:
328 headers, data = self._requester._Requester__check(
329 status, responseHeaders, output
330 )
331 self._storeAndUseAttributes(headers, data)
332 self.__completed = True
333 return True
334
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/github/GithubObject.py b/github/GithubObject.py
--- a/github/GithubObject.py
+++ b/github/GithubObject.py
@@ -286,6 +286,9 @@
def __eq__(self, other):
return other.__class__ is self.__class__ and other._url.value == self._url.value
+ def __hash__(self):
+ return hash(self._url.value)
+
def __ne__(self, other):
return not self == other
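
To illustrate why adding `__hash__` matters (an editorial sketch using a stand-in class, not PyGithub's real `Repository`), hashing by the same `_url` that `__eq__` compares makes such objects usable as dict keys and set members:

```python
# Stand-in class mirroring the __eq__/__hash__-by-URL pattern from the patch above.
class UrlIdentified:
    def __init__(self, url):
        self._url = url

    def __eq__(self, other):
        return other.__class__ is self.__class__ and other._url == self._url

    def __hash__(self):
        return hash(self._url)


a = UrlIdentified("https://api.github.com/repos/octocat/Hello-World")
b = UrlIdentified("https://api.github.com/repos/octocat/Hello-World")
seen = {a}
print(b in seen)  # True: equal URL implies equal hash, so set/dict lookups work
```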
|
{"golden_diff": "diff --git a/github/GithubObject.py b/github/GithubObject.py\n--- a/github/GithubObject.py\n+++ b/github/GithubObject.py\n@@ -286,6 +286,9 @@\n def __eq__(self, other):\n return other.__class__ is self.__class__ and other._url.value == self._url.value\n \n+ def __hash__(self):\n+ return hash(self._url.value)\n+\n def __ne__(self, other):\n return not self == other\n", "issue": "Hashable classes\nHello people!\r\n\r\nI wonder if entities like Repository, Issue, PullRequest and etc. could be hashable in future, i.e. using them in structures like dictionary or sets would be possible?\n", "before_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Andrew Scheller <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2016 Sam Corbett <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport datetime\nfrom operator import itemgetter\n\nfrom . 
import Consts, GithubException\n\n\nclass _NotSetType:\n def __repr__(self):\n return \"NotSet\"\n\n value = None\n\n\nNotSet = _NotSetType()\n\n\nclass _ValuedAttribute:\n def __init__(self, value):\n self.value = value\n\n\nclass _BadAttribute:\n def __init__(self, value, expectedType, exception=None):\n self.__value = value\n self.__expectedType = expectedType\n self.__exception = exception\n\n @property\n def value(self):\n raise GithubException.BadAttributeException(\n self.__value, self.__expectedType, self.__exception\n )\n\n\nclass GithubObject:\n \"\"\"\n Base class for all classes representing objects returned by the API.\n \"\"\"\n\n \"\"\"\n A global debug flag to enable header validation by requester for all objects\n \"\"\"\n CHECK_AFTER_INIT_FLAG = False\n\n @classmethod\n def setCheckAfterInitFlag(cls, flag):\n cls.CHECK_AFTER_INIT_FLAG = flag\n\n def __init__(self, requester, headers, attributes, completed):\n self._requester = requester\n self._initAttributes()\n self._storeAndUseAttributes(headers, attributes)\n\n # Ask requester to do some checking, for debug and test purpose\n # Since it's most handy to access and kinda all-knowing\n if self.CHECK_AFTER_INIT_FLAG: # pragma no branch (Flag always set in tests)\n requester.check_me(self)\n\n def _storeAndUseAttributes(self, headers, attributes):\n # Make sure headers are assigned before calling _useAttributes\n # (Some derived classes will use headers in _useAttributes)\n self._headers = headers\n self._rawData = attributes\n self._useAttributes(attributes)\n\n @property\n def raw_data(self):\n \"\"\"\n :type: dict\n \"\"\"\n self._completeIfNeeded()\n return self._rawData\n\n @property\n def raw_headers(self):\n \"\"\"\n :type: dict\n \"\"\"\n self._completeIfNeeded()\n return self._headers\n\n @staticmethod\n def _parentUrl(url):\n return \"/\".join(url.split(\"/\")[:-1])\n\n @staticmethod\n def __makeSimpleAttribute(value, type):\n if value is None or isinstance(value, type):\n return _ValuedAttribute(value)\n else:\n return _BadAttribute(value, type)\n\n @staticmethod\n def __makeSimpleListAttribute(value, type):\n if isinstance(value, list) and all(\n isinstance(element, type) for element in value\n ):\n return _ValuedAttribute(value)\n else:\n return _BadAttribute(value, [type])\n\n @staticmethod\n def __makeTransformedAttribute(value, type, transform):\n if value is None:\n return _ValuedAttribute(None)\n elif isinstance(value, type):\n try:\n return _ValuedAttribute(transform(value))\n except Exception as e:\n return _BadAttribute(value, type, e)\n else:\n return _BadAttribute(value, type)\n\n @staticmethod\n def _makeStringAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, str)\n\n @staticmethod\n def _makeIntAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, int)\n\n @staticmethod\n def _makeFloatAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, float)\n\n @staticmethod\n def _makeBoolAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, bool)\n\n @staticmethod\n def _makeDictAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, dict)\n\n @staticmethod\n def _makeTimestampAttribute(value):\n return GithubObject.__makeTransformedAttribute(\n value, int, datetime.datetime.utcfromtimestamp\n )\n\n @staticmethod\n def _makeDatetimeAttribute(value):\n def parseDatetime(s):\n if (\n len(s) == 24\n ): # pragma no branch (This branch was used only when creating a download)\n # The Downloads API has been removed. 
I'm keeping this branch because I have no mean\n # to check if it's really useless now.\n return datetime.datetime.strptime(\n s, \"%Y-%m-%dT%H:%M:%S.000Z\"\n ) # pragma no cover (This branch was used only when creating a download)\n elif len(s) >= 25:\n return datetime.datetime.strptime(s[:19], \"%Y-%m-%dT%H:%M:%S\") + (\n 1 if s[19] == \"-\" else -1\n ) * datetime.timedelta(hours=int(s[20:22]), minutes=int(s[23:25]))\n else:\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%SZ\")\n\n return GithubObject.__makeTransformedAttribute(value, str, parseDatetime)\n\n def _makeClassAttribute(self, klass, value):\n return GithubObject.__makeTransformedAttribute(\n value,\n dict,\n lambda value: klass(self._requester, self._headers, value, completed=False),\n )\n\n @staticmethod\n def _makeListOfStringsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, str)\n\n @staticmethod\n def _makeListOfIntsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, int)\n\n @staticmethod\n def _makeListOfDictsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, dict)\n\n @staticmethod\n def _makeListOfListOfStringsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, list)\n\n def _makeListOfClassesAttribute(self, klass, value):\n if isinstance(value, list) and all(\n isinstance(element, dict) for element in value\n ):\n return _ValuedAttribute(\n [\n klass(self._requester, self._headers, element, completed=False)\n for element in value\n ]\n )\n else:\n return _BadAttribute(value, [dict])\n\n def _makeDictOfStringsToClassesAttribute(self, klass, value):\n if isinstance(value, dict) and all(\n isinstance(key, str) and isinstance(element, dict)\n for key, element in value.items()\n ):\n return _ValuedAttribute(\n {\n key: klass(self._requester, self._headers, element, completed=False)\n for key, element in value.items()\n }\n )\n else:\n return _BadAttribute(value, {str: dict})\n\n @property\n def etag(self):\n \"\"\"\n :type: str\n \"\"\"\n return self._headers.get(Consts.RES_ETAG)\n\n @property\n def last_modified(self):\n \"\"\"\n :type: str\n \"\"\"\n return self._headers.get(Consts.RES_LAST_MODIFIED)\n\n def get__repr__(self, params):\n \"\"\"\n Converts the object to a nicely printable string.\n \"\"\"\n\n def format_params(params):\n items = list(params.items())\n for k, v in sorted(items, key=itemgetter(0), reverse=True):\n if isinstance(v, bytes):\n v = v.decode(\"utf-8\")\n if isinstance(v, str):\n v = f'\"{v}\"'\n yield f\"{k}={v}\"\n\n return \"{class_name}({params})\".format(\n class_name=self.__class__.__name__,\n params=\", \".join(list(format_params(params))),\n )\n\n\nclass NonCompletableGithubObject(GithubObject):\n def _completeIfNeeded(self):\n pass\n\n\nclass CompletableGithubObject(GithubObject):\n def __init__(self, requester, headers, attributes, completed):\n super().__init__(requester, headers, attributes, completed)\n self.__completed = completed\n\n def __eq__(self, other):\n return other.__class__ is self.__class__ and other._url.value == self._url.value\n\n def __ne__(self, other):\n return not self == other\n\n def _completeIfNotSet(self, value):\n if value is NotSet:\n self._completeIfNeeded()\n\n def _completeIfNeeded(self):\n if not self.__completed:\n self.__complete()\n\n def __complete(self):\n if self._url.value is None:\n raise GithubException.IncompletableObject(\n 400, \"Returned object contains no URL\", None\n )\n headers, data = self._requester.requestJsonAndCheck(\"GET\", 
self._url.value)\n self._storeAndUseAttributes(headers, data)\n self.__completed = True\n\n def update(self, additional_headers=None):\n \"\"\"\n Check and update the object with conditional request\n :rtype: Boolean value indicating whether the object is changed\n \"\"\"\n conditionalRequestHeader = dict()\n if self.etag is not None:\n conditionalRequestHeader[Consts.REQ_IF_NONE_MATCH] = self.etag\n if self.last_modified is not None:\n conditionalRequestHeader[Consts.REQ_IF_MODIFIED_SINCE] = self.last_modified\n if additional_headers is not None:\n conditionalRequestHeader.update(additional_headers)\n\n status, responseHeaders, output = self._requester.requestJson(\n \"GET\", self._url.value, headers=conditionalRequestHeader\n )\n if status == 304:\n return False\n else:\n headers, data = self._requester._Requester__check(\n status, responseHeaders, output\n )\n self._storeAndUseAttributes(headers, data)\n self.__completed = True\n return True\n", "path": "github/GithubObject.py"}], "after_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Andrew Scheller <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Jakub Wilk <[email protected]> #\n# Copyright 2016 Jannis Gebauer <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2016 Sam Corbett <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nimport datetime\nfrom operator import itemgetter\n\nfrom . 
import Consts, GithubException\n\n\nclass _NotSetType:\n def __repr__(self):\n return \"NotSet\"\n\n value = None\n\n\nNotSet = _NotSetType()\n\n\nclass _ValuedAttribute:\n def __init__(self, value):\n self.value = value\n\n\nclass _BadAttribute:\n def __init__(self, value, expectedType, exception=None):\n self.__value = value\n self.__expectedType = expectedType\n self.__exception = exception\n\n @property\n def value(self):\n raise GithubException.BadAttributeException(\n self.__value, self.__expectedType, self.__exception\n )\n\n\nclass GithubObject:\n \"\"\"\n Base class for all classes representing objects returned by the API.\n \"\"\"\n\n \"\"\"\n A global debug flag to enable header validation by requester for all objects\n \"\"\"\n CHECK_AFTER_INIT_FLAG = False\n\n @classmethod\n def setCheckAfterInitFlag(cls, flag):\n cls.CHECK_AFTER_INIT_FLAG = flag\n\n def __init__(self, requester, headers, attributes, completed):\n self._requester = requester\n self._initAttributes()\n self._storeAndUseAttributes(headers, attributes)\n\n # Ask requester to do some checking, for debug and test purpose\n # Since it's most handy to access and kinda all-knowing\n if self.CHECK_AFTER_INIT_FLAG: # pragma no branch (Flag always set in tests)\n requester.check_me(self)\n\n def _storeAndUseAttributes(self, headers, attributes):\n # Make sure headers are assigned before calling _useAttributes\n # (Some derived classes will use headers in _useAttributes)\n self._headers = headers\n self._rawData = attributes\n self._useAttributes(attributes)\n\n @property\n def raw_data(self):\n \"\"\"\n :type: dict\n \"\"\"\n self._completeIfNeeded()\n return self._rawData\n\n @property\n def raw_headers(self):\n \"\"\"\n :type: dict\n \"\"\"\n self._completeIfNeeded()\n return self._headers\n\n @staticmethod\n def _parentUrl(url):\n return \"/\".join(url.split(\"/\")[:-1])\n\n @staticmethod\n def __makeSimpleAttribute(value, type):\n if value is None or isinstance(value, type):\n return _ValuedAttribute(value)\n else:\n return _BadAttribute(value, type)\n\n @staticmethod\n def __makeSimpleListAttribute(value, type):\n if isinstance(value, list) and all(\n isinstance(element, type) for element in value\n ):\n return _ValuedAttribute(value)\n else:\n return _BadAttribute(value, [type])\n\n @staticmethod\n def __makeTransformedAttribute(value, type, transform):\n if value is None:\n return _ValuedAttribute(None)\n elif isinstance(value, type):\n try:\n return _ValuedAttribute(transform(value))\n except Exception as e:\n return _BadAttribute(value, type, e)\n else:\n return _BadAttribute(value, type)\n\n @staticmethod\n def _makeStringAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, str)\n\n @staticmethod\n def _makeIntAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, int)\n\n @staticmethod\n def _makeFloatAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, float)\n\n @staticmethod\n def _makeBoolAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, bool)\n\n @staticmethod\n def _makeDictAttribute(value):\n return GithubObject.__makeSimpleAttribute(value, dict)\n\n @staticmethod\n def _makeTimestampAttribute(value):\n return GithubObject.__makeTransformedAttribute(\n value, int, datetime.datetime.utcfromtimestamp\n )\n\n @staticmethod\n def _makeDatetimeAttribute(value):\n def parseDatetime(s):\n if (\n len(s) == 24\n ): # pragma no branch (This branch was used only when creating a download)\n # The Downloads API has been removed. 
I'm keeping this branch because I have no mean\n # to check if it's really useless now.\n return datetime.datetime.strptime(\n s, \"%Y-%m-%dT%H:%M:%S.000Z\"\n ) # pragma no cover (This branch was used only when creating a download)\n elif len(s) >= 25:\n return datetime.datetime.strptime(s[:19], \"%Y-%m-%dT%H:%M:%S\") + (\n 1 if s[19] == \"-\" else -1\n ) * datetime.timedelta(hours=int(s[20:22]), minutes=int(s[23:25]))\n else:\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%SZ\")\n\n return GithubObject.__makeTransformedAttribute(value, str, parseDatetime)\n\n def _makeClassAttribute(self, klass, value):\n return GithubObject.__makeTransformedAttribute(\n value,\n dict,\n lambda value: klass(self._requester, self._headers, value, completed=False),\n )\n\n @staticmethod\n def _makeListOfStringsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, str)\n\n @staticmethod\n def _makeListOfIntsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, int)\n\n @staticmethod\n def _makeListOfDictsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, dict)\n\n @staticmethod\n def _makeListOfListOfStringsAttribute(value):\n return GithubObject.__makeSimpleListAttribute(value, list)\n\n def _makeListOfClassesAttribute(self, klass, value):\n if isinstance(value, list) and all(\n isinstance(element, dict) for element in value\n ):\n return _ValuedAttribute(\n [\n klass(self._requester, self._headers, element, completed=False)\n for element in value\n ]\n )\n else:\n return _BadAttribute(value, [dict])\n\n def _makeDictOfStringsToClassesAttribute(self, klass, value):\n if isinstance(value, dict) and all(\n isinstance(key, str) and isinstance(element, dict)\n for key, element in value.items()\n ):\n return _ValuedAttribute(\n {\n key: klass(self._requester, self._headers, element, completed=False)\n for key, element in value.items()\n }\n )\n else:\n return _BadAttribute(value, {str: dict})\n\n @property\n def etag(self):\n \"\"\"\n :type: str\n \"\"\"\n return self._headers.get(Consts.RES_ETAG)\n\n @property\n def last_modified(self):\n \"\"\"\n :type: str\n \"\"\"\n return self._headers.get(Consts.RES_LAST_MODIFIED)\n\n def get__repr__(self, params):\n \"\"\"\n Converts the object to a nicely printable string.\n \"\"\"\n\n def format_params(params):\n items = list(params.items())\n for k, v in sorted(items, key=itemgetter(0), reverse=True):\n if isinstance(v, bytes):\n v = v.decode(\"utf-8\")\n if isinstance(v, str):\n v = f'\"{v}\"'\n yield f\"{k}={v}\"\n\n return \"{class_name}({params})\".format(\n class_name=self.__class__.__name__,\n params=\", \".join(list(format_params(params))),\n )\n\n\nclass NonCompletableGithubObject(GithubObject):\n def _completeIfNeeded(self):\n pass\n\n\nclass CompletableGithubObject(GithubObject):\n def __init__(self, requester, headers, attributes, completed):\n super().__init__(requester, headers, attributes, completed)\n self.__completed = completed\n\n def __eq__(self, other):\n return other.__class__ is self.__class__ and other._url.value == self._url.value\n\n def __hash__(self):\n return hash(self._url.value)\n\n def __ne__(self, other):\n return not self == other\n\n def _completeIfNotSet(self, value):\n if value is NotSet:\n self._completeIfNeeded()\n\n def _completeIfNeeded(self):\n if not self.__completed:\n self.__complete()\n\n def __complete(self):\n if self._url.value is None:\n raise GithubException.IncompletableObject(\n 400, \"Returned object contains no URL\", None\n )\n headers, data = 
self._requester.requestJsonAndCheck(\"GET\", self._url.value)\n self._storeAndUseAttributes(headers, data)\n self.__completed = True\n\n def update(self, additional_headers=None):\n \"\"\"\n Check and update the object with conditional request\n :rtype: Boolean value indicating whether the object is changed\n \"\"\"\n conditionalRequestHeader = dict()\n if self.etag is not None:\n conditionalRequestHeader[Consts.REQ_IF_NONE_MATCH] = self.etag\n if self.last_modified is not None:\n conditionalRequestHeader[Consts.REQ_IF_MODIFIED_SINCE] = self.last_modified\n if additional_headers is not None:\n conditionalRequestHeader.update(additional_headers)\n\n status, responseHeaders, output = self._requester.requestJson(\n \"GET\", self._url.value, headers=conditionalRequestHeader\n )\n if status == 304:\n return False\n else:\n headers, data = self._requester._Requester__check(\n status, responseHeaders, output\n )\n self._storeAndUseAttributes(headers, data)\n self.__completed = True\n return True\n", "path": "github/GithubObject.py"}]}
| 3,717 | 111 |
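The patch captured in the row above adds a `__hash__` method alongside the existing `__eq__` on `CompletableGithubObject`. The snippet below is a minimal, self-contained sketch (hypothetical classes and URL, not PyGithub code) of the Python 3 rule that motivates it: a class that defines `__eq__` without `__hash__` becomes unhashable.

```python
# Minimal, hypothetical sketch of why adding __hash__ next to __eq__ matters:
# defining __eq__ alone sets __hash__ to None in Python 3, so instances cannot
# be placed in sets or used as dict keys.
class WithoutHash:
    def __init__(self, url):
        self.url = url

    def __eq__(self, other):
        return isinstance(other, WithoutHash) and other.url == self.url


class WithHash(WithoutHash):
    def __hash__(self):
        # Hash on the same key used for equality so equal objects share a hash.
        return hash(self.url)


try:
    {WithoutHash("https://api.github.com/repos/example")}
except TypeError as exc:
    print("unhashable:", exc)  # TypeError: unhashable type: 'WithoutHash'

repos = {WithHash("https://api.github.com/repos/example"),
         WithHash("https://api.github.com/repos/example")}
print(len(repos))  # 1: equal objects deduplicate once __hash__ exists
```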
gh_patches_debug_13359 | rasdani/github-patches | git_diff | pypa__cibuildwheel-1031 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPy has released bug fixes and a Python 3.9 build
### Description
PyPy released version v7.3.8. It would be nice to be able to use it in cibuildwheel, including the newly released Python 3.9.
### Build log
_No response_
### CI config
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/update_pythons.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from __future__ import annotations
4
5 import copy
6 import difflib
7 import logging
8 from pathlib import Path
9 from typing import Any, Union
10
11 import click
12 import requests
13 import rich
14 import tomli
15 from packaging.specifiers import Specifier
16 from packaging.version import Version
17 from rich.logging import RichHandler
18 from rich.syntax import Syntax
19
20 from cibuildwheel.extra import dump_python_configurations
21 from cibuildwheel.typing import Final, Literal, TypedDict
22
23 log = logging.getLogger("cibw")
24
25 # Looking up the dir instead of using utils.resources_dir
26 # since we want to write to it.
27 DIR: Final[Path] = Path(__file__).parent.parent.resolve()
28 RESOURCES_DIR: Final[Path] = DIR / "cibuildwheel/resources"
29
30
31 ArchStr = Literal["32", "64", "ARM64"]
32
33
34 class ConfigWinCP(TypedDict):
35 identifier: str
36 version: str
37 arch: str
38
39
40 class ConfigWinPP(TypedDict):
41 identifier: str
42 version: str
43 arch: str
44 url: str
45
46
47 class ConfigMacOS(TypedDict):
48 identifier: str
49 version: str
50 url: str
51
52
53 AnyConfig = Union[ConfigWinCP, ConfigWinPP, ConfigMacOS]
54
55
56 # The following set of "Versions" classes allow the initial call to the APIs to
57 # be cached and reused in the `update_version_*` methods.
58
59
60 class WindowsVersions:
61 def __init__(self, arch_str: ArchStr) -> None:
62
63 response = requests.get("https://api.nuget.org/v3/index.json")
64 response.raise_for_status()
65 api_info = response.json()
66
67 for resource in api_info["resources"]:
68 if resource["@type"] == "PackageBaseAddress/3.0.0":
69 endpoint = resource["@id"]
70
71 ARCH_DICT = {"32": "win32", "64": "win_amd64", "ARM64": "win_arm64"}
72 PACKAGE_DICT = {"32": "pythonx86", "64": "python", "ARM64": "pythonarm64"}
73
74 self.arch_str = arch_str
75 self.arch = ARCH_DICT[arch_str]
76 package = PACKAGE_DICT[arch_str]
77
78 response = requests.get(f"{endpoint}{package}/index.json")
79 response.raise_for_status()
80 cp_info = response.json()
81
82 self.version_dict = {Version(v): v for v in cp_info["versions"]}
83
84 def update_version_windows(self, spec: Specifier) -> ConfigWinCP | None:
85
86 # Specifier.filter selects all non pre-releases that match the spec,
87 # unless there are only pre-releases, then it selects pre-releases
88 # instead (like pip)
89 unsorted_versions = spec.filter(self.version_dict)
90 versions = sorted(unsorted_versions, reverse=True)
91
92 log.debug(f"Windows {self.arch} {spec} has {', '.join(str(v) for v in versions)}")
93
94 if not versions:
95 return None
96
97 version = versions[0]
98 identifier = f"cp{version.major}{version.minor}-{self.arch}"
99 return ConfigWinCP(
100 identifier=identifier,
101 version=self.version_dict[version],
102 arch=self.arch_str,
103 )
104
105
106 class PyPyVersions:
107 def __init__(self, arch_str: ArchStr):
108
109 response = requests.get("https://downloads.python.org/pypy/versions.json")
110 response.raise_for_status()
111
112 releases = [r for r in response.json() if r["pypy_version"] != "nightly"]
113 for release in releases:
114 release["pypy_version"] = Version(release["pypy_version"])
115 release["python_version"] = Version(release["python_version"])
116
117 self.releases = [
118 r
119 for r in releases
120 if not r["pypy_version"].is_prerelease and not r["pypy_version"].is_devrelease
121 ]
122 self.arch = arch_str
123
124 def get_arch_file(self, release: dict[str, Any]) -> str:
125 urls: list[str] = [
126 rf["download_url"]
127 for rf in release["files"]
128 if "" in rf["platform"] == f"win{self.arch}"
129 ]
130 return urls[0] if urls else ""
131
132 def update_version_windows(self, spec: Specifier) -> ConfigWinCP:
133 releases = [r for r in self.releases if spec.contains(r["python_version"])]
134 releases = sorted(releases, key=lambda r: r["pypy_version"]) # type: ignore[no-any-return]
135 releases = [r for r in releases if self.get_arch_file(r)]
136
137 if not releases:
138 raise RuntimeError(f"PyPy Win {self.arch} not found for {spec}! {self.releases}")
139
140 version_arch = "win32" if self.arch == "32" else "win_amd64"
141
142 release = releases[-1]
143 version = release["python_version"]
144 identifier = f"pp{version.major}{version.minor}-{version_arch}"
145 url = self.get_arch_file(release)
146
147 return ConfigWinPP(
148 identifier=identifier,
149 version=f"{version.major}.{version.minor}",
150 arch=self.arch,
151 url=url,
152 )
153
154 def update_version_macos(self, spec: Specifier) -> ConfigMacOS:
155 if self.arch != "64":
156 raise RuntimeError("Other archs not supported yet on macOS")
157
158 releases = [r for r in self.releases if spec.contains(r["python_version"])]
159 releases = sorted(releases, key=lambda r: r["pypy_version"]) # type: ignore[no-any-return]
160
161 if not releases:
162 raise RuntimeError(f"PyPy macOS {self.arch} not found for {spec}!")
163
164 release = releases[-1]
165 version = release["python_version"]
166 identifier = f"pp{version.major}{version.minor}-macosx_x86_64"
167
168 (url,) = (
169 rf["download_url"]
170 for rf in release["files"]
171 if "" in rf["platform"] == "darwin" and rf["arch"] == "x64"
172 )
173
174 return ConfigMacOS(
175 identifier=identifier,
176 version=f"{version.major}.{version.minor}",
177 url=url,
178 )
179
180
181 class CPythonVersions:
182 def __init__(self) -> None:
183
184 response = requests.get(
185 "https://www.python.org/api/v2/downloads/release/?is_published=true"
186 )
187 response.raise_for_status()
188
189 releases_info = response.json()
190
191 self.versions_dict: dict[Version, int] = {}
192 for release in releases_info:
193 # Removing the prefix, Python 3.9 would use: release["name"].removeprefix("Python ")
194 version = Version(release["name"][7:])
195
196 uri = int(release["resource_uri"].rstrip("/").split("/")[-1])
197 self.versions_dict[version] = uri
198
199 def update_version_macos(
200 self, identifier: str, version: Version, spec: Specifier
201 ) -> ConfigMacOS | None:
202
203 # see note above on Specifier.filter
204 unsorted_versions = spec.filter(self.versions_dict)
205 sorted_versions = sorted(unsorted_versions, reverse=True)
206
207 if version <= Version("3.8.9999"):
208 file_ident = "macosx10.9.pkg"
209 else:
210 file_ident = "macos11.pkg"
211
212 for new_version in sorted_versions:
213 # Find the first patch version that contains the requested file
214 uri = self.versions_dict[new_version]
215 response = requests.get(
216 f"https://www.python.org/api/v2/downloads/release_file/?release={uri}"
217 )
218 response.raise_for_status()
219 file_info = response.json()
220
221 urls = [rf["url"] for rf in file_info if file_ident in rf["url"]]
222 if urls:
223 return ConfigMacOS(
224 identifier=identifier,
225 version=f"{new_version.major}.{new_version.minor}",
226 url=urls[0],
227 )
228
229 return None
230
231
232 # This is a universal interface to all the above Versions classes. Given an
233 # identifier, it updates a config dict.
234
235
236 class AllVersions:
237 def __init__(self) -> None:
238 self.windows_32 = WindowsVersions("32")
239 self.windows_64 = WindowsVersions("64")
240 self.windows_arm64 = WindowsVersions("ARM64")
241 self.windows_pypy_64 = PyPyVersions("64")
242
243 self.macos_cpython = CPythonVersions()
244 self.macos_pypy = PyPyVersions("64")
245
246 def update_config(self, config: dict[str, str]) -> None:
247 identifier = config["identifier"]
248 version = Version(config["version"])
249 spec = Specifier(f"=={version.major}.{version.minor}.*")
250 log.info(f"Reading in '{identifier}' -> {spec} @ {version}")
251 orig_config = copy.copy(config)
252 config_update: AnyConfig | None = None
253
254 # We need to use ** in update due to MyPy (probably a bug)
255 if "macos" in identifier:
256 if identifier.startswith("cp"):
257 config_update = self.macos_cpython.update_version_macos(identifier, version, spec)
258 elif identifier.startswith("pp"):
259 config_update = self.macos_pypy.update_version_macos(spec)
260 elif "win32" in identifier:
261 if identifier.startswith("cp"):
262 config_update = self.windows_32.update_version_windows(spec)
263 elif "win_amd64" in identifier:
264 if identifier.startswith("cp"):
265 config_update = self.windows_64.update_version_windows(spec)
266 elif identifier.startswith("pp"):
267 config_update = self.windows_pypy_64.update_version_windows(spec)
268 elif "win_arm64" in identifier:
269 if identifier.startswith("cp"):
270 config_update = self.windows_arm64.update_version_windows(spec)
271
272 assert config_update is not None, f"{identifier} not found!"
273 config.update(**config_update)
274
275 if config != orig_config:
276 log.info(f" Updated {orig_config} to {config}")
277
278
279 @click.command()
280 @click.option("--force", is_flag=True)
281 @click.option(
282 "--level", default="INFO", type=click.Choice(["WARNING", "INFO", "DEBUG"], case_sensitive=False)
283 )
284 def update_pythons(force: bool, level: str) -> None:
285
286 logging.basicConfig(
287 level="INFO",
288 format="%(message)s",
289 datefmt="[%X]",
290 handlers=[RichHandler(rich_tracebacks=True, markup=True)],
291 )
292 log.setLevel(level)
293
294 all_versions = AllVersions()
295 toml_file_path = RESOURCES_DIR / "build-platforms.toml"
296
297 original_toml = toml_file_path.read_text()
298 with toml_file_path.open("rb") as f:
299 configs = tomli.load(f)
300
301 for config in configs["windows"]["python_configurations"]:
302 all_versions.update_config(config)
303
304 for config in configs["macos"]["python_configurations"]:
305 all_versions.update_config(config)
306
307 result_toml = dump_python_configurations(configs)
308
309 rich.print() # spacer
310
311 if original_toml == result_toml:
312 rich.print("[green]Check complete, Python configurations unchanged.")
313 return
314
315 rich.print("Python configurations updated.")
316 rich.print("Changes:")
317 rich.print()
318
319 toml_relpath = toml_file_path.relative_to(DIR).as_posix()
320 diff_lines = difflib.unified_diff(
321 original_toml.splitlines(keepends=True),
322 result_toml.splitlines(keepends=True),
323 fromfile=toml_relpath,
324 tofile=toml_relpath,
325 )
326 rich.print(Syntax("".join(diff_lines), "diff", theme="ansi_light"))
327 rich.print()
328
329 if force:
330 toml_file_path.write_text(result_toml)
331 rich.print("[green]TOML file updated.")
332 else:
333 rich.print("[yellow]File left unchanged. Use --force flag to update.")
334
335
336 if __name__ == "__main__":
337 update_pythons()
338
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bin/update_pythons.py b/bin/update_pythons.py
--- a/bin/update_pythons.py
+++ b/bin/update_pythons.py
@@ -109,7 +109,12 @@
response = requests.get("https://downloads.python.org/pypy/versions.json")
response.raise_for_status()
- releases = [r for r in response.json() if r["pypy_version"] != "nightly"]
+ releases = [
+ r
+ for r in response.json()
+ if r["pypy_version"] != "nightly"
+ and f'{r["python_version"]}-{r["pypy_version"]}' != "3.7.12-7.3.8"
+ ]
for release in releases:
release["pypy_version"] = Version(release["pypy_version"])
release["python_version"] = Version(release["python_version"])
|
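The golden diff above tightens the release filter: besides skipping nightlies, it now also skips the PyPy 7.3.8 / Python 3.7.12 combination. Below is a stand-alone sketch of that filter in isolation; the release dicts are made up for illustration rather than taken from the real downloads.python.org feed.

```python
# Stand-alone sketch of the filter added above. The release dicts are
# illustrative; the real data comes from downloads.python.org/pypy/versions.json.
releases = [
    {"python_version": "3.7.12", "pypy_version": "7.3.8"},    # explicitly excluded by the filter
    {"python_version": "3.8.12", "pypy_version": "7.3.8"},    # kept
    {"python_version": "3.9.10", "pypy_version": "nightly"},  # nightlies always dropped
]

kept = [
    r
    for r in releases
    if r["pypy_version"] != "nightly"
    and f'{r["python_version"]}-{r["pypy_version"]}' != "3.7.12-7.3.8"
]

print(kept)  # only the 3.8.12 / 7.3.8 entry survives
```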
{"golden_diff": "diff --git a/bin/update_pythons.py b/bin/update_pythons.py\n--- a/bin/update_pythons.py\n+++ b/bin/update_pythons.py\n@@ -109,7 +109,12 @@\n response = requests.get(\"https://downloads.python.org/pypy/versions.json\")\n response.raise_for_status()\n \n- releases = [r for r in response.json() if r[\"pypy_version\"] != \"nightly\"]\n+ releases = [\n+ r\n+ for r in response.json()\n+ if r[\"pypy_version\"] != \"nightly\"\n+ and f'{r[\"python_version\"]}-{r[\"pypy_version\"]}' != \"3.7.12-7.3.8\"\n+ ]\n for release in releases:\n release[\"pypy_version\"] = Version(release[\"pypy_version\"])\n release[\"python_version\"] = Version(release[\"python_version\"])\n", "issue": "PyPy has released bug fixes and a python3.9\n### Description\n\nPyPy released version v7.3.8. It would be nice to be able to use it in cibuildwheel, including the newly released python3.9\n\n### Build log\n\n_No response_\n\n### CI config\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nimport copy\nimport difflib\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Union\n\nimport click\nimport requests\nimport rich\nimport tomli\nfrom packaging.specifiers import Specifier\nfrom packaging.version import Version\nfrom rich.logging import RichHandler\nfrom rich.syntax import Syntax\n\nfrom cibuildwheel.extra import dump_python_configurations\nfrom cibuildwheel.typing import Final, Literal, TypedDict\n\nlog = logging.getLogger(\"cibw\")\n\n# Looking up the dir instead of using utils.resources_dir\n# since we want to write to it.\nDIR: Final[Path] = Path(__file__).parent.parent.resolve()\nRESOURCES_DIR: Final[Path] = DIR / \"cibuildwheel/resources\"\n\n\nArchStr = Literal[\"32\", \"64\", \"ARM64\"]\n\n\nclass ConfigWinCP(TypedDict):\n identifier: str\n version: str\n arch: str\n\n\nclass ConfigWinPP(TypedDict):\n identifier: str\n version: str\n arch: str\n url: str\n\n\nclass ConfigMacOS(TypedDict):\n identifier: str\n version: str\n url: str\n\n\nAnyConfig = Union[ConfigWinCP, ConfigWinPP, ConfigMacOS]\n\n\n# The following set of \"Versions\" classes allow the initial call to the APIs to\n# be cached and reused in the `update_version_*` methods.\n\n\nclass WindowsVersions:\n def __init__(self, arch_str: ArchStr) -> None:\n\n response = requests.get(\"https://api.nuget.org/v3/index.json\")\n response.raise_for_status()\n api_info = response.json()\n\n for resource in api_info[\"resources\"]:\n if resource[\"@type\"] == \"PackageBaseAddress/3.0.0\":\n endpoint = resource[\"@id\"]\n\n ARCH_DICT = {\"32\": \"win32\", \"64\": \"win_amd64\", \"ARM64\": \"win_arm64\"}\n PACKAGE_DICT = {\"32\": \"pythonx86\", \"64\": \"python\", \"ARM64\": \"pythonarm64\"}\n\n self.arch_str = arch_str\n self.arch = ARCH_DICT[arch_str]\n package = PACKAGE_DICT[arch_str]\n\n response = requests.get(f\"{endpoint}{package}/index.json\")\n response.raise_for_status()\n cp_info = response.json()\n\n self.version_dict = {Version(v): v for v in cp_info[\"versions\"]}\n\n def update_version_windows(self, spec: Specifier) -> ConfigWinCP | None:\n\n # Specifier.filter selects all non pre-releases that match the spec,\n # unless there are only pre-releases, then it selects pre-releases\n # instead (like pip)\n unsorted_versions = spec.filter(self.version_dict)\n versions = sorted(unsorted_versions, reverse=True)\n\n log.debug(f\"Windows {self.arch} {spec} has {', '.join(str(v) for v in versions)}\")\n\n if not versions:\n return None\n\n version = versions[0]\n 
identifier = f\"cp{version.major}{version.minor}-{self.arch}\"\n return ConfigWinCP(\n identifier=identifier,\n version=self.version_dict[version],\n arch=self.arch_str,\n )\n\n\nclass PyPyVersions:\n def __init__(self, arch_str: ArchStr):\n\n response = requests.get(\"https://downloads.python.org/pypy/versions.json\")\n response.raise_for_status()\n\n releases = [r for r in response.json() if r[\"pypy_version\"] != \"nightly\"]\n for release in releases:\n release[\"pypy_version\"] = Version(release[\"pypy_version\"])\n release[\"python_version\"] = Version(release[\"python_version\"])\n\n self.releases = [\n r\n for r in releases\n if not r[\"pypy_version\"].is_prerelease and not r[\"pypy_version\"].is_devrelease\n ]\n self.arch = arch_str\n\n def get_arch_file(self, release: dict[str, Any]) -> str:\n urls: list[str] = [\n rf[\"download_url\"]\n for rf in release[\"files\"]\n if \"\" in rf[\"platform\"] == f\"win{self.arch}\"\n ]\n return urls[0] if urls else \"\"\n\n def update_version_windows(self, spec: Specifier) -> ConfigWinCP:\n releases = [r for r in self.releases if spec.contains(r[\"python_version\"])]\n releases = sorted(releases, key=lambda r: r[\"pypy_version\"]) # type: ignore[no-any-return]\n releases = [r for r in releases if self.get_arch_file(r)]\n\n if not releases:\n raise RuntimeError(f\"PyPy Win {self.arch} not found for {spec}! {self.releases}\")\n\n version_arch = \"win32\" if self.arch == \"32\" else \"win_amd64\"\n\n release = releases[-1]\n version = release[\"python_version\"]\n identifier = f\"pp{version.major}{version.minor}-{version_arch}\"\n url = self.get_arch_file(release)\n\n return ConfigWinPP(\n identifier=identifier,\n version=f\"{version.major}.{version.minor}\",\n arch=self.arch,\n url=url,\n )\n\n def update_version_macos(self, spec: Specifier) -> ConfigMacOS:\n if self.arch != \"64\":\n raise RuntimeError(\"Other archs not supported yet on macOS\")\n\n releases = [r for r in self.releases if spec.contains(r[\"python_version\"])]\n releases = sorted(releases, key=lambda r: r[\"pypy_version\"]) # type: ignore[no-any-return]\n\n if not releases:\n raise RuntimeError(f\"PyPy macOS {self.arch} not found for {spec}!\")\n\n release = releases[-1]\n version = release[\"python_version\"]\n identifier = f\"pp{version.major}{version.minor}-macosx_x86_64\"\n\n (url,) = (\n rf[\"download_url\"]\n for rf in release[\"files\"]\n if \"\" in rf[\"platform\"] == \"darwin\" and rf[\"arch\"] == \"x64\"\n )\n\n return ConfigMacOS(\n identifier=identifier,\n version=f\"{version.major}.{version.minor}\",\n url=url,\n )\n\n\nclass CPythonVersions:\n def __init__(self) -> None:\n\n response = requests.get(\n \"https://www.python.org/api/v2/downloads/release/?is_published=true\"\n )\n response.raise_for_status()\n\n releases_info = response.json()\n\n self.versions_dict: dict[Version, int] = {}\n for release in releases_info:\n # Removing the prefix, Python 3.9 would use: release[\"name\"].removeprefix(\"Python \")\n version = Version(release[\"name\"][7:])\n\n uri = int(release[\"resource_uri\"].rstrip(\"/\").split(\"/\")[-1])\n self.versions_dict[version] = uri\n\n def update_version_macos(\n self, identifier: str, version: Version, spec: Specifier\n ) -> ConfigMacOS | None:\n\n # see note above on Specifier.filter\n unsorted_versions = spec.filter(self.versions_dict)\n sorted_versions = sorted(unsorted_versions, reverse=True)\n\n if version <= Version(\"3.8.9999\"):\n file_ident = \"macosx10.9.pkg\"\n else:\n file_ident = \"macos11.pkg\"\n\n for new_version in 
sorted_versions:\n # Find the first patch version that contains the requested file\n uri = self.versions_dict[new_version]\n response = requests.get(\n f\"https://www.python.org/api/v2/downloads/release_file/?release={uri}\"\n )\n response.raise_for_status()\n file_info = response.json()\n\n urls = [rf[\"url\"] for rf in file_info if file_ident in rf[\"url\"]]\n if urls:\n return ConfigMacOS(\n identifier=identifier,\n version=f\"{new_version.major}.{new_version.minor}\",\n url=urls[0],\n )\n\n return None\n\n\n# This is a universal interface to all the above Versions classes. Given an\n# identifier, it updates a config dict.\n\n\nclass AllVersions:\n def __init__(self) -> None:\n self.windows_32 = WindowsVersions(\"32\")\n self.windows_64 = WindowsVersions(\"64\")\n self.windows_arm64 = WindowsVersions(\"ARM64\")\n self.windows_pypy_64 = PyPyVersions(\"64\")\n\n self.macos_cpython = CPythonVersions()\n self.macos_pypy = PyPyVersions(\"64\")\n\n def update_config(self, config: dict[str, str]) -> None:\n identifier = config[\"identifier\"]\n version = Version(config[\"version\"])\n spec = Specifier(f\"=={version.major}.{version.minor}.*\")\n log.info(f\"Reading in '{identifier}' -> {spec} @ {version}\")\n orig_config = copy.copy(config)\n config_update: AnyConfig | None = None\n\n # We need to use ** in update due to MyPy (probably a bug)\n if \"macos\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.macos_cpython.update_version_macos(identifier, version, spec)\n elif identifier.startswith(\"pp\"):\n config_update = self.macos_pypy.update_version_macos(spec)\n elif \"win32\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.windows_32.update_version_windows(spec)\n elif \"win_amd64\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.windows_64.update_version_windows(spec)\n elif identifier.startswith(\"pp\"):\n config_update = self.windows_pypy_64.update_version_windows(spec)\n elif \"win_arm64\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.windows_arm64.update_version_windows(spec)\n\n assert config_update is not None, f\"{identifier} not found!\"\n config.update(**config_update)\n\n if config != orig_config:\n log.info(f\" Updated {orig_config} to {config}\")\n\n\[email protected]()\[email protected](\"--force\", is_flag=True)\[email protected](\n \"--level\", default=\"INFO\", type=click.Choice([\"WARNING\", \"INFO\", \"DEBUG\"], case_sensitive=False)\n)\ndef update_pythons(force: bool, level: str) -> None:\n\n logging.basicConfig(\n level=\"INFO\",\n format=\"%(message)s\",\n datefmt=\"[%X]\",\n handlers=[RichHandler(rich_tracebacks=True, markup=True)],\n )\n log.setLevel(level)\n\n all_versions = AllVersions()\n toml_file_path = RESOURCES_DIR / \"build-platforms.toml\"\n\n original_toml = toml_file_path.read_text()\n with toml_file_path.open(\"rb\") as f:\n configs = tomli.load(f)\n\n for config in configs[\"windows\"][\"python_configurations\"]:\n all_versions.update_config(config)\n\n for config in configs[\"macos\"][\"python_configurations\"]:\n all_versions.update_config(config)\n\n result_toml = dump_python_configurations(configs)\n\n rich.print() # spacer\n\n if original_toml == result_toml:\n rich.print(\"[green]Check complete, Python configurations unchanged.\")\n return\n\n rich.print(\"Python configurations updated.\")\n rich.print(\"Changes:\")\n rich.print()\n\n toml_relpath = toml_file_path.relative_to(DIR).as_posix()\n diff_lines = difflib.unified_diff(\n 
original_toml.splitlines(keepends=True),\n result_toml.splitlines(keepends=True),\n fromfile=toml_relpath,\n tofile=toml_relpath,\n )\n rich.print(Syntax(\"\".join(diff_lines), \"diff\", theme=\"ansi_light\"))\n rich.print()\n\n if force:\n toml_file_path.write_text(result_toml)\n rich.print(\"[green]TOML file updated.\")\n else:\n rich.print(\"[yellow]File left unchanged. Use --force flag to update.\")\n\n\nif __name__ == \"__main__\":\n update_pythons()\n", "path": "bin/update_pythons.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nimport copy\nimport difflib\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Union\n\nimport click\nimport requests\nimport rich\nimport tomli\nfrom packaging.specifiers import Specifier\nfrom packaging.version import Version\nfrom rich.logging import RichHandler\nfrom rich.syntax import Syntax\n\nfrom cibuildwheel.extra import dump_python_configurations\nfrom cibuildwheel.typing import Final, Literal, TypedDict\n\nlog = logging.getLogger(\"cibw\")\n\n# Looking up the dir instead of using utils.resources_dir\n# since we want to write to it.\nDIR: Final[Path] = Path(__file__).parent.parent.resolve()\nRESOURCES_DIR: Final[Path] = DIR / \"cibuildwheel/resources\"\n\n\nArchStr = Literal[\"32\", \"64\", \"ARM64\"]\n\n\nclass ConfigWinCP(TypedDict):\n identifier: str\n version: str\n arch: str\n\n\nclass ConfigWinPP(TypedDict):\n identifier: str\n version: str\n arch: str\n url: str\n\n\nclass ConfigMacOS(TypedDict):\n identifier: str\n version: str\n url: str\n\n\nAnyConfig = Union[ConfigWinCP, ConfigWinPP, ConfigMacOS]\n\n\n# The following set of \"Versions\" classes allow the initial call to the APIs to\n# be cached and reused in the `update_version_*` methods.\n\n\nclass WindowsVersions:\n def __init__(self, arch_str: ArchStr) -> None:\n\n response = requests.get(\"https://api.nuget.org/v3/index.json\")\n response.raise_for_status()\n api_info = response.json()\n\n for resource in api_info[\"resources\"]:\n if resource[\"@type\"] == \"PackageBaseAddress/3.0.0\":\n endpoint = resource[\"@id\"]\n\n ARCH_DICT = {\"32\": \"win32\", \"64\": \"win_amd64\", \"ARM64\": \"win_arm64\"}\n PACKAGE_DICT = {\"32\": \"pythonx86\", \"64\": \"python\", \"ARM64\": \"pythonarm64\"}\n\n self.arch_str = arch_str\n self.arch = ARCH_DICT[arch_str]\n package = PACKAGE_DICT[arch_str]\n\n response = requests.get(f\"{endpoint}{package}/index.json\")\n response.raise_for_status()\n cp_info = response.json()\n\n self.version_dict = {Version(v): v for v in cp_info[\"versions\"]}\n\n def update_version_windows(self, spec: Specifier) -> ConfigWinCP | None:\n\n # Specifier.filter selects all non pre-releases that match the spec,\n # unless there are only pre-releases, then it selects pre-releases\n # instead (like pip)\n unsorted_versions = spec.filter(self.version_dict)\n versions = sorted(unsorted_versions, reverse=True)\n\n log.debug(f\"Windows {self.arch} {spec} has {', '.join(str(v) for v in versions)}\")\n\n if not versions:\n return None\n\n version = versions[0]\n identifier = f\"cp{version.major}{version.minor}-{self.arch}\"\n return ConfigWinCP(\n identifier=identifier,\n version=self.version_dict[version],\n arch=self.arch_str,\n )\n\n\nclass PyPyVersions:\n def __init__(self, arch_str: ArchStr):\n\n response = requests.get(\"https://downloads.python.org/pypy/versions.json\")\n response.raise_for_status()\n\n releases = [\n r\n for r in response.json()\n if r[\"pypy_version\"] != \"nightly\"\n and 
f'{r[\"python_version\"]}-{r[\"pypy_version\"]}' != \"3.7.12-7.3.8\"\n ]\n for release in releases:\n release[\"pypy_version\"] = Version(release[\"pypy_version\"])\n release[\"python_version\"] = Version(release[\"python_version\"])\n\n self.releases = [\n r\n for r in releases\n if not r[\"pypy_version\"].is_prerelease and not r[\"pypy_version\"].is_devrelease\n ]\n self.arch = arch_str\n\n def get_arch_file(self, release: dict[str, Any]) -> str:\n urls: list[str] = [\n rf[\"download_url\"]\n for rf in release[\"files\"]\n if \"\" in rf[\"platform\"] == f\"win{self.arch}\"\n ]\n return urls[0] if urls else \"\"\n\n def update_version_windows(self, spec: Specifier) -> ConfigWinCP:\n releases = [r for r in self.releases if spec.contains(r[\"python_version\"])]\n releases = sorted(releases, key=lambda r: r[\"pypy_version\"]) # type: ignore[no-any-return]\n releases = [r for r in releases if self.get_arch_file(r)]\n\n if not releases:\n raise RuntimeError(f\"PyPy Win {self.arch} not found for {spec}! {self.releases}\")\n\n version_arch = \"win32\" if self.arch == \"32\" else \"win_amd64\"\n\n release = releases[-1]\n version = release[\"python_version\"]\n identifier = f\"pp{version.major}{version.minor}-{version_arch}\"\n url = self.get_arch_file(release)\n\n return ConfigWinPP(\n identifier=identifier,\n version=f\"{version.major}.{version.minor}\",\n arch=self.arch,\n url=url,\n )\n\n def update_version_macos(self, spec: Specifier) -> ConfigMacOS:\n if self.arch != \"64\":\n raise RuntimeError(\"Other archs not supported yet on macOS\")\n\n releases = [r for r in self.releases if spec.contains(r[\"python_version\"])]\n releases = sorted(releases, key=lambda r: r[\"pypy_version\"]) # type: ignore[no-any-return]\n\n if not releases:\n raise RuntimeError(f\"PyPy macOS {self.arch} not found for {spec}!\")\n\n release = releases[-1]\n version = release[\"python_version\"]\n identifier = f\"pp{version.major}{version.minor}-macosx_x86_64\"\n\n (url,) = (\n rf[\"download_url\"]\n for rf in release[\"files\"]\n if \"\" in rf[\"platform\"] == \"darwin\" and rf[\"arch\"] == \"x64\"\n )\n\n return ConfigMacOS(\n identifier=identifier,\n version=f\"{version.major}.{version.minor}\",\n url=url,\n )\n\n\nclass CPythonVersions:\n def __init__(self) -> None:\n\n response = requests.get(\n \"https://www.python.org/api/v2/downloads/release/?is_published=true\"\n )\n response.raise_for_status()\n\n releases_info = response.json()\n\n self.versions_dict: dict[Version, int] = {}\n for release in releases_info:\n # Removing the prefix, Python 3.9 would use: release[\"name\"].removeprefix(\"Python \")\n version = Version(release[\"name\"][7:])\n\n uri = int(release[\"resource_uri\"].rstrip(\"/\").split(\"/\")[-1])\n self.versions_dict[version] = uri\n\n def update_version_macos(\n self, identifier: str, version: Version, spec: Specifier\n ) -> ConfigMacOS | None:\n\n # see note above on Specifier.filter\n unsorted_versions = spec.filter(self.versions_dict)\n sorted_versions = sorted(unsorted_versions, reverse=True)\n\n if version <= Version(\"3.8.9999\"):\n file_ident = \"macosx10.9.pkg\"\n else:\n file_ident = \"macos11.pkg\"\n\n for new_version in sorted_versions:\n # Find the first patch version that contains the requested file\n uri = self.versions_dict[new_version]\n response = requests.get(\n f\"https://www.python.org/api/v2/downloads/release_file/?release={uri}\"\n )\n response.raise_for_status()\n file_info = response.json()\n\n urls = [rf[\"url\"] for rf in file_info if file_ident in rf[\"url\"]]\n if 
urls:\n return ConfigMacOS(\n identifier=identifier,\n version=f\"{new_version.major}.{new_version.minor}\",\n url=urls[0],\n )\n\n return None\n\n\n# This is a universal interface to all the above Versions classes. Given an\n# identifier, it updates a config dict.\n\n\nclass AllVersions:\n def __init__(self) -> None:\n self.windows_32 = WindowsVersions(\"32\")\n self.windows_64 = WindowsVersions(\"64\")\n self.windows_arm64 = WindowsVersions(\"ARM64\")\n self.windows_pypy_64 = PyPyVersions(\"64\")\n\n self.macos_cpython = CPythonVersions()\n self.macos_pypy = PyPyVersions(\"64\")\n\n def update_config(self, config: dict[str, str]) -> None:\n identifier = config[\"identifier\"]\n version = Version(config[\"version\"])\n spec = Specifier(f\"=={version.major}.{version.minor}.*\")\n log.info(f\"Reading in '{identifier}' -> {spec} @ {version}\")\n orig_config = copy.copy(config)\n config_update: AnyConfig | None = None\n\n # We need to use ** in update due to MyPy (probably a bug)\n if \"macos\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.macos_cpython.update_version_macos(identifier, version, spec)\n elif identifier.startswith(\"pp\"):\n config_update = self.macos_pypy.update_version_macos(spec)\n elif \"win32\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.windows_32.update_version_windows(spec)\n elif \"win_amd64\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.windows_64.update_version_windows(spec)\n elif identifier.startswith(\"pp\"):\n config_update = self.windows_pypy_64.update_version_windows(spec)\n elif \"win_arm64\" in identifier:\n if identifier.startswith(\"cp\"):\n config_update = self.windows_arm64.update_version_windows(spec)\n\n assert config_update is not None, f\"{identifier} not found!\"\n config.update(**config_update)\n\n if config != orig_config:\n log.info(f\" Updated {orig_config} to {config}\")\n\n\[email protected]()\[email protected](\"--force\", is_flag=True)\[email protected](\n \"--level\", default=\"INFO\", type=click.Choice([\"WARNING\", \"INFO\", \"DEBUG\"], case_sensitive=False)\n)\ndef update_pythons(force: bool, level: str) -> None:\n\n logging.basicConfig(\n level=\"INFO\",\n format=\"%(message)s\",\n datefmt=\"[%X]\",\n handlers=[RichHandler(rich_tracebacks=True, markup=True)],\n )\n log.setLevel(level)\n\n all_versions = AllVersions()\n toml_file_path = RESOURCES_DIR / \"build-platforms.toml\"\n\n original_toml = toml_file_path.read_text()\n with toml_file_path.open(\"rb\") as f:\n configs = tomli.load(f)\n\n for config in configs[\"windows\"][\"python_configurations\"]:\n all_versions.update_config(config)\n\n for config in configs[\"macos\"][\"python_configurations\"]:\n all_versions.update_config(config)\n\n result_toml = dump_python_configurations(configs)\n\n rich.print() # spacer\n\n if original_toml == result_toml:\n rich.print(\"[green]Check complete, Python configurations unchanged.\")\n return\n\n rich.print(\"Python configurations updated.\")\n rich.print(\"Changes:\")\n rich.print()\n\n toml_relpath = toml_file_path.relative_to(DIR).as_posix()\n diff_lines = difflib.unified_diff(\n original_toml.splitlines(keepends=True),\n result_toml.splitlines(keepends=True),\n fromfile=toml_relpath,\n tofile=toml_relpath,\n )\n rich.print(Syntax(\"\".join(diff_lines), \"diff\", theme=\"ansi_light\"))\n rich.print()\n\n if force:\n toml_file_path.write_text(result_toml)\n rich.print(\"[green]TOML file updated.\")\n else:\n rich.print(\"[yellow]File left unchanged. 
Use --force flag to update.\")\n\n\nif __name__ == \"__main__\":\n update_pythons()\n", "path": "bin/update_pythons.py"}]}
| 3,912 | 202 |
gh_patches_debug_29306 | rasdani/github-patches | git_diff | saleor__saleor-2827 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CustomJsonEncoder drops currency information from Money instance
Usually we use `settings.DEFAULT_CURRENCY` all over the place; however, this can lead to a bug where a `Money` value is saved into a `JSONField` and `settings.DEFAULT_CURRENCY` changes in the meantime.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/utils/json_serializer.py`
Content:
```
1 from django.core.serializers.json import (
2 Deserializer as JsonDeserializer, DjangoJSONEncoder,
3 Serializer as JsonSerializer)
4 from prices import Money
5
6
7 class Serializer(JsonSerializer):
8 def _init_options(self):
9 super()._init_options()
10 self.json_kwargs['cls'] = CustomJsonEncoder
11
12
13 class CustomJsonEncoder(DjangoJSONEncoder):
14 def default(self, obj):
15 if isinstance(obj, Money):
16 return obj.amount
17 return super().default(obj)
18
19
20 Deserializer = JsonDeserializer
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/core/utils/json_serializer.py b/saleor/core/utils/json_serializer.py
--- a/saleor/core/utils/json_serializer.py
+++ b/saleor/core/utils/json_serializer.py
@@ -1,8 +1,12 @@
+import json
+
+from django.core.serializers.base import DeserializationError
from django.core.serializers.json import (
- Deserializer as JsonDeserializer, DjangoJSONEncoder,
- Serializer as JsonSerializer)
+ DjangoJSONEncoder, PythonDeserializer, Serializer as JsonSerializer)
from prices import Money
+MONEY_TYPE = 'Money'
+
class Serializer(JsonSerializer):
def _init_options(self):
@@ -13,8 +17,32 @@
class CustomJsonEncoder(DjangoJSONEncoder):
def default(self, obj):
if isinstance(obj, Money):
- return obj.amount
+ return {
+ '_type': MONEY_TYPE, 'amount': obj.amount,
+ 'currency': obj.currency}
return super().default(obj)
-Deserializer = JsonDeserializer
+def object_hook(obj):
+ if '_type' in obj and obj['_type'] == MONEY_TYPE:
+ return Money(obj['amount'], obj['currency'])
+ return obj
+
+
+def Deserializer(stream_or_string, **options):
+ """Deserialize a stream or string of JSON data. This is a slightly modified
+ copy of Django implementation with additional argument <object_hook> in
+ json.loads"""
+ if not isinstance(stream_or_string, (bytes, str)):
+ stream_or_string = stream_or_string.read()
+ if isinstance(stream_or_string, bytes):
+ stream_or_string = stream_or_string.decode()
+ try:
+ objects = json.loads(stream_or_string, object_hook=object_hook)
+ yield from PythonDeserializer(objects, **options)
+ except Exception as exc:
+ # ugly construction to overcome pylint's warning
+ # "The except handler raises immediately"
+ if isinstance(exc, (GeneratorExit, DeserializationError)):
+ raise
+ raise DeserializationError() from exc
|
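The round trip that the diff above enables can be shown without Django at all. The sketch below keeps only the `_type` tagging idea; the `MoneyEncoder` class and the plain `json` calls are illustrative stand-ins rather than saleor's serializer, and it assumes the `prices` package is installed.

```python
import json

from prices import Money

MONEY_TYPE = 'Money'


class MoneyEncoder(json.JSONEncoder):
    # Illustrative stand-in for the patched CustomJsonEncoder above.
    def default(self, obj):
        if isinstance(obj, Money):
            # Store the currency too, so deserialisation no longer has to
            # assume settings.DEFAULT_CURRENCY. str() is used because the
            # plain JSONEncoder cannot emit Decimal directly.
            return {'_type': MONEY_TYPE, 'amount': str(obj.amount),
                    'currency': obj.currency}
        return super().default(obj)


def object_hook(obj):
    if obj.get('_type') == MONEY_TYPE:
        return Money(obj['amount'], obj['currency'])
    return obj


payload = json.dumps({'total': Money(10, 'USD')}, cls=MoneyEncoder)
restored = json.loads(payload, object_hook=object_hook)
print(restored['total'])  # the currency survives the round trip
```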
{"golden_diff": "diff --git a/saleor/core/utils/json_serializer.py b/saleor/core/utils/json_serializer.py\n--- a/saleor/core/utils/json_serializer.py\n+++ b/saleor/core/utils/json_serializer.py\n@@ -1,8 +1,12 @@\n+import json\n+\n+from django.core.serializers.base import DeserializationError\n from django.core.serializers.json import (\n- Deserializer as JsonDeserializer, DjangoJSONEncoder,\n- Serializer as JsonSerializer)\n+ DjangoJSONEncoder, PythonDeserializer, Serializer as JsonSerializer)\n from prices import Money\n \n+MONEY_TYPE = 'Money'\n+\n \n class Serializer(JsonSerializer):\n def _init_options(self):\n@@ -13,8 +17,32 @@\n class CustomJsonEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, Money):\n- return obj.amount\n+ return {\n+ '_type': MONEY_TYPE, 'amount': obj.amount,\n+ 'currency': obj.currency}\n return super().default(obj)\n \n \n-Deserializer = JsonDeserializer\n+def object_hook(obj):\n+ if '_type' in obj and obj['_type'] == MONEY_TYPE:\n+ return Money(obj['amount'], obj['currency'])\n+ return obj\n+\n+\n+def Deserializer(stream_or_string, **options):\n+ \"\"\"Deserialize a stream or string of JSON data. This is a slightly modified\n+ copy of Django implementation with additional argument <object_hook> in\n+ json.loads\"\"\"\n+ if not isinstance(stream_or_string, (bytes, str)):\n+ stream_or_string = stream_or_string.read()\n+ if isinstance(stream_or_string, bytes):\n+ stream_or_string = stream_or_string.decode()\n+ try:\n+ objects = json.loads(stream_or_string, object_hook=object_hook)\n+ yield from PythonDeserializer(objects, **options)\n+ except Exception as exc:\n+ # ugly construction to overcome pylint's warning\n+ # \"The except handler raises immediately\"\n+ if isinstance(exc, (GeneratorExit, DeserializationError)):\n+ raise\n+ raise DeserializationError() from exc\n", "issue": "CustomJsonEncoder drops currency information from Money instance\nUsually, we are using `settings.DEFAULT_CURRENCY` all over the place, however, there can be a bug, where `Money` is saved into `JSONField`, and `settings.DEFAULT_CURRENCY` changed meanwhile.\r\n\r\n\n", "before_files": [{"content": "from django.core.serializers.json import (\n Deserializer as JsonDeserializer, DjangoJSONEncoder,\n Serializer as JsonSerializer)\nfrom prices import Money\n\n\nclass Serializer(JsonSerializer):\n def _init_options(self):\n super()._init_options()\n self.json_kwargs['cls'] = CustomJsonEncoder\n\n\nclass CustomJsonEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, Money):\n return obj.amount\n return super().default(obj)\n\n\nDeserializer = JsonDeserializer\n", "path": "saleor/core/utils/json_serializer.py"}], "after_files": [{"content": "import json\n\nfrom django.core.serializers.base import DeserializationError\nfrom django.core.serializers.json import (\n DjangoJSONEncoder, PythonDeserializer, Serializer as JsonSerializer)\nfrom prices import Money\n\nMONEY_TYPE = 'Money'\n\n\nclass Serializer(JsonSerializer):\n def _init_options(self):\n super()._init_options()\n self.json_kwargs['cls'] = CustomJsonEncoder\n\n\nclass CustomJsonEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, Money):\n return {\n '_type': MONEY_TYPE, 'amount': obj.amount,\n 'currency': obj.currency}\n return super().default(obj)\n\n\ndef object_hook(obj):\n if '_type' in obj and obj['_type'] == MONEY_TYPE:\n return Money(obj['amount'], obj['currency'])\n return obj\n\n\ndef Deserializer(stream_or_string, **options):\n \"\"\"Deserialize a stream or 
string of JSON data. This is a slightly modified\n copy of Django implementation with additional argument <object_hook> in\n json.loads\"\"\"\n if not isinstance(stream_or_string, (bytes, str)):\n stream_or_string = stream_or_string.read()\n if isinstance(stream_or_string, bytes):\n stream_or_string = stream_or_string.decode()\n try:\n objects = json.loads(stream_or_string, object_hook=object_hook)\n yield from PythonDeserializer(objects, **options)\n except Exception as exc:\n # ugly construction to overcome pylint's warning\n # \"The except handler raises immediately\"\n if isinstance(exc, (GeneratorExit, DeserializationError)):\n raise\n raise DeserializationError() from exc\n", "path": "saleor/core/utils/json_serializer.py"}]}
| 457 | 444 |
gh_patches_debug_36537 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1783 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot_hog.py results in a strange-looking figure in the gallery
Compare http://scikit-image.org/docs/dev/_images/plot_hog_1.png and http://scikit-image.org/docs/0.11.x/_images/plot_hog_1.png
I don't think the example changed, so the problem must be inside the `hog` function or in one of the functions it calls.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/feature/_hog.py`
Content:
```
1 from __future__ import division
2 import numpy as np
3 from .._shared.utils import assert_nD
4 from . import _hoghistogram
5
6
7 def hog(image, orientations=9, pixels_per_cell=(8, 8),
8 cells_per_block=(3, 3), visualise=False, normalise=False):
9 """Extract Histogram of Oriented Gradients (HOG) for a given image.
10
11 Compute a Histogram of Oriented Gradients (HOG) by
12
13 1. (optional) global image normalisation
14 2. computing the gradient image in x and y
15 3. computing gradient histograms
16 4. normalising across blocks
17 5. flattening into a feature vector
18
19 Parameters
20 ----------
21 image : (M, N) ndarray
22 Input image (greyscale).
23 orientations : int
24 Number of orientation bins.
25 pixels_per_cell : 2 tuple (int, int)
26 Size (in pixels) of a cell.
27 cells_per_block : 2 tuple (int,int)
28 Number of cells in each block.
29 visualise : bool, optional
30 Also return an image of the HOG.
31 normalise : bool, optional
32 Apply power law compression to normalise the image before
33 processing.
34
35 Returns
36 -------
37 newarr : ndarray
38 HOG for the image as a 1D (flattened) array.
39 hog_image : ndarray (if visualise=True)
40 A visualisation of the HOG image.
41
42 References
43 ----------
44 * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients
45
46 * Dalal, N and Triggs, B, Histograms of Oriented Gradients for
47 Human Detection, IEEE Computer Society Conference on Computer
48 Vision and Pattern Recognition 2005 San Diego, CA, USA
49
50 """
51 image = np.atleast_2d(image)
52
53 """
54 The first stage applies an optional global image normalisation
55 equalisation that is designed to reduce the influence of illumination
56 effects. In practice we use gamma (power law) compression, either
57 computing the square root or the log of each colour channel.
58 Image texture strength is typically proportional to the local surface
59 illumination so this compression helps to reduce the effects of local
60 shadowing and illumination variations.
61 """
62
63 assert_nD(image, 2)
64
65 if normalise:
66 image = np.sqrt(image)
67
68 """
69 The second stage computes first order image gradients. These capture
70 contour, silhouette and some texture information, while providing
71 further resistance to illumination variations. The locally dominant
72 colour channel is used, which provides colour invariance to a large
73 extent. Variant methods may also include second order image derivatives,
74 which act as primitive bar detectors - a useful feature for capturing,
75 e.g. bar like structures in bicycles and limbs in humans.
76 """
77
78 if image.dtype.kind == 'u':
79 # convert uint image to float
80 # to avoid problems with subtracting unsigned numbers in np.diff()
81 image = image.astype('float')
82
83 gx = np.empty(image.shape, dtype=np.double)
84 gx[:, 0] = 0
85 gx[:, -1] = 0
86 gx[:, 1:-1] = image[:, 2:] - image[:, :-2]
87 gy = np.empty(image.shape, dtype=np.double)
88 gy[0, :] = 0
89 gy[-1, :] = 0
90 gy[1:-1, :] = image[2:, :] - image[:-2, :]
91
92 """
93 The third stage aims to produce an encoding that is sensitive to
94 local image content while remaining resistant to small changes in
95 pose or appearance. The adopted method pools gradient orientation
96 information locally in the same way as the SIFT [Lowe 2004]
97 feature. The image window is divided into small spatial regions,
98 called "cells". For each cell we accumulate a local 1-D histogram
99 of gradient or edge orientations over all the pixels in the
100 cell. This combined cell-level 1-D histogram forms the basic
101 "orientation histogram" representation. Each orientation histogram
102 divides the gradient angle range into a fixed number of
103 predetermined bins. The gradient magnitudes of the pixels in the
104 cell are used to vote into the orientation histogram.
105 """
106
107 sy, sx = image.shape
108 cx, cy = pixels_per_cell
109 bx, by = cells_per_block
110
111 n_cellsx = int(np.floor(sx // cx)) # number of cells in x
112 n_cellsy = int(np.floor(sy // cy)) # number of cells in y
113
114 # compute orientations integral images
115 orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))
116
117 _hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy,
118 orientations, orientation_histogram)
119
120 # now for each cell, compute the histogram
121 hog_image = None
122
123 if visualise:
124 from .. import draw
125
126 radius = min(cx, cy) // 2 - 1
127 orientations_arr = np.arange(orientations)
128 dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)
129 dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)
130 cr2 = cy + cy
131 cc2 = cx + cx
132 hog_image = np.zeros((sy, sx), dtype=float)
133 for x in range(n_cellsx):
134 for y in range(n_cellsy):
135 for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):
136 centre = tuple([y * cr2 // 2, x * cc2 // 2])
137 rr, cc = draw.line(int(centre[0] - dx),
138 int(centre[1] + dy),
139 int(centre[0] + dx),
140 int(centre[1] - dy))
141 hog_image[rr, cc] += orientation_histogram[y, x, o]
142
143 """
144 The fourth stage computes normalisation, which takes local groups of
145 cells and contrast normalises their overall responses before passing
146 to next stage. Normalisation introduces better invariance to illumination,
147 shadowing, and edge contrast. It is performed by accumulating a measure
148 of local histogram "energy" over local groups of cells that we call
149 "blocks". The result is used to normalise each cell in the block.
150 Typically each individual cell is shared between several blocks, but
151 its normalisations are block dependent and thus different. The cell
152 thus appears several times in the final output vector with different
153 normalisations. This may seem redundant but it improves the performance.
154 We refer to the normalised block descriptors as Histogram of Oriented
155 Gradient (HOG) descriptors.
156 """
157
158 n_blocksx = (n_cellsx - bx) + 1
159 n_blocksy = (n_cellsy - by) + 1
160 normalised_blocks = np.zeros((n_blocksy, n_blocksx,
161 by, bx, orientations))
162
163 for x in range(n_blocksx):
164 for y in range(n_blocksy):
165 block = orientation_histogram[y:y + by, x:x + bx, :]
166 eps = 1e-5
167 normalised_blocks[y, x, :] = block / np.sqrt(block.sum() ** 2 + eps)
168
169 """
170 The final step collects the HOG descriptors from all blocks of a dense
171 overlapping grid of blocks covering the detection window into a combined
172 feature vector for use in the window classifier.
173 """
174
175 if visualise:
176 return normalised_blocks.ravel(), hog_image
177 else:
178 return normalised_blocks.ravel()
179
```
--- END FILES ---
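As a quick sanity check on the five stages described in the docstring above, here is a minimal usage sketch of the `hog` function defined in this file. The synthetic random image and parameter values are my own choices, and it assumes the public `skimage.feature.hog` entry point matches the `_hog.py` shown above:
```python
import numpy as np
from skimage.feature import hog  # public entry point assumed to wrap the _hog.py shown above

image = np.random.rand(64, 64)  # synthetic greyscale image
features, hog_image = hog(image, orientations=9, pixels_per_cell=(8, 8),
                          cells_per_block=(3, 3), visualise=True)

# 64/8 = 8 cells per axis, (8 - 3 + 1)**2 = 36 blocks, 36 * 3 * 3 * 9 = 2916 values
print(features.shape)   # (2916,)
print(hog_image.shape)  # (64, 64)
```
The flat `(2916,)` shape comes from the `.ravel()` at the end of the function; the patch shown further down makes that flattening optional via a `feature_vector` argument.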
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/feature/_hog.py b/skimage/feature/_hog.py
--- a/skimage/feature/_hog.py
+++ b/skimage/feature/_hog.py
@@ -5,7 +5,8 @@
def hog(image, orientations=9, pixels_per_cell=(8, 8),
- cells_per_block=(3, 3), visualise=False, normalise=False):
+ cells_per_block=(3, 3), visualise=False, normalise=False,
+ feature_vector=True):
"""Extract Histogram of Oriented Gradients (HOG) for a given image.
Compute a Histogram of Oriented Gradients (HOG) by
@@ -31,6 +32,9 @@
normalise : bool, optional
Apply power law compression to normalise the image before
processing.
+ feature_vector : bool, optional
+ Return the data as a feature vector by calling .ravel() on the result
+ just before returning.
Returns
-------
@@ -127,13 +131,11 @@
orientations_arr = np.arange(orientations)
dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)
dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)
- cr2 = cy + cy
- cc2 = cx + cx
hog_image = np.zeros((sy, sx), dtype=float)
for x in range(n_cellsx):
for y in range(n_cellsy):
for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):
- centre = tuple([y * cr2 // 2, x * cc2 // 2])
+ centre = tuple([y * cy + cy // 2, x * cx + cx // 2])
rr, cc = draw.line(int(centre[0] - dx),
int(centre[1] + dy),
int(centre[0] + dx),
@@ -171,8 +173,11 @@
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
"""
+
+ if feature_vector:
+ normalised_blocks = normalised_blocks.ravel()
if visualise:
- return normalised_blocks.ravel(), hog_image
+ return normalised_blocks, hog_image
else:
- return normalised_blocks.ravel()
+ return normalised_blocks
|
{"golden_diff": "diff --git a/skimage/feature/_hog.py b/skimage/feature/_hog.py\n--- a/skimage/feature/_hog.py\n+++ b/skimage/feature/_hog.py\n@@ -5,7 +5,8 @@\n \n \n def hog(image, orientations=9, pixels_per_cell=(8, 8),\n- cells_per_block=(3, 3), visualise=False, normalise=False):\n+ cells_per_block=(3, 3), visualise=False, normalise=False,\n+ feature_vector=True):\n \"\"\"Extract Histogram of Oriented Gradients (HOG) for a given image.\n \n Compute a Histogram of Oriented Gradients (HOG) by\n@@ -31,6 +32,9 @@\n normalise : bool, optional\n Apply power law compression to normalise the image before\n processing.\n+ feature_vector : bool, optional\n+ Return the data as a feature vector by calling .ravel() on the result\n+ just before returning.\n \n Returns\n -------\n@@ -127,13 +131,11 @@\n orientations_arr = np.arange(orientations)\n dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)\n dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)\n- cr2 = cy + cy\n- cc2 = cx + cx\n hog_image = np.zeros((sy, sx), dtype=float)\n for x in range(n_cellsx):\n for y in range(n_cellsy):\n for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):\n- centre = tuple([y * cr2 // 2, x * cc2 // 2])\n+ centre = tuple([y * cy + cy // 2, x * cx + cx // 2])\n rr, cc = draw.line(int(centre[0] - dx),\n int(centre[1] + dy),\n int(centre[0] + dx),\n@@ -171,8 +173,11 @@\n overlapping grid of blocks covering the detection window into a combined\n feature vector for use in the window classifier.\n \"\"\"\n+ \n+ if feature_vector:\n+ normalised_blocks = normalised_blocks.ravel()\n \n if visualise:\n- return normalised_blocks.ravel(), hog_image\n+ return normalised_blocks, hog_image\n else:\n- return normalised_blocks.ravel()\n+ return normalised_blocks\n", "issue": "plot_hog.py results in strangely-looking figure in the gallery\nCompare http://scikit-image.org/docs/dev/_images/plot_hog_1.png and http://scikit-image.org/docs/0.11.x/_images/plot_hog_1.png\n\nI don't think the example changed, so it must be inside the hog functions or in one of the functions called by this function.\n\n", "before_files": [{"content": "from __future__ import division\nimport numpy as np\nfrom .._shared.utils import assert_nD\nfrom . import _hoghistogram\n\n\ndef hog(image, orientations=9, pixels_per_cell=(8, 8),\n cells_per_block=(3, 3), visualise=False, normalise=False):\n \"\"\"Extract Histogram of Oriented Gradients (HOG) for a given image.\n\n Compute a Histogram of Oriented Gradients (HOG) by\n\n 1. (optional) global image normalisation\n 2. computing the gradient image in x and y\n 3. computing gradient histograms\n 4. normalising across blocks\n 5. 
flattening into a feature vector\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image (greyscale).\n orientations : int\n Number of orientation bins.\n pixels_per_cell : 2 tuple (int, int)\n Size (in pixels) of a cell.\n cells_per_block : 2 tuple (int,int)\n Number of cells in each block.\n visualise : bool, optional\n Also return an image of the HOG.\n normalise : bool, optional\n Apply power law compression to normalise the image before\n processing.\n\n Returns\n -------\n newarr : ndarray\n HOG for the image as a 1D (flattened) array.\n hog_image : ndarray (if visualise=True)\n A visualisation of the HOG image.\n\n References\n ----------\n * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients\n\n * Dalal, N and Triggs, B, Histograms of Oriented Gradients for\n Human Detection, IEEE Computer Society Conference on Computer\n Vision and Pattern Recognition 2005 San Diego, CA, USA\n\n \"\"\"\n image = np.atleast_2d(image)\n\n \"\"\"\n The first stage applies an optional global image normalisation\n equalisation that is designed to reduce the influence of illumination\n effects. In practice we use gamma (power law) compression, either\n computing the square root or the log of each colour channel.\n Image texture strength is typically proportional to the local surface\n illumination so this compression helps to reduce the effects of local\n shadowing and illumination variations.\n \"\"\"\n\n assert_nD(image, 2)\n\n if normalise:\n image = np.sqrt(image)\n\n \"\"\"\n The second stage computes first order image gradients. These capture\n contour, silhouette and some texture information, while providing\n further resistance to illumination variations. The locally dominant\n colour channel is used, which provides colour invariance to a large\n extent. Variant methods may also include second order image derivatives,\n which act as primitive bar detectors - a useful feature for capturing,\n e.g. bar like structures in bicycles and limbs in humans.\n \"\"\"\n\n if image.dtype.kind == 'u':\n # convert uint image to float\n # to avoid problems with subtracting unsigned numbers in np.diff()\n image = image.astype('float')\n\n gx = np.empty(image.shape, dtype=np.double)\n gx[:, 0] = 0\n gx[:, -1] = 0\n gx[:, 1:-1] = image[:, 2:] - image[:, :-2]\n gy = np.empty(image.shape, dtype=np.double)\n gy[0, :] = 0\n gy[-1, :] = 0\n gy[1:-1, :] = image[2:, :] - image[:-2, :]\n\n \"\"\"\n The third stage aims to produce an encoding that is sensitive to\n local image content while remaining resistant to small changes in\n pose or appearance. The adopted method pools gradient orientation\n information locally in the same way as the SIFT [Lowe 2004]\n feature. The image window is divided into small spatial regions,\n called \"cells\". For each cell we accumulate a local 1-D histogram\n of gradient or edge orientations over all the pixels in the\n cell. This combined cell-level 1-D histogram forms the basic\n \"orientation histogram\" representation. Each orientation histogram\n divides the gradient angle range into a fixed number of\n predetermined bins. 
The gradient magnitudes of the pixels in the\n cell are used to vote into the orientation histogram.\n \"\"\"\n\n sy, sx = image.shape\n cx, cy = pixels_per_cell\n bx, by = cells_per_block\n\n n_cellsx = int(np.floor(sx // cx)) # number of cells in x\n n_cellsy = int(np.floor(sy // cy)) # number of cells in y\n\n # compute orientations integral images\n orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))\n\n _hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy,\n orientations, orientation_histogram)\n\n # now for each cell, compute the histogram\n hog_image = None\n\n if visualise:\n from .. import draw\n\n radius = min(cx, cy) // 2 - 1\n orientations_arr = np.arange(orientations)\n dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)\n dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)\n cr2 = cy + cy\n cc2 = cx + cx\n hog_image = np.zeros((sy, sx), dtype=float)\n for x in range(n_cellsx):\n for y in range(n_cellsy):\n for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):\n centre = tuple([y * cr2 // 2, x * cc2 // 2])\n rr, cc = draw.line(int(centre[0] - dx),\n int(centre[1] + dy),\n int(centre[0] + dx),\n int(centre[1] - dy))\n hog_image[rr, cc] += orientation_histogram[y, x, o]\n\n \"\"\"\n The fourth stage computes normalisation, which takes local groups of\n cells and contrast normalises their overall responses before passing\n to next stage. Normalisation introduces better invariance to illumination,\n shadowing, and edge contrast. It is performed by accumulating a measure\n of local histogram \"energy\" over local groups of cells that we call\n \"blocks\". The result is used to normalise each cell in the block.\n Typically each individual cell is shared between several blocks, but\n its normalisations are block dependent and thus different. The cell\n thus appears several times in the final output vector with different\n normalisations. This may seem redundant but it improves the performance.\n We refer to the normalised block descriptors as Histogram of Oriented\n Gradient (HOG) descriptors.\n \"\"\"\n\n n_blocksx = (n_cellsx - bx) + 1\n n_blocksy = (n_cellsy - by) + 1\n normalised_blocks = np.zeros((n_blocksy, n_blocksx,\n by, bx, orientations))\n\n for x in range(n_blocksx):\n for y in range(n_blocksy):\n block = orientation_histogram[y:y + by, x:x + bx, :]\n eps = 1e-5\n normalised_blocks[y, x, :] = block / np.sqrt(block.sum() ** 2 + eps)\n\n \"\"\"\n The final step collects the HOG descriptors from all blocks of a dense\n overlapping grid of blocks covering the detection window into a combined\n feature vector for use in the window classifier.\n \"\"\"\n\n if visualise:\n return normalised_blocks.ravel(), hog_image\n else:\n return normalised_blocks.ravel()\n", "path": "skimage/feature/_hog.py"}], "after_files": [{"content": "from __future__ import division\nimport numpy as np\nfrom .._shared.utils import assert_nD\nfrom . import _hoghistogram\n\n\ndef hog(image, orientations=9, pixels_per_cell=(8, 8),\n cells_per_block=(3, 3), visualise=False, normalise=False,\n feature_vector=True):\n \"\"\"Extract Histogram of Oriented Gradients (HOG) for a given image.\n\n Compute a Histogram of Oriented Gradients (HOG) by\n\n 1. (optional) global image normalisation\n 2. computing the gradient image in x and y\n 3. computing gradient histograms\n 4. normalising across blocks\n 5. 
flattening into a feature vector\n\n Parameters\n ----------\n image : (M, N) ndarray\n Input image (greyscale).\n orientations : int\n Number of orientation bins.\n pixels_per_cell : 2 tuple (int, int)\n Size (in pixels) of a cell.\n cells_per_block : 2 tuple (int,int)\n Number of cells in each block.\n visualise : bool, optional\n Also return an image of the HOG.\n normalise : bool, optional\n Apply power law compression to normalise the image before\n processing.\n feature_vector : bool, optional\n Return the data as a feature vector by calling .ravel() on the result\n just before returning.\n\n Returns\n -------\n newarr : ndarray\n HOG for the image as a 1D (flattened) array.\n hog_image : ndarray (if visualise=True)\n A visualisation of the HOG image.\n\n References\n ----------\n * http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients\n\n * Dalal, N and Triggs, B, Histograms of Oriented Gradients for\n Human Detection, IEEE Computer Society Conference on Computer\n Vision and Pattern Recognition 2005 San Diego, CA, USA\n\n \"\"\"\n image = np.atleast_2d(image)\n\n \"\"\"\n The first stage applies an optional global image normalisation\n equalisation that is designed to reduce the influence of illumination\n effects. In practice we use gamma (power law) compression, either\n computing the square root or the log of each colour channel.\n Image texture strength is typically proportional to the local surface\n illumination so this compression helps to reduce the effects of local\n shadowing and illumination variations.\n \"\"\"\n\n assert_nD(image, 2)\n\n if normalise:\n image = np.sqrt(image)\n\n \"\"\"\n The second stage computes first order image gradients. These capture\n contour, silhouette and some texture information, while providing\n further resistance to illumination variations. The locally dominant\n colour channel is used, which provides colour invariance to a large\n extent. Variant methods may also include second order image derivatives,\n which act as primitive bar detectors - a useful feature for capturing,\n e.g. bar like structures in bicycles and limbs in humans.\n \"\"\"\n\n if image.dtype.kind == 'u':\n # convert uint image to float\n # to avoid problems with subtracting unsigned numbers in np.diff()\n image = image.astype('float')\n\n gx = np.empty(image.shape, dtype=np.double)\n gx[:, 0] = 0\n gx[:, -1] = 0\n gx[:, 1:-1] = image[:, 2:] - image[:, :-2]\n gy = np.empty(image.shape, dtype=np.double)\n gy[0, :] = 0\n gy[-1, :] = 0\n gy[1:-1, :] = image[2:, :] - image[:-2, :]\n\n \"\"\"\n The third stage aims to produce an encoding that is sensitive to\n local image content while remaining resistant to small changes in\n pose or appearance. The adopted method pools gradient orientation\n information locally in the same way as the SIFT [Lowe 2004]\n feature. The image window is divided into small spatial regions,\n called \"cells\". For each cell we accumulate a local 1-D histogram\n of gradient or edge orientations over all the pixels in the\n cell. This combined cell-level 1-D histogram forms the basic\n \"orientation histogram\" representation. Each orientation histogram\n divides the gradient angle range into a fixed number of\n predetermined bins. 
The gradient magnitudes of the pixels in the\n cell are used to vote into the orientation histogram.\n \"\"\"\n\n sy, sx = image.shape\n cx, cy = pixels_per_cell\n bx, by = cells_per_block\n\n n_cellsx = int(np.floor(sx // cx)) # number of cells in x\n n_cellsy = int(np.floor(sy // cy)) # number of cells in y\n\n # compute orientations integral images\n orientation_histogram = np.zeros((n_cellsy, n_cellsx, orientations))\n\n _hoghistogram.hog_histograms(gx, gy, cx, cy, sx, sy, n_cellsx, n_cellsy,\n orientations, orientation_histogram)\n\n # now for each cell, compute the histogram\n hog_image = None\n\n if visualise:\n from .. import draw\n\n radius = min(cx, cy) // 2 - 1\n orientations_arr = np.arange(orientations)\n dx_arr = radius * np.cos(orientations_arr / orientations * np.pi)\n dy_arr = radius * np.sin(orientations_arr / orientations * np.pi)\n hog_image = np.zeros((sy, sx), dtype=float)\n for x in range(n_cellsx):\n for y in range(n_cellsy):\n for o, dx, dy in zip(orientations_arr, dx_arr, dy_arr):\n centre = tuple([y * cy + cy // 2, x * cx + cx // 2])\n rr, cc = draw.line(int(centre[0] - dx),\n int(centre[1] + dy),\n int(centre[0] + dx),\n int(centre[1] - dy))\n hog_image[rr, cc] += orientation_histogram[y, x, o]\n\n \"\"\"\n The fourth stage computes normalisation, which takes local groups of\n cells and contrast normalises their overall responses before passing\n to next stage. Normalisation introduces better invariance to illumination,\n shadowing, and edge contrast. It is performed by accumulating a measure\n of local histogram \"energy\" over local groups of cells that we call\n \"blocks\". The result is used to normalise each cell in the block.\n Typically each individual cell is shared between several blocks, but\n its normalisations are block dependent and thus different. The cell\n thus appears several times in the final output vector with different\n normalisations. This may seem redundant but it improves the performance.\n We refer to the normalised block descriptors as Histogram of Oriented\n Gradient (HOG) descriptors.\n \"\"\"\n\n n_blocksx = (n_cellsx - bx) + 1\n n_blocksy = (n_cellsy - by) + 1\n normalised_blocks = np.zeros((n_blocksy, n_blocksx,\n by, bx, orientations))\n\n for x in range(n_blocksx):\n for y in range(n_blocksy):\n block = orientation_histogram[y:y + by, x:x + bx, :]\n eps = 1e-5\n normalised_blocks[y, x, :] = block / np.sqrt(block.sum() ** 2 + eps)\n\n \"\"\"\n The final step collects the HOG descriptors from all blocks of a dense\n overlapping grid of blocks covering the detection window into a combined\n feature vector for use in the window classifier.\n \"\"\"\n \n if feature_vector:\n normalised_blocks = normalised_blocks.ravel()\n\n if visualise:\n return normalised_blocks, hog_image\n else:\n return normalised_blocks\n", "path": "skimage/feature/_hog.py"}]}
| 2,449 | 543 |
gh_patches_debug_28746
|
rasdani/github-patches
|
git_diff
|
kymatio__kymatio-226
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make inputs dimension-agnostic (1D/2D)
In other words, make `Scattering*D` operators act only on the last dimension(s) without forcing a specific convention on the previous dimensions. The output would have the same shape, but with the time/spatial dimension subsampled and another (scattering) dimension added right before it. So we'd have
- in 1D, `batch_shape + (T,)` input gives `batch_shape + (C, T/2**J)` output, and
- in 2D, `batch_shape + (N, M)` input gives `batch_shape + (C, N/2**J, M/2**J)` output,
where `batch_shape` is some (possibly empty) tuple specifying the batch dimensions that are not to be processed and `C` is the number of scattering coefficients.
The 3D case is more complicated since it follows a different output schema than the 1D and 2D cases, and will be addressed in a separate issue. That being said, it would be good if it could mimic the above behavior.
--- END ISSUE ---
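For illustration, here is a minimal sketch of the reshape pattern the issue asks for: collapse the leading batch dimensions, run the operator on a plain `(B, *signal_shape)` tensor, then restore the batch shape in front of the new scattering axis. The helper `apply_on_last_dims` and the dummy operator are hypothetical names, not kymatio API:
```python
import torch

def apply_on_last_dims(op, x, n_signal_dims):
    """Run `op` on the trailing `n_signal_dims` dimensions of `x`, keeping any batch dims."""
    batch_shape = tuple(x.shape[:-n_signal_dims])    # e.g. (10, 4) for a (10, 4, M, N) input
    signal_shape = tuple(x.shape[-n_signal_dims:])   # e.g. (M, N)
    y = op(x.reshape((-1,) + signal_shape))          # op only ever sees (B, M, N)
    return y.reshape(batch_shape + tuple(y.shape[1:]))

def dummy_scattering(t):
    # stand-in for a real transform: 2x subsampling per spatial dim plus a C=3 coefficient axis
    return t[:, None, ::2, ::2].repeat(1, 3, 1, 1)

out = apply_on_last_dims(dummy_scattering, torch.zeros(10, 4, 32, 32), n_signal_dims=2)
print(out.shape)  # torch.Size([10, 4, 3, 16, 16])
```
The same two reshapes (flatten on entry, `batch_shape + scattering_shape` on exit) are what the accepted patch later adds to `Scattering2D.forward`.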
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kymatio/scattering2d/scattering2d.py`
Content:
```
1 # Authors: Edouard Oyallon
2 # Scientific Ancestry: Edouard Oyallon, Laurent Sifre, Joan Bruna
3
4
5 __all__ = ['Scattering2D']
6
7 import torch
8 from .backend import cdgmm, Modulus, SubsampleFourier, fft, Pad, unpad
9 from .filter_bank import filter_bank
10 from .utils import compute_padding
11
12
13 class Scattering2D(object):
14 """Main module implementing the scattering transform in 2D.
15 The scattering transform computes two wavelet transform followed
16 by modulus non-linearity.
17 It can be summarized as::
18
19 S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]
20
21 where::
22
23 S_J^0 x = x * phi_J
24 S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda
25 S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}
26
27 where * denotes the convolution (in space), phi_J is a low pass
28 filter, psi^1_lambda is a family of band pass
29 filters and psi^2_mu is another family of band pass filters.
30 Only Morlet filters are used in this implementation.
31 Convolutions are efficiently performed in the Fourier domain
32 with this implementation.
33
34 Example
35 -------
36 # 1) Define a Scattering object as:
37 s = Scattering2D(J, M, N)
38 # where (M, N) are the image sizes and 2**J the scale of the scattering
39 # 2) Forward on an input Variable x of shape B x 1 x M x N,
40 # where B is the batch size.
41 result_s = s(x)
42
43 Parameters
44 ----------
45 J : int
46 logscale of the scattering
47 shape : tuple of int
48 spatial support (M, N) of the input
49 L : int, optional
50 number of angles used for the wavelet transform
51 max_order : int, optional
52 The maximum order of scattering coefficients to compute. Must be either
53 `1` or `2`. Defaults to `2`.
54 pre_pad : boolean, optional
55 controls the padding: if set to False, a symmetric padding is applied
56 on the signal. If set to true, the software will assume the signal was
57 padded externally.
58
59 Attributes
60 ----------
61 J : int
62 logscale of the scattering
63 shape : tuple of int
64 spatial support (M, N) of the input
65 L : int, optional
66 number of angles used for the wavelet transform
67 max_order : int, optional
68 The maximum order of scattering coefficients to compute.
69 Must be either equal to `1` or `2`. Defaults to `2`.
70 pre_pad : boolean
71 controls the padding
72 Psi : dictionary
73 containing the wavelets filters at all resolutions. See
74 filter_bank.filter_bank for an exact description.
75 Phi : dictionary
76 containing the low-pass filters at all resolutions. See
77 filter_bank.filter_bank for an exact description.
78 M_padded, N_padded : int
79 spatial support of the padded input
80
81 Notes
82 -----
83 The design of the filters is optimized for the value L = 8
84
85 pre_pad is particularly useful when doing crops of a bigger
86 image because the padding is then extremely accurate. Defaults
87 to False.
88
89 """
90 def __init__(self, J, shape, L=8, max_order=2, pre_pad=False):
91 self.J, self.L = J, L
92 self.pre_pad = pre_pad
93 self.max_order = max_order
94 self.shape = shape
95
96 self.build()
97
98 def build(self):
99 self.M, self.N = self.shape
100 self.modulus = Modulus()
101 self.pad = Pad(2**self.J, pre_pad = self.pre_pad)
102 self.subsample_fourier = SubsampleFourier()
103 # Create the filters
104 self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)
105 filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)
106 self.Psi = filters['psi']
107 self.Phi = [filters['phi'][j] for j in range(self.J)]
108
109 def _type(self, _type):
110 for key, item in enumerate(self.Psi):
111 for key2, item2 in self.Psi[key].items():
112 if torch.is_tensor(item2):
113 self.Psi[key][key2] = item2.type(_type)
114 self.Phi = [v.type(_type) for v in self.Phi]
115 self.pad.padding_module.type(_type)
116 return self
117
118 def cuda(self):
119 """
120 Moves the parameters of the scattering to the GPU
121 """
122 return self._type(torch.cuda.FloatTensor)
123
124 def cpu(self):
125 """
126 Moves the parameters of the scattering to the CPU
127 """
128 return self._type(torch.FloatTensor)
129
130 def forward(self, input):
131 """Forward pass of the scattering.
132
133 Parameters
134 ----------
135 input : tensor
136 tensor with 3 dimensions :math:`(B, C, M, N)` where :math:`(B, C)` are arbitrary.
137 :math:`B` typically is the batch size, whereas :math:`C` is the number of input channels.
138
139 Returns
140 -------
141 S : tensor
142 scattering of the input, a 4D tensor :math:`(B, C, D, Md, Nd)` where :math:`D` corresponds
143 to a new channel dimension and :math:`(Md, Nd)` are downsampled sizes by a factor :math:`2^J`.
144
145 """
146 if not torch.is_tensor(input):
147 raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))
148
149 if (not input.is_contiguous()):
150 raise (RuntimeError('Tensor must be contiguous!'))
151
152 if((input.size(-1)!=self.N or input.size(-2)!=self.M) and not self.pre_pad):
153 raise (RuntimeError('Tensor must be of spatial size (%i,%i)!'%(self.M,self.N)))
154
155 if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):
156 raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' % (self.M_padded, self.N_padded)))
157
158 if (input.dim() != 4):
159 raise (RuntimeError('Input tensor must be 4D'))
160
161 J = self.J
162 phi = self.Phi
163 psi = self.Psi
164 n = 0
165
166 subsample_fourier = self.subsample_fourier
167 modulus = self.modulus
168 pad = self.pad
169 output_size = 1 + self.L*J
170 if self.max_order == 2:
171 output_size += self.L*self.L*J*(J - 1) // 2
172
173 S = input.new(input.size(0),
174 input.size(1),
175 output_size,
176 self.M_padded//(2**J)-2,
177 self.N_padded//(2**J)-2)
178 U_r = pad(input)
179 U_0_c = fft(U_r, 'C2C') # We trick here with U_r and U_2_c
180
181 # First low pass filter
182 U_1_c = subsample_fourier(cdgmm(U_0_c, phi[0]), k=2**J)
183
184 U_J_r = fft(U_1_c, 'C2R')
185
186 S[..., n, :, :] = unpad(U_J_r)
187 n = n + 1
188
189 for n1 in range(len(psi)):
190 j1 = psi[n1]['j']
191 U_1_c = cdgmm(U_0_c, psi[n1][0])
192 if(j1 > 0):
193 U_1_c = subsample_fourier(U_1_c, k=2 ** j1)
194 U_1_c = fft(U_1_c, 'C2C', inverse=True)
195 U_1_c = fft(modulus(U_1_c), 'C2C')
196
197 # Second low pass filter
198 U_2_c = subsample_fourier(cdgmm(U_1_c, phi[j1]), k=2**(J-j1))
199 U_J_r = fft(U_2_c, 'C2R')
200 S[..., n, :, :] = unpad(U_J_r)
201 n = n + 1
202
203 if self.max_order == 2:
204 for n2 in range(len(psi)):
205 j2 = psi[n2]['j']
206 if(j1 < j2):
207 U_2_c = subsample_fourier(cdgmm(U_1_c, psi[n2][j1]), k=2 ** (j2-j1))
208 U_2_c = fft(U_2_c, 'C2C', inverse=True)
209 U_2_c = fft(modulus(U_2_c), 'C2C')
210
211 # Third low pass filter
212 U_2_c = subsample_fourier(cdgmm(U_2_c, phi[j2]), k=2 ** (J-j2))
213 U_J_r = fft(U_2_c, 'C2R')
214
215 S[..., n, :, :] = unpad(U_J_r)
216 n = n + 1
217
218 return S
219
220 def __call__(self, input):
221 return self.forward(input)
222
```
--- END FILES ---
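For a sense of how many coefficients the `output_size` expression in `forward` produces, a tiny worked example with typical values (J=2, L=8, max_order=2 are my choices, not values from the repository):
```python
J, L = 2, 8
output_size = 1 + L * J                    # zeroth order + first order: 1 + 16
output_size += L * L * J * (J - 1) // 2    # second order: 64
print(output_size)  # 81
```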
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kymatio/scattering2d/scattering2d.py b/kymatio/scattering2d/scattering2d.py
--- a/kymatio/scattering2d/scattering2d.py
+++ b/kymatio/scattering2d/scattering2d.py
@@ -146,6 +146,10 @@
if not torch.is_tensor(input):
raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))
+ if len(input.shape) < 2:
+ raise (RuntimeError('Input tensor must have at least two '
+ 'dimensions'))
+
if (not input.is_contiguous()):
raise (RuntimeError('Tensor must be contiguous!'))
@@ -155,8 +159,10 @@
if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):
raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' % (self.M_padded, self.N_padded)))
- if (input.dim() != 4):
- raise (RuntimeError('Input tensor must be 4D'))
+ batch_shape = input.shape[:-2]
+ signal_shape = input.shape[-2:]
+
+ input = input.reshape((-1, 1) + signal_shape)
J = self.J
phi = self.Phi
@@ -215,6 +221,9 @@
S[..., n, :, :] = unpad(U_J_r)
n = n + 1
+ scattering_shape = S.shape[-3:]
+ S = S.reshape(batch_shape + scattering_shape)
+
return S
def __call__(self, input):
|
{"golden_diff": "diff --git a/kymatio/scattering2d/scattering2d.py b/kymatio/scattering2d/scattering2d.py\n--- a/kymatio/scattering2d/scattering2d.py\n+++ b/kymatio/scattering2d/scattering2d.py\n@@ -146,6 +146,10 @@\n if not torch.is_tensor(input):\n raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))\n \n+ if len(input.shape) < 2:\n+ raise (RuntimeError('Input tensor must have at least two '\n+ 'dimensions'))\n+\n if (not input.is_contiguous()):\n raise (RuntimeError('Tensor must be contiguous!'))\n \n@@ -155,8 +159,10 @@\n if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):\n raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' % (self.M_padded, self.N_padded)))\n \n- if (input.dim() != 4):\n- raise (RuntimeError('Input tensor must be 4D'))\n+ batch_shape = input.shape[:-2]\n+ signal_shape = input.shape[-2:]\n+\n+ input = input.reshape((-1, 1) + signal_shape)\n \n J = self.J\n phi = self.Phi\n@@ -215,6 +221,9 @@\n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n \n+ scattering_shape = S.shape[-3:]\n+ S = S.reshape(batch_shape + scattering_shape)\n+\n return S\n \n def __call__(self, input):\n", "issue": "Make inputs dimension-agnostic (1D/2D)\nIn other words, make `Scattering*D` operators act only on last dimension without forcing a specific convention on previous dimensions. The output would have the same shape, but this the time/spatial dimension subsampled and another (scattering) dimension added right before it. So we'd have\r\n\r\n- in 1D, `batch_shape + (T,)` input gives `batch_shape + (C, T/2**J)` output, and\r\n- in 2D, `batch_shape + (N, M)` input gives `batch_shape + (C, N/2**J, M/2**J)` output,\r\n\r\nwhere `batch_shape` is some (possibly empty) tuple specifying the batch dimensions that are not to be processed and `C` is the number of scattering coefficients.\r\n\r\nThe 3D case is more complicated since it follows a different output schema compared to the 1D and 2D, which will be addressed in a separate issue. 
That being said, it would be good if it could mimic the above behavior.\n", "before_files": [{"content": "# Authors: Edouard Oyallon\n# Scientific Ancestry: Edouard Oyallon, Laurent Sifre, Joan Bruna\n\n\n__all__ = ['Scattering2D']\n\nimport torch\nfrom .backend import cdgmm, Modulus, SubsampleFourier, fft, Pad, unpad\nfrom .filter_bank import filter_bank\nfrom .utils import compute_padding\n\n\nclass Scattering2D(object):\n \"\"\"Main module implementing the scattering transform in 2D.\n The scattering transform computes two wavelet transform followed\n by modulus non-linearity.\n It can be summarized as::\n\n S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]\n\n where::\n\n S_J^0 x = x * phi_J\n S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda\n S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}\n\n where * denotes the convolution (in space), phi_J is a low pass\n filter, psi^1_lambda is a family of band pass\n filters and psi^2_mu is another family of band pass filters.\n Only Morlet filters are used in this implementation.\n Convolutions are efficiently performed in the Fourier domain\n with this implementation.\n\n Example\n -------\n # 1) Define a Scattering object as:\n s = Scattering2D(J, M, N)\n # where (M, N) are the image sizes and 2**J the scale of the scattering\n # 2) Forward on an input Variable x of shape B x 1 x M x N,\n # where B is the batch size.\n result_s = s(x)\n\n Parameters\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute. Must be either\n `1` or `2`. Defaults to `2`.\n pre_pad : boolean, optional\n controls the padding: if set to False, a symmetric padding is applied\n on the signal. If set to true, the software will assume the signal was\n padded externally.\n\n Attributes\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute.\n Must be either equal to `1` or `2`. Defaults to `2`.\n pre_pad : boolean\n controls the padding\n Psi : dictionary\n containing the wavelets filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n Phi : dictionary\n containing the low-pass filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n M_padded, N_padded : int\n spatial support of the padded input\n\n Notes\n -----\n The design of the filters is optimized for the value L = 8\n\n pre_pad is particularly useful when doing crops of a bigger\n image because the padding is then extremely accurate. 
Defaults\n to False.\n\n \"\"\"\n def __init__(self, J, shape, L=8, max_order=2, pre_pad=False):\n self.J, self.L = J, L\n self.pre_pad = pre_pad\n self.max_order = max_order\n self.shape = shape\n\n self.build()\n\n def build(self):\n self.M, self.N = self.shape\n self.modulus = Modulus()\n self.pad = Pad(2**self.J, pre_pad = self.pre_pad)\n self.subsample_fourier = SubsampleFourier()\n # Create the filters\n self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)\n filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)\n self.Psi = filters['psi']\n self.Phi = [filters['phi'][j] for j in range(self.J)]\n\n def _type(self, _type):\n for key, item in enumerate(self.Psi):\n for key2, item2 in self.Psi[key].items():\n if torch.is_tensor(item2):\n self.Psi[key][key2] = item2.type(_type)\n self.Phi = [v.type(_type) for v in self.Phi]\n self.pad.padding_module.type(_type)\n return self\n\n def cuda(self):\n \"\"\"\n Moves the parameters of the scattering to the GPU\n \"\"\"\n return self._type(torch.cuda.FloatTensor)\n\n def cpu(self):\n \"\"\"\n Moves the parameters of the scattering to the CPU\n \"\"\"\n return self._type(torch.FloatTensor)\n\n def forward(self, input):\n \"\"\"Forward pass of the scattering.\n\n Parameters\n ----------\n input : tensor\n tensor with 3 dimensions :math:`(B, C, M, N)` where :math:`(B, C)` are arbitrary.\n :math:`B` typically is the batch size, whereas :math:`C` is the number of input channels.\n\n Returns\n -------\n S : tensor\n scattering of the input, a 4D tensor :math:`(B, C, D, Md, Nd)` where :math:`D` corresponds\n to a new channel dimension and :math:`(Md, Nd)` are downsampled sizes by a factor :math:`2^J`.\n\n \"\"\"\n if not torch.is_tensor(input):\n raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))\n\n if (not input.is_contiguous()):\n raise (RuntimeError('Tensor must be contiguous!'))\n\n if((input.size(-1)!=self.N or input.size(-2)!=self.M) and not self.pre_pad):\n raise (RuntimeError('Tensor must be of spatial size (%i,%i)!'%(self.M,self.N)))\n\n if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):\n raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' 
% (self.M_padded, self.N_padded)))\n\n if (input.dim() != 4):\n raise (RuntimeError('Input tensor must be 4D'))\n\n J = self.J\n phi = self.Phi\n psi = self.Psi\n n = 0\n\n subsample_fourier = self.subsample_fourier\n modulus = self.modulus\n pad = self.pad\n output_size = 1 + self.L*J\n if self.max_order == 2:\n output_size += self.L*self.L*J*(J - 1) // 2\n\n S = input.new(input.size(0),\n input.size(1),\n output_size,\n self.M_padded//(2**J)-2,\n self.N_padded//(2**J)-2)\n U_r = pad(input)\n U_0_c = fft(U_r, 'C2C') # We trick here with U_r and U_2_c\n\n # First low pass filter\n U_1_c = subsample_fourier(cdgmm(U_0_c, phi[0]), k=2**J)\n\n U_J_r = fft(U_1_c, 'C2R')\n\n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n\n for n1 in range(len(psi)):\n j1 = psi[n1]['j']\n U_1_c = cdgmm(U_0_c, psi[n1][0])\n if(j1 > 0):\n U_1_c = subsample_fourier(U_1_c, k=2 ** j1)\n U_1_c = fft(U_1_c, 'C2C', inverse=True)\n U_1_c = fft(modulus(U_1_c), 'C2C')\n\n # Second low pass filter\n U_2_c = subsample_fourier(cdgmm(U_1_c, phi[j1]), k=2**(J-j1))\n U_J_r = fft(U_2_c, 'C2R')\n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n\n if self.max_order == 2:\n for n2 in range(len(psi)):\n j2 = psi[n2]['j']\n if(j1 < j2):\n U_2_c = subsample_fourier(cdgmm(U_1_c, psi[n2][j1]), k=2 ** (j2-j1))\n U_2_c = fft(U_2_c, 'C2C', inverse=True)\n U_2_c = fft(modulus(U_2_c), 'C2C')\n \n # Third low pass filter\n U_2_c = subsample_fourier(cdgmm(U_2_c, phi[j2]), k=2 ** (J-j2))\n U_J_r = fft(U_2_c, 'C2R')\n \n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n\n return S\n\n def __call__(self, input):\n return self.forward(input)\n", "path": "kymatio/scattering2d/scattering2d.py"}], "after_files": [{"content": "# Authors: Edouard Oyallon\n# Scientific Ancestry: Edouard Oyallon, Laurent Sifre, Joan Bruna\n\n\n__all__ = ['Scattering2D']\n\nimport torch\nfrom .backend import cdgmm, Modulus, SubsampleFourier, fft, Pad, unpad\nfrom .filter_bank import filter_bank\nfrom .utils import compute_padding\n\n\nclass Scattering2D(object):\n \"\"\"Main module implementing the scattering transform in 2D.\n The scattering transform computes two wavelet transform followed\n by modulus non-linearity.\n It can be summarized as::\n\n S_J x = [S_J^0 x, S_J^1 x, S_J^2 x]\n\n where::\n\n S_J^0 x = x * phi_J\n S_J^1 x = [|x * psi^1_lambda| * phi_J]_lambda\n S_J^2 x = [||x * psi^1_lambda| * psi^2_mu| * phi_J]_{lambda, mu}\n\n where * denotes the convolution (in space), phi_J is a low pass\n filter, psi^1_lambda is a family of band pass\n filters and psi^2_mu is another family of band pass filters.\n Only Morlet filters are used in this implementation.\n Convolutions are efficiently performed in the Fourier domain\n with this implementation.\n\n Example\n -------\n # 1) Define a Scattering object as:\n s = Scattering2D(J, M, N)\n # where (M, N) are the image sizes and 2**J the scale of the scattering\n # 2) Forward on an input Variable x of shape B x 1 x M x N,\n # where B is the batch size.\n result_s = s(x)\n\n Parameters\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute. Must be either\n `1` or `2`. Defaults to `2`.\n pre_pad : boolean, optional\n controls the padding: if set to False, a symmetric padding is applied\n on the signal. 
If set to true, the software will assume the signal was\n padded externally.\n\n Attributes\n ----------\n J : int\n logscale of the scattering\n shape : tuple of int\n spatial support (M, N) of the input\n L : int, optional\n number of angles used for the wavelet transform\n max_order : int, optional\n The maximum order of scattering coefficients to compute.\n Must be either equal to `1` or `2`. Defaults to `2`.\n pre_pad : boolean\n controls the padding\n Psi : dictionary\n containing the wavelets filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n Phi : dictionary\n containing the low-pass filters at all resolutions. See\n filter_bank.filter_bank for an exact description.\n M_padded, N_padded : int\n spatial support of the padded input\n\n Notes\n -----\n The design of the filters is optimized for the value L = 8\n\n pre_pad is particularly useful when doing crops of a bigger\n image because the padding is then extremely accurate. Defaults\n to False.\n\n \"\"\"\n def __init__(self, J, shape, L=8, max_order=2, pre_pad=False):\n self.J, self.L = J, L\n self.pre_pad = pre_pad\n self.max_order = max_order\n self.shape = shape\n\n self.build()\n\n def build(self):\n self.M, self.N = self.shape\n self.modulus = Modulus()\n self.pad = Pad(2**self.J, pre_pad = self.pre_pad)\n self.subsample_fourier = SubsampleFourier()\n # Create the filters\n self.M_padded, self.N_padded = compute_padding(self.M, self.N, self.J)\n filters = filter_bank(self.M_padded, self.N_padded, self.J, self.L)\n self.Psi = filters['psi']\n self.Phi = [filters['phi'][j] for j in range(self.J)]\n\n def _type(self, _type):\n for key, item in enumerate(self.Psi):\n for key2, item2 in self.Psi[key].items():\n if torch.is_tensor(item2):\n self.Psi[key][key2] = item2.type(_type)\n self.Phi = [v.type(_type) for v in self.Phi]\n self.pad.padding_module.type(_type)\n return self\n\n def cuda(self):\n \"\"\"\n Moves the parameters of the scattering to the GPU\n \"\"\"\n return self._type(torch.cuda.FloatTensor)\n\n def cpu(self):\n \"\"\"\n Moves the parameters of the scattering to the CPU\n \"\"\"\n return self._type(torch.FloatTensor)\n\n def forward(self, input):\n \"\"\"Forward pass of the scattering.\n\n Parameters\n ----------\n input : tensor\n tensor with 3 dimensions :math:`(B, C, M, N)` where :math:`(B, C)` are arbitrary.\n :math:`B` typically is the batch size, whereas :math:`C` is the number of input channels.\n\n Returns\n -------\n S : tensor\n scattering of the input, a 4D tensor :math:`(B, C, D, Md, Nd)` where :math:`D` corresponds\n to a new channel dimension and :math:`(Md, Nd)` are downsampled sizes by a factor :math:`2^J`.\n\n \"\"\"\n if not torch.is_tensor(input):\n raise(TypeError('The input should be a torch.cuda.FloatTensor, a torch.FloatTensor or a torch.DoubleTensor'))\n\n if len(input.shape) < 2:\n raise (RuntimeError('Input tensor must have at least two '\n 'dimensions'))\n\n if (not input.is_contiguous()):\n raise (RuntimeError('Tensor must be contiguous!'))\n\n if((input.size(-1)!=self.N or input.size(-2)!=self.M) and not self.pre_pad):\n raise (RuntimeError('Tensor must be of spatial size (%i,%i)!'%(self.M,self.N)))\n\n if ((input.size(-1) != self.N_padded or input.size(-2) != self.M_padded) and self.pre_pad):\n raise (RuntimeError('Padded tensor must be of spatial size (%i,%i)!' 
% (self.M_padded, self.N_padded)))\n\n batch_shape = input.shape[:-2]\n signal_shape = input.shape[-2:]\n\n input = input.reshape((-1, 1) + signal_shape)\n\n J = self.J\n phi = self.Phi\n psi = self.Psi\n n = 0\n\n subsample_fourier = self.subsample_fourier\n modulus = self.modulus\n pad = self.pad\n output_size = 1 + self.L*J\n if self.max_order == 2:\n output_size += self.L*self.L*J*(J - 1) // 2\n\n S = input.new(input.size(0),\n input.size(1),\n output_size,\n self.M_padded//(2**J)-2,\n self.N_padded//(2**J)-2)\n U_r = pad(input)\n U_0_c = fft(U_r, 'C2C') # We trick here with U_r and U_2_c\n\n # First low pass filter\n U_1_c = subsample_fourier(cdgmm(U_0_c, phi[0]), k=2**J)\n\n U_J_r = fft(U_1_c, 'C2R')\n\n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n\n for n1 in range(len(psi)):\n j1 = psi[n1]['j']\n U_1_c = cdgmm(U_0_c, psi[n1][0])\n if(j1 > 0):\n U_1_c = subsample_fourier(U_1_c, k=2 ** j1)\n U_1_c = fft(U_1_c, 'C2C', inverse=True)\n U_1_c = fft(modulus(U_1_c), 'C2C')\n\n # Second low pass filter\n U_2_c = subsample_fourier(cdgmm(U_1_c, phi[j1]), k=2**(J-j1))\n U_J_r = fft(U_2_c, 'C2R')\n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n\n if self.max_order == 2:\n for n2 in range(len(psi)):\n j2 = psi[n2]['j']\n if(j1 < j2):\n U_2_c = subsample_fourier(cdgmm(U_1_c, psi[n2][j1]), k=2 ** (j2-j1))\n U_2_c = fft(U_2_c, 'C2C', inverse=True)\n U_2_c = fft(modulus(U_2_c), 'C2C')\n \n # Third low pass filter\n U_2_c = subsample_fourier(cdgmm(U_2_c, phi[j2]), k=2 ** (J-j2))\n U_J_r = fft(U_2_c, 'C2R')\n \n S[..., n, :, :] = unpad(U_J_r)\n n = n + 1\n\n scattering_shape = S.shape[-3:]\n S = S.reshape(batch_shape + scattering_shape)\n\n return S\n\n def __call__(self, input):\n return self.forward(input)\n", "path": "kymatio/scattering2d/scattering2d.py"}]}
| 3,153 | 387 |
gh_patches_debug_13540
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-7939
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using on_event with string names in Python2.7
As discovered in #7893, trying to set `on_event` with a string (i.e. `"tap"` instead of `events.Tap`) currently fails with:
``` python
=================================== FAILURES ===================================
Test_standalone_docs_json_and_render_items.test_log_warning_if_python_event_callback
self = <bokeh.embed.tests.test_util.Test_standalone_docs_json_and_render_items object at 0x7f6f7a9a5950>
caplog = <_pytest.logging.LogCaptureFixture object at 0x7f6f7a9a57d0>
def test_log_warning_if_python_event_callback(self, caplog):
d = Document()
m1 = EmbedTestUtilModel()
c1 = _GoodEventCallback()
d.add_root(m1)
> m1.on_event('tap', c1)
bokeh/embed/tests/test_util.py:155:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = EmbedTestUtilModel(id='b0037dfa-87fa-48d7-82ea-df2b14d2aff4', ...)
event = 'tap'
callbacks = (<bokeh.embed.tests.test_util._GoodEventCallback object at 0x7f6f7a9a5d90>,)
def on_event(self, event, *callbacks):
> if not isinstance(event, str) and issubclass(event, Event):
E TypeError: issubclass() arg 1 must be a class
bokeh/util/callback_manager.py:31: TypeError
```
We should:
- [ ] Add tests that check setting `on_event` with a string
- [ ] Fix `bokeh/util/callback_manager.py` to accept strings in Python2.7
--- END ISSUE ---
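The failing guard short-circuits on `isinstance(event, str)`, which is not enough on Python 2 when the event name is a `unicode` object (for example under `unicode_literals`), so `issubclass()` ends up being called with a non-class argument. A minimal standalone sketch of the check using `six.string_types` (toy `Event`/`Tap` classes, not Bokeh's real event hierarchy):
```python
from six import string_types  # (str, unicode) on Python 2, (str,) on Python 3

class Event(object):
    event_name = None

class Tap(Event):
    event_name = 'tap'

def normalize_event(event):
    # issubclass() must only ever see classes; byte and unicode strings pass through untouched.
    if not isinstance(event, string_types) and issubclass(event, Event):
        event = event.event_name
    return event

print(normalize_event(Tap))     # 'tap'
print(normalize_event(u'tap'))  # 'tap', with no TypeError on either Python version
```
This is the same one-line substitution the accepted patch makes in `callback_manager.py`.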
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/util/callback_manager.py`
Content:
```
1 ''' Provides ``PropertyCallbackManager`` and ``EventCallbackManager``
2 mixin classes for adding ``on_change`` and ``on_event`` callback
3 interfaces to classes.
4 '''
5 from __future__ import absolute_import
6
7 from ..events import Event
8 from ..util.future import get_param_info, format_signature, signature
9
10 def _check_callback(callback, fargs, what="Callback functions"):
11 '''Bokeh-internal function to check callback signature'''
12 sig = signature(callback)
13 formatted_args = format_signature(sig)
14 error_msg = what + " must have signature func(%s), got func%s"
15
16 all_names, default_values = get_param_info(sig)
17
18 if len(all_names) - len(default_values) != len(fargs):
19 raise ValueError(error_msg % (", ".join(fargs), formatted_args))
20
21 class EventCallbackManager(object):
22 ''' A mixin class to provide an interface for registering and
23 triggering event callbacks on the Python side.
24
25 '''
26 def __init__(self, *args, **kw):
27 super(EventCallbackManager, self).__init__(*args, **kw)
28 self._event_callbacks = dict()
29
30 def on_event(self, event, *callbacks):
31 if not isinstance(event, str) and issubclass(event, Event):
32 event = event.event_name
33
34 for callback in callbacks:
35 _check_callback(callback, ('event',), what='Event callback')
36
37 if event not in self._event_callbacks:
38 self._event_callbacks[event] = [cb for cb in callbacks]
39 else:
40 self._event_callbacks[event].extend(callbacks)
41
42 if event not in self.subscribed_events:
43 self.subscribed_events.append(event)
44
45 def _trigger_event(self, event):
46 for callback in self._event_callbacks.get(event.event_name,[]):
47 if event._model_id is not None and self._id == event._model_id:
48 callback(event)
49
50 def _update_event_callbacks(self):
51 if self.document is None:
52 return
53
54 for key in self._event_callbacks:
55 self.document._subscribed_models[key].add(self)
56
57
58 class PropertyCallbackManager(object):
59 ''' A mixin class to provide an interface for registering and
60 triggering callbacks.
61
62 '''
63
64 def __init__(self, *args, **kw):
65 super(PropertyCallbackManager, self).__init__(*args, **kw)
66 self._callbacks = dict()
67
68 def on_change(self, attr, *callbacks):
69 ''' Add a callback on this object to trigger when ``attr`` changes.
70
71 Args:
72 attr (str) : an attribute name on this object
73 callback (callable) : a callback function to register
74
75 Returns:
76 None
77
78 '''
79 if len(callbacks) == 0:
80 raise ValueError("on_change takes an attribute name and one or more callbacks, got only one parameter")
81
82 _callbacks = self._callbacks.setdefault(attr, [])
83 for callback in callbacks:
84
85 if callback in _callbacks:
86 continue
87
88 _check_callback(callback, ('attr', 'old', 'new'))
89
90 _callbacks.append(callback)
91
92 def remove_on_change(self, attr, *callbacks):
93 ''' Remove a callback from this object '''
94 if len(callbacks) == 0:
95 raise ValueError("remove_on_change takes an attribute name and one or more callbacks, got only one parameter")
96 _callbacks = self._callbacks.setdefault(attr, [])
97 for callback in callbacks:
98 _callbacks.remove(callback)
99
100 def trigger(self, attr, old, new, hint=None, setter=None):
101 ''' Trigger callbacks for ``attr`` on this object.
102
103 Args:
104 attr (str) :
105 old (object) :
106 new (object) :
107
108 Returns:
109 None
110
111 '''
112 def invoke():
113 callbacks = self._callbacks.get(attr)
114 if callbacks:
115 for callback in callbacks:
116 callback(attr, old, new)
117 if hasattr(self, '_document') and self._document is not None:
118 self._document._notify_change(self, attr, old, new, hint, setter, invoke)
119 else:
120 invoke()
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/util/callback_manager.py b/bokeh/util/callback_manager.py
--- a/bokeh/util/callback_manager.py
+++ b/bokeh/util/callback_manager.py
@@ -4,6 +4,8 @@
'''
from __future__ import absolute_import
+from six import string_types
+
from ..events import Event
from ..util.future import get_param_info, format_signature, signature
@@ -28,7 +30,7 @@
self._event_callbacks = dict()
def on_event(self, event, *callbacks):
- if not isinstance(event, str) and issubclass(event, Event):
+ if not isinstance(event, string_types) and issubclass(event, Event):
event = event.event_name
for callback in callbacks:
|
{"golden_diff": "diff --git a/bokeh/util/callback_manager.py b/bokeh/util/callback_manager.py\n--- a/bokeh/util/callback_manager.py\n+++ b/bokeh/util/callback_manager.py\n@@ -4,6 +4,8 @@\n '''\n from __future__ import absolute_import\n \n+from six import string_types\n+\n from ..events import Event\n from ..util.future import get_param_info, format_signature, signature\n \n@@ -28,7 +30,7 @@\n self._event_callbacks = dict()\n \n def on_event(self, event, *callbacks):\n- if not isinstance(event, str) and issubclass(event, Event):\n+ if not isinstance(event, string_types) and issubclass(event, Event):\n event = event.event_name\n \n for callback in callbacks:\n", "issue": "Using on_event with string names in Python2.7\nAs discovered in #7893, trying to set `on_event` with a string (i.e. `\"tap\"` instead of `events.Tap`) currently fails with:\r\n\r\n``` python\r\n=================================== FAILURES ===================================\r\n Test_standalone_docs_json_and_render_items.test_log_warning_if_python_event_callback \r\nself = <bokeh.embed.tests.test_util.Test_standalone_docs_json_and_render_items object at 0x7f6f7a9a5950>\r\ncaplog = <_pytest.logging.LogCaptureFixture object at 0x7f6f7a9a57d0>\r\n def test_log_warning_if_python_event_callback(self, caplog):\r\n d = Document()\r\n m1 = EmbedTestUtilModel()\r\n c1 = _GoodEventCallback()\r\n d.add_root(m1)\r\n \r\n> m1.on_event('tap', c1)\r\nbokeh/embed/tests/test_util.py:155: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\nself = EmbedTestUtilModel(id='b0037dfa-87fa-48d7-82ea-df2b14d2aff4', ...)\r\nevent = 'tap'\r\ncallbacks = (<bokeh.embed.tests.test_util._GoodEventCallback object at 0x7f6f7a9a5d90>,)\r\n def on_event(self, event, *callbacks):\r\n> if not isinstance(event, str) and issubclass(event, Event):\r\nE TypeError: issubclass() arg 1 must be a class\r\nbokeh/util/callback_manager.py:31: TypeError\r\n```\r\n\r\nWe should:\r\n- [ ] Add tests that check setting `on_event` with a string\r\n- [ ] Fix `bokeh/util/callback_manager.py` to accept strings in Python2.7\n", "before_files": [{"content": "''' Provides ``PropertyCallbackManager`` and ``EventCallbackManager``\nmixin classes for adding ``on_change`` and ``on_event`` callback\ninterfaces to classes.\n'''\nfrom __future__ import absolute_import\n\nfrom ..events import Event\nfrom ..util.future import get_param_info, format_signature, signature\n\ndef _check_callback(callback, fargs, what=\"Callback functions\"):\n '''Bokeh-internal function to check callback signature'''\n sig = signature(callback)\n formatted_args = format_signature(sig)\n error_msg = what + \" must have signature func(%s), got func%s\"\n\n all_names, default_values = get_param_info(sig)\n\n if len(all_names) - len(default_values) != len(fargs):\n raise ValueError(error_msg % (\", \".join(fargs), formatted_args))\n\nclass EventCallbackManager(object):\n ''' A mixin class to provide an interface for registering and\n triggering event callbacks on the Python side.\n\n '''\n def __init__(self, *args, **kw):\n super(EventCallbackManager, self).__init__(*args, **kw)\n self._event_callbacks = dict()\n\n def on_event(self, event, *callbacks):\n if not isinstance(event, str) and issubclass(event, Event):\n event = event.event_name\n\n for callback in callbacks:\n _check_callback(callback, ('event',), what='Event callback')\n\n if event not in self._event_callbacks:\n self._event_callbacks[event] = [cb for cb in callbacks]\n else:\n 
self._event_callbacks[event].extend(callbacks)\n\n if event not in self.subscribed_events:\n self.subscribed_events.append(event)\n\n def _trigger_event(self, event):\n for callback in self._event_callbacks.get(event.event_name,[]):\n if event._model_id is not None and self._id == event._model_id:\n callback(event)\n\n def _update_event_callbacks(self):\n if self.document is None:\n return\n\n for key in self._event_callbacks:\n self.document._subscribed_models[key].add(self)\n\n\nclass PropertyCallbackManager(object):\n ''' A mixin class to provide an interface for registering and\n triggering callbacks.\n\n '''\n\n def __init__(self, *args, **kw):\n super(PropertyCallbackManager, self).__init__(*args, **kw)\n self._callbacks = dict()\n\n def on_change(self, attr, *callbacks):\n ''' Add a callback on this object to trigger when ``attr`` changes.\n\n Args:\n attr (str) : an attribute name on this object\n callback (callable) : a callback function to register\n\n Returns:\n None\n\n '''\n if len(callbacks) == 0:\n raise ValueError(\"on_change takes an attribute name and one or more callbacks, got only one parameter\")\n\n _callbacks = self._callbacks.setdefault(attr, [])\n for callback in callbacks:\n\n if callback in _callbacks:\n continue\n\n _check_callback(callback, ('attr', 'old', 'new'))\n\n _callbacks.append(callback)\n\n def remove_on_change(self, attr, *callbacks):\n ''' Remove a callback from this object '''\n if len(callbacks) == 0:\n raise ValueError(\"remove_on_change takes an attribute name and one or more callbacks, got only one parameter\")\n _callbacks = self._callbacks.setdefault(attr, [])\n for callback in callbacks:\n _callbacks.remove(callback)\n\n def trigger(self, attr, old, new, hint=None, setter=None):\n ''' Trigger callbacks for ``attr`` on this object.\n\n Args:\n attr (str) :\n old (object) :\n new (object) :\n\n Returns:\n None\n\n '''\n def invoke():\n callbacks = self._callbacks.get(attr)\n if callbacks:\n for callback in callbacks:\n callback(attr, old, new)\n if hasattr(self, '_document') and self._document is not None:\n self._document._notify_change(self, attr, old, new, hint, setter, invoke)\n else:\n invoke()\n", "path": "bokeh/util/callback_manager.py"}], "after_files": [{"content": "''' Provides ``PropertyCallbackManager`` and ``EventCallbackManager``\nmixin classes for adding ``on_change`` and ``on_event`` callback\ninterfaces to classes.\n'''\nfrom __future__ import absolute_import\n\nfrom six import string_types\n\nfrom ..events import Event\nfrom ..util.future import get_param_info, format_signature, signature\n\ndef _check_callback(callback, fargs, what=\"Callback functions\"):\n '''Bokeh-internal function to check callback signature'''\n sig = signature(callback)\n formatted_args = format_signature(sig)\n error_msg = what + \" must have signature func(%s), got func%s\"\n\n all_names, default_values = get_param_info(sig)\n\n if len(all_names) - len(default_values) != len(fargs):\n raise ValueError(error_msg % (\", \".join(fargs), formatted_args))\n\nclass EventCallbackManager(object):\n ''' A mixin class to provide an interface for registering and\n triggering event callbacks on the Python side.\n\n '''\n def __init__(self, *args, **kw):\n super(EventCallbackManager, self).__init__(*args, **kw)\n self._event_callbacks = dict()\n\n def on_event(self, event, *callbacks):\n if not isinstance(event, string_types) and issubclass(event, Event):\n event = event.event_name\n\n for callback in callbacks:\n _check_callback(callback, ('event',), what='Event 
callback')\n\n if event not in self._event_callbacks:\n self._event_callbacks[event] = [cb for cb in callbacks]\n else:\n self._event_callbacks[event].extend(callbacks)\n\n if event not in self.subscribed_events:\n self.subscribed_events.append(event)\n\n def _trigger_event(self, event):\n for callback in self._event_callbacks.get(event.event_name,[]):\n if event._model_id is not None and self._id == event._model_id:\n callback(event)\n\n def _update_event_callbacks(self):\n if self.document is None:\n return\n\n for key in self._event_callbacks:\n self.document._subscribed_models[key].add(self)\n\n\nclass PropertyCallbackManager(object):\n ''' A mixin class to provide an interface for registering and\n triggering callbacks.\n\n '''\n\n def __init__(self, *args, **kw):\n super(PropertyCallbackManager, self).__init__(*args, **kw)\n self._callbacks = dict()\n\n def on_change(self, attr, *callbacks):\n ''' Add a callback on this object to trigger when ``attr`` changes.\n\n Args:\n attr (str) : an attribute name on this object\n callback (callable) : a callback function to register\n\n Returns:\n None\n\n '''\n if len(callbacks) == 0:\n raise ValueError(\"on_change takes an attribute name and one or more callbacks, got only one parameter\")\n\n _callbacks = self._callbacks.setdefault(attr, [])\n for callback in callbacks:\n\n if callback in _callbacks:\n continue\n\n _check_callback(callback, ('attr', 'old', 'new'))\n\n _callbacks.append(callback)\n\n def remove_on_change(self, attr, *callbacks):\n ''' Remove a callback from this object '''\n if len(callbacks) == 0:\n raise ValueError(\"remove_on_change takes an attribute name and one or more callbacks, got only one parameter\")\n _callbacks = self._callbacks.setdefault(attr, [])\n for callback in callbacks:\n _callbacks.remove(callback)\n\n def trigger(self, attr, old, new, hint=None, setter=None):\n ''' Trigger callbacks for ``attr`` on this object.\n\n Args:\n attr (str) :\n old (object) :\n new (object) :\n\n Returns:\n None\n\n '''\n def invoke():\n callbacks = self._callbacks.get(attr)\n if callbacks:\n for callback in callbacks:\n callback(attr, old, new)\n if hasattr(self, '_document') and self._document is not None:\n self._document._notify_change(self, attr, old, new, hint, setter, invoke)\n else:\n invoke()\n", "path": "bokeh/util/callback_manager.py"}]}
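Editor's illustrative sketch (not part of the dataset record): the escaped `bokeh/util/callback_manager.py` content above checks each callback's signature (via `_check_callback`) before registering it with `on_change` or `on_event`. Below is a standalone sketch of that idea using `inspect.signature` directly rather than bokeh's internal `signature`/`get_param_info` helpers; `check_callback` and `on_width_change` are stand-in names, not bokeh API.

```python
import inspect


def check_callback(callback, fargs, what="Callback functions"):
    """Minimal stand-in for the signature check sketched above."""
    sig = inspect.signature(callback)
    # Count the parameters the caller must supply positionally.
    required = [p for p in sig.parameters.values()
                if p.default is inspect.Parameter.empty
                and p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
    if len(required) != len(fargs):
        raise ValueError("%s must have signature func(%s), got func%s"
                         % (what, ", ".join(fargs), sig))


def on_width_change(attr, old, new):
    print("property %r changed from %r to %r" % (attr, old, new))


if __name__ == "__main__":
    check_callback(on_width_change, ("attr", "old", "new"))   # accepted
    try:
        check_callback(lambda attr: None, ("attr", "old", "new"))
    except ValueError as exc:
        print("rejected:", exc)
```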
| 1,803 | 165 |
gh_patches_debug_6987
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-750
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
-f does not work as expected in rio-warp
Running on a preexisting file
```
» rio warp tests/data/RGB.byte.tif /tmp/hi.tif
Error: Could not open file : file exists and won't be overwritten without use of the `-f` or `-o` options.
```
Then
```
» rio warp tests/data/RGB.byte.tif /tmp/hi.tif -f
Error: -f option requires an argument
```
But
```
rio warp tests/data/RGB.byte.tif /tmp/hi.tif --force-overwrite
```
Works.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/helpers.py`
Content:
```
1 """
2 Helper objects used by multiple CLI commands.
3 """
4
5 import json
6 import os
7
8 from rasterio.errors import FileOverwriteError
9
10
11 def coords(obj):
12 """Yield all coordinate coordinate tuples from a geometry or feature.
13 From python-geojson package."""
14 if isinstance(obj, (tuple, list)):
15 coordinates = obj
16 elif 'geometry' in obj:
17 coordinates = obj['geometry']['coordinates']
18 else:
19 coordinates = obj.get('coordinates', obj)
20 for e in coordinates:
21 if isinstance(e, (float, int)):
22 yield tuple(coordinates)
23 break
24 else:
25 for f in coords(e):
26 yield f
27
28
29 def write_features(
30 fobj, collection, sequence=False, geojson_type='feature', use_rs=False,
31 **dump_kwds):
32 """Read an iterator of (feat, bbox) pairs and write to file using
33 the selected modes."""
34 # Sequence of features expressed as bbox, feature, or collection.
35 if sequence:
36 for feat in collection():
37 xs, ys = zip(*coords(feat))
38 bbox = (min(xs), min(ys), max(xs), max(ys))
39 if use_rs:
40 fobj.write(u'\u001e')
41 if geojson_type == 'feature':
42 fobj.write(json.dumps(feat, **dump_kwds))
43 elif geojson_type == 'bbox':
44 fobj.write(json.dumps(bbox, **dump_kwds))
45 else:
46 fobj.write(
47 json.dumps({
48 'type': 'FeatureCollection',
49 'bbox': bbox,
50 'features': [feat]}, **dump_kwds))
51 fobj.write('\n')
52 # Aggregate all features into a single object expressed as
53 # bbox or collection.
54 else:
55 features = list(collection())
56 if geojson_type == 'bbox':
57 fobj.write(json.dumps(collection.bbox, **dump_kwds))
58 elif geojson_type == 'feature':
59 fobj.write(json.dumps(features[0], **dump_kwds))
60 else:
61 fobj.write(json.dumps({
62 'bbox': collection.bbox,
63 'type': 'FeatureCollection',
64 'features': features},
65 **dump_kwds))
66 fobj.write('\n')
67
68
69 def resolve_inout(input=None, output=None, files=None, force_overwrite=False):
70 """Resolves inputs and outputs from standard args and options.
71
72 :param input: a single input filename, optional.
73 :param output: a single output filename, optional.
74 :param files: a sequence of filenames in which the last is the
75 output filename.
76 :param force_overwrite: whether to force overwriting the output
77 file, bool.
78 :return: the resolved output filename and input filenames as a
79 tuple of length 2.
80
81 If provided, the :param:`output` file may be overwritten. An output
82 file extracted from :param:`files` will not be overwritten unless
83 :param:`force_overwrite` is `True`.
84 """
85 resolved_output = output or (files[-1] if files else None)
86 force_overwrite = output is not None or force_overwrite
87 if not force_overwrite and resolved_output and os.path.exists(
88 resolved_output):
89 raise FileOverwriteError(
90 "file exists and won't be overwritten without use of the "
91 "`-f` or `-o` options.")
92 resolved_inputs = (
93 [input] if input else [] +
94 list(files[:-1 if not output else None]) if files else [])
95 return resolved_output, resolved_inputs
96
97
98 def to_lower(ctx, param, value):
99 """Click callback, converts values to lowercase."""
100 return value.lower()
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/rio/helpers.py b/rasterio/rio/helpers.py
--- a/rasterio/rio/helpers.py
+++ b/rasterio/rio/helpers.py
@@ -88,7 +88,7 @@
resolved_output):
raise FileOverwriteError(
"file exists and won't be overwritten without use of the "
- "`-f` or `-o` options.")
+ "`--force-overwrite` or `--output` options.")
resolved_inputs = (
[input] if input else [] +
list(files[:-1 if not output else None]) if files else [])
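Editor's illustrative sketch (not part of the dataset record): a self-contained rendering of the overwrite rule that the patched message now describes. It copies the decision logic of `resolve_inout` shown above; `FileOverwriteError` is redefined locally only so the example runs without rasterio installed, and `resolve_output` is a hypothetical helper name.

```python
import os
import tempfile


class FileOverwriteError(RuntimeError):
    """Local stand-in for rasterio.errors.FileOverwriteError."""


def resolve_output(output=None, files=None, force_overwrite=False):
    # Same decision logic as resolve_inout() above: an explicit --output may
    # be overwritten, an output taken from the positional file list may not,
    # unless --force-overwrite is passed.
    resolved_output = output or (files[-1] if files else None)
    force_overwrite = output is not None or force_overwrite
    if not force_overwrite and resolved_output and os.path.exists(resolved_output):
        raise FileOverwriteError(
            "file exists and won't be overwritten without use of the "
            "`--force-overwrite` or `--output` options.")
    return resolved_output


if __name__ == "__main__":
    with tempfile.NamedTemporaryFile(suffix=".tif") as existing:
        try:
            resolve_output(files=["in.tif", existing.name])
        except FileOverwriteError as exc:
            print("refused:", exc)
        # The --force-overwrite flag maps to force_overwrite=True and succeeds.
        print("allowed:", resolve_output(files=["in.tif", existing.name],
                                         force_overwrite=True))
```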
|
{"golden_diff": "diff --git a/rasterio/rio/helpers.py b/rasterio/rio/helpers.py\n--- a/rasterio/rio/helpers.py\n+++ b/rasterio/rio/helpers.py\n@@ -88,7 +88,7 @@\n resolved_output):\n raise FileOverwriteError(\n \"file exists and won't be overwritten without use of the \"\n- \"`-f` or `-o` options.\")\n+ \"`--force-overwrite` or `--output` options.\")\n resolved_inputs = (\n [input] if input else [] +\n list(files[:-1 if not output else None]) if files else [])\n", "issue": "-f does not work as expected in rio-warp\nRunning on a preexisting file\n\n```\n\u00bb rio warp tests/data/RGB.byte.tif /tmp/hi.tif\nError: Could not open file : file exists and won't be overwritten without use of the `-f` or `-o` options.\n```\n\nThen\n\n```\n\u00bb rio warp tests/data/RGB.byte.tif /tmp/hi.tif -f\nError: -f option requires an argument\n```\n\nBut\n\n```\nrio warp tests/data/RGB.byte.tif /tmp/hi.tif --force-overwrite\n```\n\nWorks.\n\n", "before_files": [{"content": "\"\"\"\nHelper objects used by multiple CLI commands.\n\"\"\"\n\nimport json\nimport os\n\nfrom rasterio.errors import FileOverwriteError\n\n\ndef coords(obj):\n \"\"\"Yield all coordinate coordinate tuples from a geometry or feature.\n From python-geojson package.\"\"\"\n if isinstance(obj, (tuple, list)):\n coordinates = obj\n elif 'geometry' in obj:\n coordinates = obj['geometry']['coordinates']\n else:\n coordinates = obj.get('coordinates', obj)\n for e in coordinates:\n if isinstance(e, (float, int)):\n yield tuple(coordinates)\n break\n else:\n for f in coords(e):\n yield f\n\n\ndef write_features(\n fobj, collection, sequence=False, geojson_type='feature', use_rs=False,\n **dump_kwds):\n \"\"\"Read an iterator of (feat, bbox) pairs and write to file using\n the selected modes.\"\"\"\n # Sequence of features expressed as bbox, feature, or collection.\n if sequence:\n for feat in collection():\n xs, ys = zip(*coords(feat))\n bbox = (min(xs), min(ys), max(xs), max(ys))\n if use_rs:\n fobj.write(u'\\u001e')\n if geojson_type == 'feature':\n fobj.write(json.dumps(feat, **dump_kwds))\n elif geojson_type == 'bbox':\n fobj.write(json.dumps(bbox, **dump_kwds))\n else:\n fobj.write(\n json.dumps({\n 'type': 'FeatureCollection',\n 'bbox': bbox,\n 'features': [feat]}, **dump_kwds))\n fobj.write('\\n')\n # Aggregate all features into a single object expressed as\n # bbox or collection.\n else:\n features = list(collection())\n if geojson_type == 'bbox':\n fobj.write(json.dumps(collection.bbox, **dump_kwds))\n elif geojson_type == 'feature':\n fobj.write(json.dumps(features[0], **dump_kwds))\n else:\n fobj.write(json.dumps({\n 'bbox': collection.bbox,\n 'type': 'FeatureCollection',\n 'features': features},\n **dump_kwds))\n fobj.write('\\n')\n\n\ndef resolve_inout(input=None, output=None, files=None, force_overwrite=False):\n \"\"\"Resolves inputs and outputs from standard args and options.\n\n :param input: a single input filename, optional.\n :param output: a single output filename, optional.\n :param files: a sequence of filenames in which the last is the\n output filename.\n :param force_overwrite: whether to force overwriting the output\n file, bool.\n :return: the resolved output filename and input filenames as a\n tuple of length 2.\n\n If provided, the :param:`output` file may be overwritten. 
An output\n file extracted from :param:`files` will not be overwritten unless\n :param:`force_overwrite` is `True`.\n \"\"\"\n resolved_output = output or (files[-1] if files else None)\n force_overwrite = output is not None or force_overwrite\n if not force_overwrite and resolved_output and os.path.exists(\n resolved_output):\n raise FileOverwriteError(\n \"file exists and won't be overwritten without use of the \"\n \"`-f` or `-o` options.\")\n resolved_inputs = (\n [input] if input else [] +\n list(files[:-1 if not output else None]) if files else [])\n return resolved_output, resolved_inputs\n\n\ndef to_lower(ctx, param, value):\n \"\"\"Click callback, converts values to lowercase.\"\"\"\n return value.lower()\n", "path": "rasterio/rio/helpers.py"}], "after_files": [{"content": "\"\"\"\nHelper objects used by multiple CLI commands.\n\"\"\"\n\nimport json\nimport os\n\nfrom rasterio.errors import FileOverwriteError\n\n\ndef coords(obj):\n \"\"\"Yield all coordinate coordinate tuples from a geometry or feature.\n From python-geojson package.\"\"\"\n if isinstance(obj, (tuple, list)):\n coordinates = obj\n elif 'geometry' in obj:\n coordinates = obj['geometry']['coordinates']\n else:\n coordinates = obj.get('coordinates', obj)\n for e in coordinates:\n if isinstance(e, (float, int)):\n yield tuple(coordinates)\n break\n else:\n for f in coords(e):\n yield f\n\n\ndef write_features(\n fobj, collection, sequence=False, geojson_type='feature', use_rs=False,\n **dump_kwds):\n \"\"\"Read an iterator of (feat, bbox) pairs and write to file using\n the selected modes.\"\"\"\n # Sequence of features expressed as bbox, feature, or collection.\n if sequence:\n for feat in collection():\n xs, ys = zip(*coords(feat))\n bbox = (min(xs), min(ys), max(xs), max(ys))\n if use_rs:\n fobj.write(u'\\u001e')\n if geojson_type == 'feature':\n fobj.write(json.dumps(feat, **dump_kwds))\n elif geojson_type == 'bbox':\n fobj.write(json.dumps(bbox, **dump_kwds))\n else:\n fobj.write(\n json.dumps({\n 'type': 'FeatureCollection',\n 'bbox': bbox,\n 'features': [feat]}, **dump_kwds))\n fobj.write('\\n')\n # Aggregate all features into a single object expressed as\n # bbox or collection.\n else:\n features = list(collection())\n if geojson_type == 'bbox':\n fobj.write(json.dumps(collection.bbox, **dump_kwds))\n elif geojson_type == 'feature':\n fobj.write(json.dumps(features[0], **dump_kwds))\n else:\n fobj.write(json.dumps({\n 'bbox': collection.bbox,\n 'type': 'FeatureCollection',\n 'features': features},\n **dump_kwds))\n fobj.write('\\n')\n\n\ndef resolve_inout(input=None, output=None, files=None, force_overwrite=False):\n \"\"\"Resolves inputs and outputs from standard args and options.\n\n :param input: a single input filename, optional.\n :param output: a single output filename, optional.\n :param files: a sequence of filenames in which the last is the\n output filename.\n :param force_overwrite: whether to force overwriting the output\n file, bool.\n :return: the resolved output filename and input filenames as a\n tuple of length 2.\n\n If provided, the :param:`output` file may be overwritten. 
An output\n file extracted from :param:`files` will not be overwritten unless\n :param:`force_overwrite` is `True`.\n \"\"\"\n resolved_output = output or (files[-1] if files else None)\n force_overwrite = output is not None or force_overwrite\n if not force_overwrite and resolved_output and os.path.exists(\n resolved_output):\n raise FileOverwriteError(\n \"file exists and won't be overwritten without use of the \"\n \"`--force-overwrite` or `--output` options.\")\n resolved_inputs = (\n [input] if input else [] +\n list(files[:-1 if not output else None]) if files else [])\n return resolved_output, resolved_inputs\n\n\ndef to_lower(ctx, param, value):\n \"\"\"Click callback, converts values to lowercase.\"\"\"\n return value.lower()\n", "path": "rasterio/rio/helpers.py"}]}
| 1,361 | 133 |
gh_patches_debug_8115
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__aimmo-51
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Submitting empty code leads to server error
When sending an empty program, we get:
Traceback:
File "/Library/Python/2.7/site-packages/django/core/handlers/base.py" in get_response
1. response = wrapped_callback(request, _callback_args, *_callback_kwargs)
File "/Library/Python/2.7/site-packages/django/contrib/auth/decorators.py" in _wrapped_view
2. return view_func(request, _args, *_kwargs)
File "/Users/paulina.koch/Documents/C4L/forks/aimmo/ui/players/views.py" in code
3. world.player_changed_code(request.user.id, request.user.player.code)
File "/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/game_state.py" in player_changed_code
4. avatar.set_code(code)
File "/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/avatar/avatar_wrapper.py" in set_code
5. self.avatar = Avatar()
Exception Type: TypeError at /api/code/
Exception Value: 'NoneType' object is not callable
A simple check should be enough, but we could use this as a chance to implement additional validation or better error handling/printing when submitting code.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `simulation/avatar/avatar_wrapper.py`
Content:
```
1 import traceback
2 import sys
3
4 from simulation.action import WaitAction
5
6
7 # This class will be implemented by the player
8 Avatar = None
9
10
11 class UserCodeException(Exception):
12 def __init__(self, *args, **kwargs):
13 super(Exception, self).__init__(*args, **kwargs)
14 self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()
15
16 def to_user_string(self):
17 lines = traceback.format_exception(self.exc_type, self.exc_value, self.exc_traceback)
18 return '<br/>'.join(lines)
19
20
21 class AvatarWrapper(object):
22 """
23 The application's view of a character, not to be confused with "Avatar", the player-supplied code.
24 """
25
26 def __init__(self, initial_location, initial_code, player_id, avatar_appearance):
27 self.location = initial_location
28 self.health = 5
29 self.score = 0
30 self.events = []
31 self.player_id = player_id
32 self.avatar_appearance = avatar_appearance
33 self.avatar = None
34
35 self.set_code(initial_code)
36
37 def handle_turn(self, state):
38 try:
39 next_action = self.avatar.handle_turn(state, self.events)
40 except Exception as e:
41 # TODO: tell user their program threw an exception during execution somehow...
42 print('avatar threw exception during handle_turn:', e)
43 traceback.print_exc()
44 next_action = WaitAction()
45 # Reset event log
46 self.events = []
47
48 return next_action
49
50 def die(self, respawn_location):
51 # TODO: extract settings for health and score loss on death
52 self.health = 5
53 self.score = max(0, self.score - 2)
54 self.location = respawn_location
55
56 def add_event(self, event):
57 self.events.append(event)
58
59 def set_code(self, code):
60 self.code = code
61 try:
62 exec(code)
63 except Exception as ex:
64 raise UserCodeException("Exception in user code", ex)
65 self.avatar = Avatar()
66
67 def __repr__(self):
68 return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,
69 self.health, self.score)
70
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/simulation/avatar/avatar_wrapper.py b/simulation/avatar/avatar_wrapper.py
--- a/simulation/avatar/avatar_wrapper.py
+++ b/simulation/avatar/avatar_wrapper.py
@@ -60,9 +60,9 @@
self.code = code
try:
exec(code)
+ self.avatar = Avatar()
except Exception as ex:
raise UserCodeException("Exception in user code", ex)
- self.avatar = Avatar()
def __repr__(self):
return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,
|
{"golden_diff": "diff --git a/simulation/avatar/avatar_wrapper.py b/simulation/avatar/avatar_wrapper.py\n--- a/simulation/avatar/avatar_wrapper.py\n+++ b/simulation/avatar/avatar_wrapper.py\n@@ -60,9 +60,9 @@\n self.code = code\n try:\n exec(code)\n+ self.avatar = Avatar()\n except Exception as ex:\n raise UserCodeException(\"Exception in user code\", ex)\n- self.avatar = Avatar()\n \n def __repr__(self):\n return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,\n", "issue": "Submitting empty code leads to server error\nWhen sending empty program, we are getting:\n\nTraceback:\nFile \"/Library/Python/2.7/site-packages/django/core/handlers/base.py\" in get_response\n1. response = wrapped_callback(request, _callback_args, *_callback_kwargs)\n File \"/Library/Python/2.7/site-packages/django/contrib/auth/decorators.py\" in _wrapped_view\n2. return view_func(request, _args, *_kwargs)\n File \"/Users/paulina.koch/Documents/C4L/forks/aimmo/ui/players/views.py\" in code\n3. world.player_changed_code(request.user.id, request.user.player.code)\n File \"/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/game_state.py\" in player_changed_code\n4. avatar.set_code(code)\n File \"/Users/paulina.koch/Documents/C4L/forks/aimmo/simulation/avatar/avatar_wrapper.py\" in set_code\n5. self.avatar = Avatar()\n\nException Type: TypeError at /api/code/\nException Value: 'NoneType' object is not callable\n\nSimple check should be enough, but we could use it as a chance to implement an extension of validation or error handling/printing when submitting code.\n\n", "before_files": [{"content": "import traceback\nimport sys\n\nfrom simulation.action import WaitAction\n\n\n# This class will be implemented by the player\nAvatar = None\n\n\nclass UserCodeException(Exception):\n def __init__(self, *args, **kwargs):\n super(Exception, self).__init__(*args, **kwargs)\n self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()\n\n def to_user_string(self):\n lines = traceback.format_exception(self.exc_type, self.exc_value, self.exc_traceback)\n return '<br/>'.join(lines)\n\n\nclass AvatarWrapper(object):\n \"\"\"\n The application's view of a character, not to be confused with \"Avatar\", the player-supplied code.\n \"\"\"\n\n def __init__(self, initial_location, initial_code, player_id, avatar_appearance):\n self.location = initial_location\n self.health = 5\n self.score = 0\n self.events = []\n self.player_id = player_id\n self.avatar_appearance = avatar_appearance\n self.avatar = None\n\n self.set_code(initial_code)\n\n def handle_turn(self, state):\n try:\n next_action = self.avatar.handle_turn(state, self.events)\n except Exception as e:\n # TODO: tell user their program threw an exception during execution somehow...\n print('avatar threw exception during handle_turn:', e)\n traceback.print_exc()\n next_action = WaitAction()\n # Reset event log\n self.events = []\n\n return next_action\n\n def die(self, respawn_location):\n # TODO: extract settings for health and score loss on death\n self.health = 5\n self.score = max(0, self.score - 2)\n self.location = respawn_location\n\n def add_event(self, event):\n self.events.append(event)\n\n def set_code(self, code):\n self.code = code\n try:\n exec(code)\n except Exception as ex:\n raise UserCodeException(\"Exception in user code\", ex)\n self.avatar = Avatar()\n\n def __repr__(self):\n return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,\n self.health, self.score)\n\n", "path": 
"simulation/avatar/avatar_wrapper.py"}], "after_files": [{"content": "import traceback\nimport sys\n\nfrom simulation.action import WaitAction\n\n\n# This class will be implemented by the player\nAvatar = None\n\n\nclass UserCodeException(Exception):\n def __init__(self, *args, **kwargs):\n super(Exception, self).__init__(*args, **kwargs)\n self.exc_type, self.exc_value, self.exc_traceback = sys.exc_info()\n\n def to_user_string(self):\n lines = traceback.format_exception(self.exc_type, self.exc_value, self.exc_traceback)\n return '<br/>'.join(lines)\n\n\nclass AvatarWrapper(object):\n \"\"\"\n The application's view of a character, not to be confused with \"Avatar\", the player-supplied code.\n \"\"\"\n\n def __init__(self, initial_location, initial_code, player_id, avatar_appearance):\n self.location = initial_location\n self.health = 5\n self.score = 0\n self.events = []\n self.player_id = player_id\n self.avatar_appearance = avatar_appearance\n self.avatar = None\n\n self.set_code(initial_code)\n\n def handle_turn(self, state):\n try:\n next_action = self.avatar.handle_turn(state, self.events)\n except Exception as e:\n # TODO: tell user their program threw an exception during execution somehow...\n print('avatar threw exception during handle_turn:', e)\n traceback.print_exc()\n next_action = WaitAction()\n # Reset event log\n self.events = []\n\n return next_action\n\n def die(self, respawn_location):\n # TODO: extract settings for health and score loss on death\n self.health = 5\n self.score = max(0, self.score - 2)\n self.location = respawn_location\n\n def add_event(self, event):\n self.events.append(event)\n\n def set_code(self, code):\n self.code = code\n try:\n exec(code)\n self.avatar = Avatar()\n except Exception as ex:\n raise UserCodeException(\"Exception in user code\", ex)\n\n def __repr__(self):\n return 'Avatar(id={}, location={}, health={}, score={})'.format(self.player_id, self.location,\n self.health, self.score)\n\n", "path": "simulation/avatar/avatar_wrapper.py"}]}
| 1,130 | 124 |
gh_patches_debug_1611
|
rasdani/github-patches
|
git_diff
|
google__mobly-311
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Exceptions in `setup_test` should leave the test in `ERROR` status
Regardless of the exception type, an error in `setup_test` should cause `ERROR` status.
This is different from a test method.
In a test method, an exception based on signals.TestFailure should cause the test to exit with `FAILED` status.
This is to be consistent with pyunit's behavior.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mobly/signals.py`
Content:
```
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """This module is where all the test signal classes and related utilities live.
15 """
16
17 import functools
18 import json
19
20
21 class TestSignalError(Exception):
22 """Raised when an error occurs inside a test signal."""
23
24
25 class TestSignal(Exception):
26 """Base class for all test result control signals. This is used to signal
27 the result of a test.
28
29 Attribute:
30 details: A string that describes the reason for raising this signal.
31 extras: A json-serializable data type to convey extra information about
32 a test result.
33 """
34
35 def __init__(self, details, extras=None):
36 super(TestSignal, self).__init__(details)
37 self.details = str(details)
38 try:
39 json.dumps(extras)
40 self.extras = extras
41 except TypeError:
42 raise TestSignalError('Extras must be json serializable. %s '
43 'is not.' % extras)
44
45 def __str__(self):
46 return 'Details=%s, Extras=%s' % (self.details, self.extras)
47
48
49 class TestFailure(TestSignal):
50 """Raised when a test has failed."""
51
52
53 class TestPass(TestSignal):
54 """Raised when a test has passed."""
55
56
57 class TestSkip(TestSignal):
58 """Raised when a test has been skipped."""
59
60
61 class TestAbortClass(TestSignal):
62 """Raised when all subsequent tests within the same test class should
63 be aborted.
64 """
65
66
67 class TestAbortAll(TestSignal):
68 """Raised when all subsequent tests should be aborted."""
69
70
71 class ControllerError(Exception):
72 """Raised when an error occured in controller classes."""
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mobly/signals.py b/mobly/signals.py
--- a/mobly/signals.py
+++ b/mobly/signals.py
@@ -46,6 +46,10 @@
return 'Details=%s, Extras=%s' % (self.details, self.extras)
+class TestError(TestSignal):
+ """Raised when a test has an unexpected error."""
+
+
class TestFailure(TestSignal):
"""Raised when a test has failed."""
|
{"golden_diff": "diff --git a/mobly/signals.py b/mobly/signals.py\n--- a/mobly/signals.py\n+++ b/mobly/signals.py\n@@ -46,6 +46,10 @@\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n \n \n+class TestError(TestSignal):\n+ \"\"\"Raised when a test has an unexpected error.\"\"\"\n+\n+\n class TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n", "issue": "Exceptions in `setup_test` should leave the test in `ERROR` status\nRegardless of the type of the exception, `setup_test` error should cause `ERROR` status.\r\n\r\nThis is different from a test method.\r\nIn a test method, an exception based on signals.TestFailure should cause the test to exit with `FAILED` status.\r\n\r\nThis is to be consistent with pyunit's behavior.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is where all the test signal classes and related utilities live.\n\"\"\"\n\nimport functools\nimport json\n\n\nclass TestSignalError(Exception):\n \"\"\"Raised when an error occurs inside a test signal.\"\"\"\n\n\nclass TestSignal(Exception):\n \"\"\"Base class for all test result control signals. This is used to signal\n the result of a test.\n\n Attribute:\n details: A string that describes the reason for raising this signal.\n extras: A json-serializable data type to convey extra information about\n a test result.\n \"\"\"\n\n def __init__(self, details, extras=None):\n super(TestSignal, self).__init__(details)\n self.details = str(details)\n try:\n json.dumps(extras)\n self.extras = extras\n except TypeError:\n raise TestSignalError('Extras must be json serializable. %s '\n 'is not.' 
% extras)\n\n def __str__(self):\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n\n\nclass TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n\n\nclass TestPass(TestSignal):\n \"\"\"Raised when a test has passed.\"\"\"\n\n\nclass TestSkip(TestSignal):\n \"\"\"Raised when a test has been skipped.\"\"\"\n\n\nclass TestAbortClass(TestSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n\n\nclass TestAbortAll(TestSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n\n\nclass ControllerError(Exception):\n \"\"\"Raised when an error occured in controller classes.\"\"\"\n", "path": "mobly/signals.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is where all the test signal classes and related utilities live.\n\"\"\"\n\nimport functools\nimport json\n\n\nclass TestSignalError(Exception):\n \"\"\"Raised when an error occurs inside a test signal.\"\"\"\n\n\nclass TestSignal(Exception):\n \"\"\"Base class for all test result control signals. This is used to signal\n the result of a test.\n\n Attribute:\n details: A string that describes the reason for raising this signal.\n extras: A json-serializable data type to convey extra information about\n a test result.\n \"\"\"\n\n def __init__(self, details, extras=None):\n super(TestSignal, self).__init__(details)\n self.details = str(details)\n try:\n json.dumps(extras)\n self.extras = extras\n except TypeError:\n raise TestSignalError('Extras must be json serializable. %s '\n 'is not.' % extras)\n\n def __str__(self):\n return 'Details=%s, Extras=%s' % (self.details, self.extras)\n\n\nclass TestError(TestSignal):\n \"\"\"Raised when a test has an unexpected error.\"\"\"\n\n\nclass TestFailure(TestSignal):\n \"\"\"Raised when a test has failed.\"\"\"\n\n\nclass TestPass(TestSignal):\n \"\"\"Raised when a test has passed.\"\"\"\n\n\nclass TestSkip(TestSignal):\n \"\"\"Raised when a test has been skipped.\"\"\"\n\n\nclass TestAbortClass(TestSignal):\n \"\"\"Raised when all subsequent tests within the same test class should\n be aborted.\n \"\"\"\n\n\nclass TestAbortAll(TestSignal):\n \"\"\"Raised when all subsequent tests should be aborted.\"\"\"\n\n\nclass ControllerError(Exception):\n \"\"\"Raised when an error occured in controller classes.\"\"\"\n", "path": "mobly/signals.py"}]}
| 942 | 106 |
gh_patches_debug_31916
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-557
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NotADirectoryError when running --generate-hashes
Running `pip-compile` works flawlessly, but `pip-compile --generate-hashes` produces the following error:
> Traceback (most recent call last):
> File "/home/ric/.local/bin/pip-compile", line 11, in <module>
> sys.exit(cli())
> File "/home/ric/.local/lib/python3.5/site-packages/click/core.py", line 722, in __call__
> return self.main(*args, **kwargs)
> File "/home/ric/.local/lib/python3.5/site-packages/click/core.py", line 697, in main
> rv = self.invoke(ctx)
> File "/home/ric/.local/lib/python3.5/site-packages/click/core.py", line 895, in invoke
> return ctx.invoke(self.callback, **ctx.params)
> File "/home/ric/.local/lib/python3.5/site-packages/click/core.py", line 535, in invoke
> return callback(*args, **kwargs)
> File "/home/ric/.local/lib/python3.5/site-packages/piptools/scripts/compile.py", line 186, in cli
> hashes = resolver.resolve_hashes(results)
> File "/home/ric/.local/lib/python3.5/site-packages/piptools/resolver.py", line 77, in resolve_hashes
> return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs}
> File "/home/ric/.local/lib/python3.5/site-packages/piptools/resolver.py", line 77, in <dictcomp>
> return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs}
> File "/home/ric/.local/lib/python3.5/site-packages/piptools/repositories/pypi.py", line 169, in get_hashes
> for candidate in matching_candidates
> File "/home/ric/.local/lib/python3.5/site-packages/piptools/repositories/pypi.py", line 169, in <setcomp>
> for candidate in matching_candidates
> File "/home/ric/.local/lib/python3.5/site-packages/piptools/repositories/pypi.py", line 176, in _get_file_hash
> download_dir=tmpdir, only_download=True, session=self.session
> File "/usr/lib/python3/dist-packages/pip/download.py", line 821, in unpack_url
> hashes=hashes
> File "/usr/lib/python3/dist-packages/pip/download.py", line 663, in unpack_http_url
> unpack_file(from_path, location, content_type, link)
> File "/usr/lib/python3/dist-packages/pip/utils/__init__.py", line 623, in unpack_file
> untar_file(filename, location)
> File "/usr/lib/python3/dist-packages/pip/utils/__init__.py", line 595, in untar_file
> with open(path, 'wb') as destfp:
> NotADirectoryError: [Errno 20] Not a directory: '/tmp/tmprk7taktxbuild/LICENSE/LICENSE_STIX'
>
##### Environment Versions
1. Ubuntu 16.04
1. Python 3.5.3
1. pip 9.0.1
1. pip-compile 1.9.0
This is my `requirements.in` file:
>
> seaborn==0.7.1
> pandas==0.20.1
> numpy==1.12.1
> scipy==0.19.0
> tqdm==4.11.2
> matplotlib==2.0.2
>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/repositories/pypi.py`
Content:
```
1 # coding: utf-8
2 from __future__ import (absolute_import, division, print_function,
3 unicode_literals)
4
5 import hashlib
6 import os
7 from shutil import rmtree
8
9 from pip.download import unpack_url
10 from pip.index import PackageFinder
11 from pip.req.req_set import RequirementSet
12 from pip.wheel import Wheel
13 try:
14 from pip.utils.hashes import FAVORITE_HASH
15 except ImportError:
16 FAVORITE_HASH = 'sha256'
17
18 from ..cache import CACHE_DIR
19 from ..exceptions import NoCandidateFound
20 from ..utils import (fs_str, is_pinned_requirement, lookup_table,
21 make_install_requirement, pip_version_info)
22 from .base import BaseRepository
23
24 try:
25 from tempfile import TemporaryDirectory # added in 3.2
26 except ImportError:
27 from .._compat import TemporaryDirectory
28
29
30 # Monkey patch pip's Wheel class to support all platform tags. This allows
31 # pip-tools to generate hashes for all available distributions, not only the
32 # one for the current platform.
33
34 def _wheel_supported(self, tags=None):
35 # Ignore current platform. Support everything.
36 return True
37
38
39 def _wheel_support_index_min(self, tags=None):
40 # All wheels are equal priority for sorting.
41 return 0
42
43
44 Wheel.supported = _wheel_supported
45 Wheel.support_index_min = _wheel_support_index_min
46
47
48 class PyPIRepository(BaseRepository):
49 DEFAULT_INDEX_URL = 'https://pypi.python.org/simple'
50
51 """
52 The PyPIRepository will use the provided Finder instance to lookup
53 packages. Typically, it looks up packages on PyPI (the default implicit
54 config), but any other PyPI mirror can be used if index_urls is
55 changed/configured on the Finder.
56 """
57 def __init__(self, pip_options, session):
58 self.session = session
59
60 index_urls = [pip_options.index_url] + pip_options.extra_index_urls
61 if pip_options.no_index:
62 index_urls = []
63
64 self.finder = PackageFinder(
65 find_links=pip_options.find_links,
66 index_urls=index_urls,
67 trusted_hosts=pip_options.trusted_hosts,
68 allow_all_prereleases=pip_options.pre,
69 process_dependency_links=pip_options.process_dependency_links,
70 session=self.session,
71 )
72
73 # Caches
74 # stores project_name => InstallationCandidate mappings for all
75 # versions reported by PyPI, so we only have to ask once for each
76 # project
77 self._available_candidates_cache = {}
78
79 # stores InstallRequirement => list(InstallRequirement) mappings
80 # of all secondary dependencies for the given requirement, so we
81 # only have to go to disk once for each requirement
82 self._dependencies_cache = {}
83
84 # Setup file paths
85 self.freshen_build_caches()
86 self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs'))
87 self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels'))
88
89 def freshen_build_caches(self):
90 """
91 Start with fresh build/source caches. Will remove any old build
92 caches from disk automatically.
93 """
94 self._build_dir = TemporaryDirectory(fs_str('build'))
95 self._source_dir = TemporaryDirectory(fs_str('source'))
96
97 @property
98 def build_dir(self):
99 return self._build_dir.name
100
101 @property
102 def source_dir(self):
103 return self._source_dir.name
104
105 def clear_caches(self):
106 rmtree(self._download_dir, ignore_errors=True)
107 rmtree(self._wheel_download_dir, ignore_errors=True)
108
109 def find_all_candidates(self, req_name):
110 if req_name not in self._available_candidates_cache:
111 # pip 8 changed the internal API, making this a public method
112 if pip_version_info >= (8, 0):
113 candidates = self.finder.find_all_candidates(req_name)
114 else:
115 candidates = self.finder._find_all_versions(req_name)
116 self._available_candidates_cache[req_name] = candidates
117 return self._available_candidates_cache[req_name]
118
119 def find_best_match(self, ireq, prereleases=None):
120 """
121 Returns a Version object that indicates the best match for the given
122 InstallRequirement according to the external repository.
123 """
124 if ireq.editable:
125 return ireq # return itself as the best match
126
127 all_candidates = self.find_all_candidates(ireq.name)
128 candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True)
129 matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),
130 prereleases=prereleases)
131
132 # Reuses pip's internal candidate sort key to sort
133 matching_candidates = [candidates_by_version[ver] for ver in matching_versions]
134 if not matching_candidates:
135 raise NoCandidateFound(ireq, all_candidates)
136 best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)
137
138 # Turn the candidate into a pinned InstallRequirement
139 return make_install_requirement(
140 best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint
141 )
142
143 def get_dependencies(self, ireq):
144 """
145 Given a pinned or an editable InstallRequirement, returns a set of
146 dependencies (also InstallRequirements, but not necessarily pinned).
147 They indicate the secondary dependencies for the given requirement.
148 """
149 if not (ireq.editable or is_pinned_requirement(ireq)):
150 raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))
151
152 if ireq not in self._dependencies_cache:
153 if ireq.link and not ireq.link.is_artifact:
154 # No download_dir for VCS sources. This also works around pip
155 # using git-checkout-index, which gets rid of the .git dir.
156 download_dir = None
157 else:
158 download_dir = self._download_dir
159 if not os.path.isdir(download_dir):
160 os.makedirs(download_dir)
161 if not os.path.isdir(self._wheel_download_dir):
162 os.makedirs(self._wheel_download_dir)
163
164 reqset = RequirementSet(self.build_dir,
165 self.source_dir,
166 download_dir=download_dir,
167 wheel_download_dir=self._wheel_download_dir,
168 session=self.session)
169 self._dependencies_cache[ireq] = reqset._prepare_file(self.finder, ireq)
170 return set(self._dependencies_cache[ireq])
171
172 def get_hashes(self, ireq):
173 """
174 Given a pinned InstallRequire, returns a set of hashes that represent
175 all of the files for a given requirement. It is not acceptable for an
176 editable or unpinned requirement to be passed to this function.
177 """
178 if not is_pinned_requirement(ireq):
179 raise TypeError(
180 "Expected pinned requirement, not unpinned or editable, got {}".format(ireq))
181
182 # We need to get all of the candidates that match our current version
183 # pin, these will represent all of the files that could possibly
184 # satisify this constraint.
185 all_candidates = self.find_all_candidates(ireq.name)
186 candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)
187 matching_versions = list(
188 ireq.specifier.filter((candidate.version for candidate in all_candidates)))
189 matching_candidates = candidates_by_version[matching_versions[0]]
190
191 return {
192 self._get_file_hash(candidate.location)
193 for candidate in matching_candidates
194 }
195
196 def _get_file_hash(self, location):
197 with TemporaryDirectory() as tmpdir:
198 unpack_url(
199 location, self.build_dir,
200 download_dir=tmpdir, only_download=True, session=self.session
201 )
202 files = os.listdir(tmpdir)
203 assert len(files) == 1
204 filename = os.path.abspath(os.path.join(tmpdir, files[0]))
205
206 h = hashlib.new(FAVORITE_HASH)
207 with open(filename, "rb") as fp:
208 for chunk in iter(lambda: fp.read(8096), b""):
209 h.update(chunk)
210
211 return ":".join([FAVORITE_HASH, h.hexdigest()])
212
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/repositories/pypi.py b/piptools/repositories/pypi.py
--- a/piptools/repositories/pypi.py
+++ b/piptools/repositories/pypi.py
@@ -4,9 +4,10 @@
import hashlib
import os
+from contextlib import contextmanager
from shutil import rmtree
-from pip.download import unpack_url
+from pip.download import is_file_url, url_to_path
from pip.index import PackageFinder
from pip.req.req_set import RequirementSet
from pip.wheel import Wheel
@@ -194,18 +195,38 @@
}
def _get_file_hash(self, location):
- with TemporaryDirectory() as tmpdir:
- unpack_url(
- location, self.build_dir,
- download_dir=tmpdir, only_download=True, session=self.session
- )
- files = os.listdir(tmpdir)
- assert len(files) == 1
- filename = os.path.abspath(os.path.join(tmpdir, files[0]))
-
- h = hashlib.new(FAVORITE_HASH)
- with open(filename, "rb") as fp:
- for chunk in iter(lambda: fp.read(8096), b""):
- h.update(chunk)
-
+ h = hashlib.new(FAVORITE_HASH)
+ with open_local_or_remote_file(location, self.session) as fp:
+ for chunk in iter(lambda: fp.read(8096), b""):
+ h.update(chunk)
return ":".join([FAVORITE_HASH, h.hexdigest()])
+
+
+@contextmanager
+def open_local_or_remote_file(link, session):
+ """
+ Open local or remote file for reading.
+
+ :type link: pip.index.Link
+ :type session: requests.Session
+ :raises ValueError: If link points to a local directory.
+ :return: a context manager to the opened file-like object
+ """
+ url = link.url_without_fragment
+
+ if is_file_url(link):
+ # Local URL
+ local_path = url_to_path(url)
+ if os.path.isdir(local_path):
+ raise ValueError("Cannot open directory for read: {}".format(url))
+ else:
+ with open(local_path, 'rb') as local_file:
+ yield local_file
+ else:
+ # Remote URL
+ headers = {"Accept-Encoding": "identity"}
+ response = session.get(url, headers=headers, stream=True)
+ try:
+ yield response.raw
+ finally:
+ response.close()
|
{"golden_diff": "diff --git a/piptools/repositories/pypi.py b/piptools/repositories/pypi.py\n--- a/piptools/repositories/pypi.py\n+++ b/piptools/repositories/pypi.py\n@@ -4,9 +4,10 @@\n \n import hashlib\n import os\n+from contextlib import contextmanager\n from shutil import rmtree\n \n-from pip.download import unpack_url\n+from pip.download import is_file_url, url_to_path\n from pip.index import PackageFinder\n from pip.req.req_set import RequirementSet\n from pip.wheel import Wheel\n@@ -194,18 +195,38 @@\n }\n \n def _get_file_hash(self, location):\n- with TemporaryDirectory() as tmpdir:\n- unpack_url(\n- location, self.build_dir,\n- download_dir=tmpdir, only_download=True, session=self.session\n- )\n- files = os.listdir(tmpdir)\n- assert len(files) == 1\n- filename = os.path.abspath(os.path.join(tmpdir, files[0]))\n-\n- h = hashlib.new(FAVORITE_HASH)\n- with open(filename, \"rb\") as fp:\n- for chunk in iter(lambda: fp.read(8096), b\"\"):\n- h.update(chunk)\n-\n+ h = hashlib.new(FAVORITE_HASH)\n+ with open_local_or_remote_file(location, self.session) as fp:\n+ for chunk in iter(lambda: fp.read(8096), b\"\"):\n+ h.update(chunk)\n return \":\".join([FAVORITE_HASH, h.hexdigest()])\n+\n+\n+@contextmanager\n+def open_local_or_remote_file(link, session):\n+ \"\"\"\n+ Open local or remote file for reading.\n+\n+ :type link: pip.index.Link\n+ :type session: requests.Session\n+ :raises ValueError: If link points to a local directory.\n+ :return: a context manager to the opened file-like object\n+ \"\"\"\n+ url = link.url_without_fragment\n+\n+ if is_file_url(link):\n+ # Local URL\n+ local_path = url_to_path(url)\n+ if os.path.isdir(local_path):\n+ raise ValueError(\"Cannot open directory for read: {}\".format(url))\n+ else:\n+ with open(local_path, 'rb') as local_file:\n+ yield local_file\n+ else:\n+ # Remote URL\n+ headers = {\"Accept-Encoding\": \"identity\"}\n+ response = session.get(url, headers=headers, stream=True)\n+ try:\n+ yield response.raw\n+ finally:\n+ response.close()\n", "issue": "NotADirectoryError when running --generate-hashes\nRunning `pip-compile` works flawless, but pip-compile `--generate-hashes` produces the following error:\r\n\r\n> Traceback (most recent call last):\r\n> File \"/home/ric/.local/bin/pip-compile\", line 11, in <module>\r\n> sys.exit(cli())\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/click/core.py\", line 722, in __call__\r\n> return self.main(*args, **kwargs)\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/click/core.py\", line 697, in main\r\n> rv = self.invoke(ctx)\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/click/core.py\", line 895, in invoke\r\n> return ctx.invoke(self.callback, **ctx.params)\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/click/core.py\", line 535, in invoke\r\n> return callback(*args, **kwargs)\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/piptools/scripts/compile.py\", line 186, in cli\r\n> hashes = resolver.resolve_hashes(results)\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/piptools/resolver.py\", line 77, in resolve_hashes\r\n> return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs}\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/piptools/resolver.py\", line 77, in <dictcomp>\r\n> return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs}\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/piptools/repositories/pypi.py\", line 169, in get_hashes\r\n> for candidate in matching_candidates\r\n> File 
\"/home/ric/.local/lib/python3.5/site-packages/piptools/repositories/pypi.py\", line 169, in <setcomp>\r\n> for candidate in matching_candidates\r\n> File \"/home/ric/.local/lib/python3.5/site-packages/piptools/repositories/pypi.py\", line 176, in _get_file_hash\r\n> download_dir=tmpdir, only_download=True, session=self.session\r\n> File \"/usr/lib/python3/dist-packages/pip/download.py\", line 821, in unpack_url\r\n> hashes=hashes\r\n> File \"/usr/lib/python3/dist-packages/pip/download.py\", line 663, in unpack_http_url\r\n> unpack_file(from_path, location, content_type, link)\r\n> File \"/usr/lib/python3/dist-packages/pip/utils/__init__.py\", line 623, in unpack_file\r\n> untar_file(filename, location)\r\n> File \"/usr/lib/python3/dist-packages/pip/utils/__init__.py\", line 595, in untar_file\r\n> with open(path, 'wb') as destfp:\r\n> NotADirectoryError: [Errno 20] Not a directory: '/tmp/tmprk7taktxbuild/LICENSE/LICENSE_STIX'\r\n> \r\n\r\n##### Environment Versions\r\n\r\n1. Ubuntu 16.04\r\n1. Python 3.5.3\r\n1. pip 9.0.1\r\n1. pip-compile 1.9.0\r\n\r\nThis is my `requirements.in` file:\r\n\r\n> \r\n> seaborn==0.7.1\r\n> pandas==0.20.1\r\n> numpy==1.12.1\r\n> scipy==0.19.0\r\n> tqdm==4.11.2\r\n> matplotlib==2.0.2\r\n> \n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport hashlib\nimport os\nfrom shutil import rmtree\n\nfrom pip.download import unpack_url\nfrom pip.index import PackageFinder\nfrom pip.req.req_set import RequirementSet\nfrom pip.wheel import Wheel\ntry:\n from pip.utils.hashes import FAVORITE_HASH\nexcept ImportError:\n FAVORITE_HASH = 'sha256'\n\nfrom ..cache import CACHE_DIR\nfrom ..exceptions import NoCandidateFound\nfrom ..utils import (fs_str, is_pinned_requirement, lookup_table,\n make_install_requirement, pip_version_info)\nfrom .base import BaseRepository\n\ntry:\n from tempfile import TemporaryDirectory # added in 3.2\nexcept ImportError:\n from .._compat import TemporaryDirectory\n\n\n# Monkey patch pip's Wheel class to support all platform tags. This allows\n# pip-tools to generate hashes for all available distributions, not only the\n# one for the current platform.\n\ndef _wheel_supported(self, tags=None):\n # Ignore current platform. Support everything.\n return True\n\n\ndef _wheel_support_index_min(self, tags=None):\n # All wheels are equal priority for sorting.\n return 0\n\n\nWheel.supported = _wheel_supported\nWheel.support_index_min = _wheel_support_index_min\n\n\nclass PyPIRepository(BaseRepository):\n DEFAULT_INDEX_URL = 'https://pypi.python.org/simple'\n\n \"\"\"\n The PyPIRepository will use the provided Finder instance to lookup\n packages. 
Typically, it looks up packages on PyPI (the default implicit\n config), but any other PyPI mirror can be used if index_urls is\n changed/configured on the Finder.\n \"\"\"\n def __init__(self, pip_options, session):\n self.session = session\n\n index_urls = [pip_options.index_url] + pip_options.extra_index_urls\n if pip_options.no_index:\n index_urls = []\n\n self.finder = PackageFinder(\n find_links=pip_options.find_links,\n index_urls=index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n allow_all_prereleases=pip_options.pre,\n process_dependency_links=pip_options.process_dependency_links,\n session=self.session,\n )\n\n # Caches\n # stores project_name => InstallationCandidate mappings for all\n # versions reported by PyPI, so we only have to ask once for each\n # project\n self._available_candidates_cache = {}\n\n # stores InstallRequirement => list(InstallRequirement) mappings\n # of all secondary dependencies for the given requirement, so we\n # only have to go to disk once for each requirement\n self._dependencies_cache = {}\n\n # Setup file paths\n self.freshen_build_caches()\n self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs'))\n self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels'))\n\n def freshen_build_caches(self):\n \"\"\"\n Start with fresh build/source caches. Will remove any old build\n caches from disk automatically.\n \"\"\"\n self._build_dir = TemporaryDirectory(fs_str('build'))\n self._source_dir = TemporaryDirectory(fs_str('source'))\n\n @property\n def build_dir(self):\n return self._build_dir.name\n\n @property\n def source_dir(self):\n return self._source_dir.name\n\n def clear_caches(self):\n rmtree(self._download_dir, ignore_errors=True)\n rmtree(self._wheel_download_dir, ignore_errors=True)\n\n def find_all_candidates(self, req_name):\n if req_name not in self._available_candidates_cache:\n # pip 8 changed the internal API, making this a public method\n if pip_version_info >= (8, 0):\n candidates = self.finder.find_all_candidates(req_name)\n else:\n candidates = self.finder._find_all_versions(req_name)\n self._available_candidates_cache[req_name] = candidates\n return self._available_candidates_cache[req_name]\n\n def find_best_match(self, ireq, prereleases=None):\n \"\"\"\n Returns a Version object that indicates the best match for the given\n InstallRequirement according to the external repository.\n \"\"\"\n if ireq.editable:\n return ireq # return itself as the best match\n\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True)\n matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),\n prereleases=prereleases)\n\n # Reuses pip's internal candidate sort key to sort\n matching_candidates = [candidates_by_version[ver] for ver in matching_versions]\n if not matching_candidates:\n raise NoCandidateFound(ireq, all_candidates)\n best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)\n\n # Turn the candidate into a pinned InstallRequirement\n return make_install_requirement(\n best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint\n )\n\n def get_dependencies(self, ireq):\n \"\"\"\n Given a pinned or an editable InstallRequirement, returns a set of\n dependencies (also InstallRequirements, but not necessarily pinned).\n They indicate the secondary dependencies for the given requirement.\n \"\"\"\n if not (ireq.editable or is_pinned_requirement(ireq)):\n 
raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))\n\n if ireq not in self._dependencies_cache:\n if ireq.link and not ireq.link.is_artifact:\n # No download_dir for VCS sources. This also works around pip\n # using git-checkout-index, which gets rid of the .git dir.\n download_dir = None\n else:\n download_dir = self._download_dir\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n if not os.path.isdir(self._wheel_download_dir):\n os.makedirs(self._wheel_download_dir)\n\n reqset = RequirementSet(self.build_dir,\n self.source_dir,\n download_dir=download_dir,\n wheel_download_dir=self._wheel_download_dir,\n session=self.session)\n self._dependencies_cache[ireq] = reqset._prepare_file(self.finder, ireq)\n return set(self._dependencies_cache[ireq])\n\n def get_hashes(self, ireq):\n \"\"\"\n Given a pinned InstallRequire, returns a set of hashes that represent\n all of the files for a given requirement. It is not acceptable for an\n editable or unpinned requirement to be passed to this function.\n \"\"\"\n if not is_pinned_requirement(ireq):\n raise TypeError(\n \"Expected pinned requirement, not unpinned or editable, got {}\".format(ireq))\n\n # We need to get all of the candidates that match our current version\n # pin, these will represent all of the files that could possibly\n # satisify this constraint.\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)\n matching_versions = list(\n ireq.specifier.filter((candidate.version for candidate in all_candidates)))\n matching_candidates = candidates_by_version[matching_versions[0]]\n\n return {\n self._get_file_hash(candidate.location)\n for candidate in matching_candidates\n }\n\n def _get_file_hash(self, location):\n with TemporaryDirectory() as tmpdir:\n unpack_url(\n location, self.build_dir,\n download_dir=tmpdir, only_download=True, session=self.session\n )\n files = os.listdir(tmpdir)\n assert len(files) == 1\n filename = os.path.abspath(os.path.join(tmpdir, files[0]))\n\n h = hashlib.new(FAVORITE_HASH)\n with open(filename, \"rb\") as fp:\n for chunk in iter(lambda: fp.read(8096), b\"\"):\n h.update(chunk)\n\n return \":\".join([FAVORITE_HASH, h.hexdigest()])\n", "path": "piptools/repositories/pypi.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport hashlib\nimport os\nfrom contextlib import contextmanager\nfrom shutil import rmtree\n\nfrom pip.download import is_file_url, url_to_path\nfrom pip.index import PackageFinder\nfrom pip.req.req_set import RequirementSet\nfrom pip.wheel import Wheel\ntry:\n from pip.utils.hashes import FAVORITE_HASH\nexcept ImportError:\n FAVORITE_HASH = 'sha256'\n\nfrom ..cache import CACHE_DIR\nfrom ..exceptions import NoCandidateFound\nfrom ..utils import (fs_str, is_pinned_requirement, lookup_table,\n make_install_requirement, pip_version_info)\nfrom .base import BaseRepository\n\ntry:\n from tempfile import TemporaryDirectory # added in 3.2\nexcept ImportError:\n from .._compat import TemporaryDirectory\n\n\n# Monkey patch pip's Wheel class to support all platform tags. This allows\n# pip-tools to generate hashes for all available distributions, not only the\n# one for the current platform.\n\ndef _wheel_supported(self, tags=None):\n # Ignore current platform. 
Support everything.\n return True\n\n\ndef _wheel_support_index_min(self, tags=None):\n # All wheels are equal priority for sorting.\n return 0\n\n\nWheel.supported = _wheel_supported\nWheel.support_index_min = _wheel_support_index_min\n\n\nclass PyPIRepository(BaseRepository):\n DEFAULT_INDEX_URL = 'https://pypi.python.org/simple'\n\n \"\"\"\n The PyPIRepository will use the provided Finder instance to lookup\n packages. Typically, it looks up packages on PyPI (the default implicit\n config), but any other PyPI mirror can be used if index_urls is\n changed/configured on the Finder.\n \"\"\"\n def __init__(self, pip_options, session):\n self.session = session\n\n index_urls = [pip_options.index_url] + pip_options.extra_index_urls\n if pip_options.no_index:\n index_urls = []\n\n self.finder = PackageFinder(\n find_links=pip_options.find_links,\n index_urls=index_urls,\n trusted_hosts=pip_options.trusted_hosts,\n allow_all_prereleases=pip_options.pre,\n process_dependency_links=pip_options.process_dependency_links,\n session=self.session,\n )\n\n # Caches\n # stores project_name => InstallationCandidate mappings for all\n # versions reported by PyPI, so we only have to ask once for each\n # project\n self._available_candidates_cache = {}\n\n # stores InstallRequirement => list(InstallRequirement) mappings\n # of all secondary dependencies for the given requirement, so we\n # only have to go to disk once for each requirement\n self._dependencies_cache = {}\n\n # Setup file paths\n self.freshen_build_caches()\n self._download_dir = fs_str(os.path.join(CACHE_DIR, 'pkgs'))\n self._wheel_download_dir = fs_str(os.path.join(CACHE_DIR, 'wheels'))\n\n def freshen_build_caches(self):\n \"\"\"\n Start with fresh build/source caches. Will remove any old build\n caches from disk automatically.\n \"\"\"\n self._build_dir = TemporaryDirectory(fs_str('build'))\n self._source_dir = TemporaryDirectory(fs_str('source'))\n\n @property\n def build_dir(self):\n return self._build_dir.name\n\n @property\n def source_dir(self):\n return self._source_dir.name\n\n def clear_caches(self):\n rmtree(self._download_dir, ignore_errors=True)\n rmtree(self._wheel_download_dir, ignore_errors=True)\n\n def find_all_candidates(self, req_name):\n if req_name not in self._available_candidates_cache:\n # pip 8 changed the internal API, making this a public method\n if pip_version_info >= (8, 0):\n candidates = self.finder.find_all_candidates(req_name)\n else:\n candidates = self.finder._find_all_versions(req_name)\n self._available_candidates_cache[req_name] = candidates\n return self._available_candidates_cache[req_name]\n\n def find_best_match(self, ireq, prereleases=None):\n \"\"\"\n Returns a Version object that indicates the best match for the given\n InstallRequirement according to the external repository.\n \"\"\"\n if ireq.editable:\n return ireq # return itself as the best match\n\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version, unique=True)\n matching_versions = ireq.specifier.filter((candidate.version for candidate in all_candidates),\n prereleases=prereleases)\n\n # Reuses pip's internal candidate sort key to sort\n matching_candidates = [candidates_by_version[ver] for ver in matching_versions]\n if not matching_candidates:\n raise NoCandidateFound(ireq, all_candidates)\n best_candidate = max(matching_candidates, key=self.finder._candidate_sort_key)\n\n # Turn the candidate into a pinned InstallRequirement\n return 
make_install_requirement(\n best_candidate.project, best_candidate.version, ireq.extras, constraint=ireq.constraint\n )\n\n def get_dependencies(self, ireq):\n \"\"\"\n Given a pinned or an editable InstallRequirement, returns a set of\n dependencies (also InstallRequirements, but not necessarily pinned).\n They indicate the secondary dependencies for the given requirement.\n \"\"\"\n if not (ireq.editable or is_pinned_requirement(ireq)):\n raise TypeError('Expected pinned or editable InstallRequirement, got {}'.format(ireq))\n\n if ireq not in self._dependencies_cache:\n if ireq.link and not ireq.link.is_artifact:\n # No download_dir for VCS sources. This also works around pip\n # using git-checkout-index, which gets rid of the .git dir.\n download_dir = None\n else:\n download_dir = self._download_dir\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n if not os.path.isdir(self._wheel_download_dir):\n os.makedirs(self._wheel_download_dir)\n\n reqset = RequirementSet(self.build_dir,\n self.source_dir,\n download_dir=download_dir,\n wheel_download_dir=self._wheel_download_dir,\n session=self.session)\n self._dependencies_cache[ireq] = reqset._prepare_file(self.finder, ireq)\n return set(self._dependencies_cache[ireq])\n\n def get_hashes(self, ireq):\n \"\"\"\n Given a pinned InstallRequire, returns a set of hashes that represent\n all of the files for a given requirement. It is not acceptable for an\n editable or unpinned requirement to be passed to this function.\n \"\"\"\n if not is_pinned_requirement(ireq):\n raise TypeError(\n \"Expected pinned requirement, not unpinned or editable, got {}\".format(ireq))\n\n # We need to get all of the candidates that match our current version\n # pin, these will represent all of the files that could possibly\n # satisify this constraint.\n all_candidates = self.find_all_candidates(ireq.name)\n candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)\n matching_versions = list(\n ireq.specifier.filter((candidate.version for candidate in all_candidates)))\n matching_candidates = candidates_by_version[matching_versions[0]]\n\n return {\n self._get_file_hash(candidate.location)\n for candidate in matching_candidates\n }\n\n def _get_file_hash(self, location):\n h = hashlib.new(FAVORITE_HASH)\n with open_local_or_remote_file(location, self.session) as fp:\n for chunk in iter(lambda: fp.read(8096), b\"\"):\n h.update(chunk)\n return \":\".join([FAVORITE_HASH, h.hexdigest()])\n\n\n@contextmanager\ndef open_local_or_remote_file(link, session):\n \"\"\"\n Open local or remote file for reading.\n\n :type link: pip.index.Link\n :type session: requests.Session\n :raises ValueError: If link points to a local directory.\n :return: a context manager to the opened file-like object\n \"\"\"\n url = link.url_without_fragment\n\n if is_file_url(link):\n # Local URL\n local_path = url_to_path(url)\n if os.path.isdir(local_path):\n raise ValueError(\"Cannot open directory for read: {}\".format(url))\n else:\n with open(local_path, 'rb') as local_file:\n yield local_file\n else:\n # Remote URL\n headers = {\"Accept-Encoding\": \"identity\"}\n response = session.get(url, headers=headers, stream=True)\n try:\n yield response.raw\n finally:\n response.close()\n", "path": "piptools/repositories/pypi.py"}]}
| 3,357 | 566 |
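The pip-tools record above centers on computing a distribution file's hash by reading it in fixed-size chunks and formatting the result as `<algorithm>:<hexdigest>` (that loop appears in both the original and the patched `_get_file_hash` in the escaped JSON). Below is a minimal, standalone sketch of the chunked-hashing pattern; the function name `file_hash` and the default `sha256` are illustrative stand-ins for the module's `FAVORITE_HASH` handling, not its actual code.

```python
import hashlib

def file_hash(path, algorithm="sha256", chunk_size=8096):
    """Hash a file in fixed-size chunks so large archives never sit fully in memory."""
    h = hashlib.new(algorithm)
    with open(path, "rb") as fp:
        # iter() with a b"" sentinel stops cleanly at end of file.
        for chunk in iter(lambda: fp.read(chunk_size), b""):
            h.update(chunk)
    # pip-tools formats hashes as "<algorithm>:<hexdigest>".
    return ":".join([algorithm, h.hexdigest()])
```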
gh_patches_debug_37219
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-3290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bfmtv - No playable streams found on this URL
<!--
Thanks for reporting a plugin issue!
USE THE TEMPLATE. Otherwise your plugin issue may be rejected.
First, see the contribution guidelines:
https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink
Also check the list of open and closed plugin issues:
https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22
Please see the text preview to avoid unnecessary formatting errors.
-->
## Plugin Issue
<!-- Replace [ ] with [x] in order to check the box -->
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
<!-- Explain the plugin issue as thoroughly as you can. -->
There are no playable streams for bfmtv
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
`streamlink https://www.bfmtv.com/en-direct/ best`
or `streamlink https://www.bfmtv.com/paris/en-direct/ best`
or any other channels supported by this plugin
### Log output
<!--
TEXT LOG OUTPUT IS REQUIRED for a plugin issue!
Use the `--loglevel debug` parameter and avoid using parameters which suppress log output.
https://streamlink.github.io/cli.html#cmdoption-l
Make sure to **remove usernames and passwords**
You can copy the output to https://gist.github.com/ or paste it below.
Don't post screenshots of the log output and instead copy the text from your terminal application.
-->
```
streamlink --loglevel debug https://www.bfmtv.com/en-direct/ best
[cli][debug] OS: Linux-5.8.15-201.fc32.x86_64-x86_64-with-glibc2.2.5
[cli][debug] Python: 3.8.6
[cli][debug] Streamlink: 1.7.0
[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)
[cli][info] Found matching plugin bfmtv for URL https://www.bfmtv.com/en-direct/
error: No playable streams found on this URL: https://www.bfmtv.com/en-direct/
```
### Additional comments, etc.
As a workaround you can use their dailymotion stream: `streamlink https://www.dailymotion.com/embed/video/xgz4t1 best`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/bfmtv.py`
Content:
```
1 import re
2
3 from streamlink.plugin import Plugin
4 from streamlink.plugins.brightcove import BrightcovePlayer
5 from streamlink.stream import HLSStream
6
7
8 class BFMTV(Plugin):
9 _url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com')
10 _brightcove_video_re = re.compile(
11 r'data-holder="video(?P<video_id>[0-9]+)" data-account="(?P<account_id>[0-9]+)"'
12 )
13 _brightcove_video_alt_re = re.compile(
14 r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"',
15 re.DOTALL
16 )
17 _embed_video_url_re = re.compile(
18 r"\$YOPLAYER\('liveStitching', {.+?file: '(?P<video_url>[^\"]+?)'.+?}\);",
19 re.DOTALL
20 )
21
22 @classmethod
23 def can_handle_url(cls, url):
24 return BFMTV._url_re.match(url)
25
26 def _get_streams(self):
27 # Retrieve URL page and search for Brightcove video data
28 res = self.session.http.get(self.url)
29 match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
30 if match is not None:
31 account_id = match.group('account_id')
32 video_id = match.group('video_id')
33 player = BrightcovePlayer(self.session, account_id)
34 for stream in player.get_streams(video_id):
35 yield stream
36 else:
37 # Try to get the stream URL in the page
38 match = self._embed_video_url_re.search(res.text)
39 if match is not None:
40 video_url = match.group('video_url')
41 if '.m3u8' in video_url:
42 yield from HLSStream.parse_variant_playlist(self.session, video_url).items()
43
44
45 __plugin__ = BFMTV
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py
--- a/src/streamlink/plugins/bfmtv.py
+++ b/src/streamlink/plugins/bfmtv.py
@@ -1,45 +1,46 @@
+import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugins.brightcove import BrightcovePlayer
-from streamlink.stream import HLSStream
+
+log = logging.getLogger(__name__)
class BFMTV(Plugin):
_url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com')
+ _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'
_brightcove_video_re = re.compile(
- r'data-holder="video(?P<video_id>[0-9]+)" data-account="(?P<account_id>[0-9]+)"'
- )
- _brightcove_video_alt_re = re.compile(
- r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"',
+ r'accountid="(?P<account_id>[0-9]+).*?videoid="(?P<video_id>[0-9]+)"',
re.DOTALL
)
- _embed_video_url_re = re.compile(
- r"\$YOPLAYER\('liveStitching', {.+?file: '(?P<video_url>[^\"]+?)'.+?}\);",
+ _embed_video_id_re = re.compile(
+ r'<iframe.*?src=".*?/(?P<video_id>\w+)"',
re.DOTALL
)
@classmethod
def can_handle_url(cls, url):
- return BFMTV._url_re.match(url)
+ return cls._url_re.match(url) is not None
def _get_streams(self):
# Retrieve URL page and search for Brightcove video data
res = self.session.http.get(self.url)
- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
+ match = self._brightcove_video_re.search(res.text)
if match is not None:
account_id = match.group('account_id')
+ log.debug(f'Account ID: {account_id}')
video_id = match.group('video_id')
+ log.debug(f'Video ID: {video_id}')
player = BrightcovePlayer(self.session, account_id)
- for stream in player.get_streams(video_id):
- yield stream
+ yield from player.get_streams(video_id)
else:
- # Try to get the stream URL in the page
- match = self._embed_video_url_re.search(res.text)
+ # Try to find the Dailymotion video ID
+ match = self._embed_video_id_re.search(res.text)
if match is not None:
- video_url = match.group('video_url')
- if '.m3u8' in video_url:
- yield from HLSStream.parse_variant_playlist(self.session, video_url).items()
+ video_id = match.group('video_id')
+ log.debug(f'Video ID: {video_id}')
+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()
__plugin__ = BFMTV
|
{"golden_diff": "diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py\n--- a/src/streamlink/plugins/bfmtv.py\n+++ b/src/streamlink/plugins/bfmtv.py\n@@ -1,45 +1,46 @@\n+import logging\n import re\n \n from streamlink.plugin import Plugin\n from streamlink.plugins.brightcove import BrightcovePlayer\n-from streamlink.stream import HLSStream\n+\n+log = logging.getLogger(__name__)\n \n \n class BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n+ _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'\n _brightcove_video_re = re.compile(\n- r'data-holder=\"video(?P<video_id>[0-9]+)\" data-account=\"(?P<account_id>[0-9]+)\"'\n- )\n- _brightcove_video_alt_re = re.compile(\n- r'data-account=\"(?P<account_id>[0-9]+).*?data-video-id=\"(?P<video_id>[0-9]+)\"',\n+ r'accountid=\"(?P<account_id>[0-9]+).*?videoid=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n- _embed_video_url_re = re.compile(\n- r\"\\$YOPLAYER\\('liveStitching', {.+?file: '(?P<video_url>[^\\\"]+?)'.+?}\\);\",\n+ _embed_video_id_re = re.compile(\n+ r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n \n @classmethod\n def can_handle_url(cls, url):\n- return BFMTV._url_re.match(url)\n+ return cls._url_re.match(url) is not None\n \n def _get_streams(self):\n # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n+ match = self._brightcove_video_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n+ log.debug(f'Account ID: {account_id}')\n video_id = match.group('video_id')\n+ log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n- for stream in player.get_streams(video_id):\n- yield stream\n+ yield from player.get_streams(video_id)\n else:\n- # Try to get the stream URL in the page\n- match = self._embed_video_url_re.search(res.text)\n+ # Try to find the Dailymotion video ID\n+ match = self._embed_video_id_re.search(res.text)\n if match is not None:\n- video_url = match.group('video_url')\n- if '.m3u8' in video_url:\n- yield from HLSStream.parse_variant_playlist(self.session, video_url).items()\n+ video_id = match.group('video_id')\n+ log.debug(f'Video ID: {video_id}')\n+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n \n \n __plugin__ = BFMTV\n", "issue": "bfmtv - No playable streams found on this URL\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\nThere is no playable streams for bfmtv\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. 
-->\r\n\r\n`streamlink https://www.bfmtv.com/en-direct/ best`\r\n\r\nor `streamlink https://www.bfmtv.com/paris/en-direct/ best`\r\nor any other channels supported by this plugin\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n\r\nDon't post screenshots of the log output and instead copy the text from your terminal application.\r\n-->\r\n\r\n```\r\nstreamlink --loglevel debug https://www.bfmtv.com/en-direct/ best\r\n[cli][debug] OS: Linux-5.8.15-201.fc32.x86_64-x86_64-with-glibc2.2.5\r\n[cli][debug] Python: 3.8.6\r\n[cli][debug] Streamlink: 1.7.0\r\n[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][info] Found matching plugin bfmtv for URL https://www.bfmtv.com/en-direct/\r\nerror: No playable streams found on this URL: https://www.bfmtv.com/en-direct/\r\n```\r\n\r\n\r\n### Additional comments, etc.\r\n\r\nAs a workaround you can use their dailymotion stream: `streamlink https://www.dailymotion.com/embed/video/xgz4t1 best`\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugins.brightcove import BrightcovePlayer\nfrom streamlink.stream import HLSStream\n\n\nclass BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n _brightcove_video_re = re.compile(\n r'data-holder=\"video(?P<video_id>[0-9]+)\" data-account=\"(?P<account_id>[0-9]+)\"'\n )\n _brightcove_video_alt_re = re.compile(\n r'data-account=\"(?P<account_id>[0-9]+).*?data-video-id=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _embed_video_url_re = re.compile(\n r\"\\$YOPLAYER\\('liveStitching', {.+?file: '(?P<video_url>[^\\\"]+?)'.+?}\\);\",\n re.DOTALL\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return BFMTV._url_re.match(url)\n\n def _get_streams(self):\n # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n video_id = match.group('video_id')\n player = BrightcovePlayer(self.session, account_id)\n for stream in player.get_streams(video_id):\n yield stream\n else:\n # Try to get the stream URL in the page\n match = self._embed_video_url_re.search(res.text)\n if match is not None:\n video_url = match.group('video_url')\n if '.m3u8' in video_url:\n yield from HLSStream.parse_variant_playlist(self.session, video_url).items()\n\n\n__plugin__ = BFMTV\n", "path": "src/streamlink/plugins/bfmtv.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugins.brightcove import BrightcovePlayer\n\nlog = logging.getLogger(__name__)\n\n\nclass BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'\n _brightcove_video_re = re.compile(\n r'accountid=\"(?P<account_id>[0-9]+).*?videoid=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _embed_video_id_re = re.compile(\n r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n # Retrieve URL page and 
search for Brightcove video data\n res = self.session.http.get(self.url)\n match = self._brightcove_video_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n log.debug(f'Account ID: {account_id}')\n video_id = match.group('video_id')\n log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n yield from player.get_streams(video_id)\n else:\n # Try to find the Dailymotion video ID\n match = self._embed_video_id_re.search(res.text)\n if match is not None:\n video_id = match.group('video_id')\n log.debug(f'Video ID: {video_id}')\n yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n\n\n__plugin__ = BFMTV\n", "path": "src/streamlink/plugins/bfmtv.py"}]}
| 1,353 | 748 |
gh_patches_debug_16501
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-42087
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
meraki - Idempotency check is broken
##### SUMMARY
Current: The Meraki module will do a two pass comparison between the original and proposed configuration. First, it looks at the original and compares that data to the proposed. Second, it looks at the proposed data and compares to the original. This makes sure keys from both data structures are compared.
This has worked fine since most modules have a minimal number of parameters. However, Meraki's SSID API for wireless network configuration has around 2 dozen parameters. The API also returns a lot of information about the SSID. Both of these conditions increase the chance of idempotency problems with the current state.
A likely fix is to make the comparison only one pass. The one pass should compare the data in the proposed configuration to the data in the original.
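To make that concrete, here is a simplified, standalone sketch of a single-pass check that only walks the keys in the proposed payload. It is not the module's actual method: the real `is_update_required` also honours an `optional_ignore` tuple and treats empty-string values specially, which this sketch leaves out.

```python
def is_update_required(original, proposed, ignored_keys=('id', 'organizationId')):
    """Single pass: only keys present in the proposed payload are compared."""
    for key, value in proposed.items():
        if key in ignored_keys:
            continue
        # A key missing from the original, or carrying a different value, needs an update.
        if original.get(key) != value:
            return True
    return False

# Extra keys returned by the API but absent from the proposal no longer trigger a change:
# is_update_required({'name': 'ssid1', 'radiusServers': []}, {'name': 'ssid1'}) -> False
```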
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
meraki
##### ANSIBLE VERSION
```
ansible 2.7.0.dev0 (meraki/meraki_ssid f7ef022171) last updated 2018/06/24 19:45:10 (GMT -500)
config file = None
configured module search path = ['/Users/kbreit/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/kbreit/Documents/Programming/ansible/lib/ansible
executable location = /Users/kbreit/Documents/Programming/ansible/bin/ansible
python version = 3.5.4 (default, Feb 25 2018, 14:56:02) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/module_utils/network/meraki/meraki.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Ansible, but is an independent component
4
5 # This particular file snippet, and this file snippet only, is BSD licensed.
6 # Modules you write using this snippet, which is embedded dynamically by Ansible
7 # still belong to the author of the module, and may assign their own license
8 # to the complete work.
9
10 # Copyright: (c) 2018, Kevin Breit <[email protected]>
11 # All rights reserved.
12
13 # Redistribution and use in source and binary forms, with or without modification,
14 # are permitted provided that the following conditions are met:
15 #
16 # * Redistributions of source code must retain the above copyright
17 # notice, this list of conditions and the following disclaimer.
18 # * Redistributions in binary form must reproduce the above copyright notice,
19 # this list of conditions and the following disclaimer in the documentation
20 # and/or other materials provided with the distribution.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
23 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
30 # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
32 import os
33 from ansible.module_utils.basic import AnsibleModule, json, env_fallback
34 from ansible.module_utils.urls import fetch_url
35 from ansible.module_utils._text import to_native, to_bytes, to_text
36
37
38 def meraki_argument_spec():
39 return dict(auth_key=dict(type='str', no_log=True, fallback=(env_fallback, ['MERAKI_KEY'])),
40 host=dict(type='str', default='api.meraki.com'),
41 use_proxy=dict(type='bool', default=False),
42 use_https=dict(type='bool', default=True),
43 validate_certs=dict(type='bool', default=True),
44 output_level=dict(type='str', default='normal', choices=['normal', 'debug']),
45 timeout=dict(type='int', default=30),
46 org_name=dict(type='str', aliases=['organization']),
47 org_id=dict(type='str'),
48 )
49
50
51 class MerakiModule(object):
52
53 def __init__(self, module, function=None):
54 self.module = module
55 self.params = module.params
56 self.result = dict(changed=False)
57 self.headers = dict()
58 self.function = function
59
60 # normal output
61 self.existing = None
62
63 # info output
64 self.config = dict()
65 self.original = None
66 self.proposed = dict()
67 self.merged = None
68
69 # debug output
70 self.filter_string = ''
71 self.method = None
72 self.path = None
73 self.response = None
74 self.status = None
75 self.url = None
76
77 # If URLs need to be modified or added for specific purposes, use .update() on the url_catalog dictionary
78 self.get_urls = {'organizations': '/organizations',
79 'network': '/organizations/{org_id}/networks',
80 'admins': '/organizations/{org_id}/admins',
81 'configTemplates': '/organizations/{org_id}/configTemplates',
82 'samlRoles': '/organizations/{org_id}/samlRoles',
83 'ssids': '/networks/{net_id}/ssids',
84 'groupPolicies': '/networks/{net_id}/groupPolicies',
85 'staticRoutes': '/networks/{net_id}/staticRoutes',
86 'vlans': '/networks/{net_id}/vlans',
87 'devices': '/networks/{net_id}/devices',
88 }
89
90 # Used to retrieve only one item
91 self.get_one_urls = {'organizations': '/organizations/{org_id}',
92 'network': '/networks/{net_id}',
93 }
94
95 # Module should add URLs which are required by the module
96 self.url_catalog = {'get_all': self.get_urls,
97 'get_one': self.get_one_urls,
98 'create': None,
99 'update': None,
100 'delete': None,
101 'misc': None,
102 }
103
104 if self.module._debug or self.params['output_level'] == 'debug':
105 self.module.warn('Enable debug output because ANSIBLE_DEBUG was set or output_level is set to debug.')
106
107 # TODO: This should be removed as org_name isn't always required
108 self.module.required_if = [('state', 'present', ['org_name']),
109 ('state', 'absent', ['org_name']),
110 ]
111 # self.module.mutually_exclusive = [('org_id', 'org_name'),
112 # ]
113 self.modifiable_methods = ['POST', 'PUT', 'DELETE']
114
115 self.headers = {'Content-Type': 'application/json',
116 'X-Cisco-Meraki-API-Key': module.params['auth_key'],
117 }
118
119 def define_protocol(self):
120 """Set protocol based on use_https parameters."""
121 if self.params['use_https'] is True:
122 self.params['protocol'] = 'https'
123 else:
124 self.params['protocol'] = 'http'
125
126 def is_update_required(self, original, proposed, optional_ignore=None):
127 """Compare original and proposed data to see if an update is needed."""
128 is_changed = False
129 ignored_keys = ('id', 'organizationId')
130 if not optional_ignore:
131 optional_ignore = ('')
132
133 for k, v in original.items():
134 try:
135 if k not in ignored_keys and k not in optional_ignore:
136 if v != proposed[k]:
137 is_changed = True
138 except KeyError:
139 if v != '':
140 is_changed = True
141 for k, v in proposed.items():
142 try:
143 if k not in ignored_keys and k not in optional_ignore:
144 if v != original[k]:
145 is_changed = True
146 except KeyError:
147 if v != '':
148 is_changed = True
149 return is_changed
150
151 def get_orgs(self):
152 """Downloads all organizations for a user."""
153 return self.request('/organizations', method='GET')
154
155 def is_org_valid(self, data, org_name=None, org_id=None):
156 """Checks whether a specific org exists and is duplicated.
157
158 If 0, doesn't exist. 1, exists and not duplicated. >1 duplicated.
159 """
160 org_count = 0
161 if org_name is not None:
162 for o in data:
163 if o['name'] == org_name:
164 org_count += 1
165 if org_id is not None:
166 for o in data:
167 if o['id'] == org_id:
168 org_count += 1
169 return org_count
170
171 def get_org_id(self, org_name):
172 """Returns an organization id based on organization name, only if unique.
173
174 If org_id is specified as parameter, return that instead of a lookup.
175 """
176 orgs = self.get_orgs()
177 # self.fail_json(msg='ogs', orgs=orgs)
178 if self.params['org_id'] is not None:
179 if self.is_org_valid(orgs, org_id=self.params['org_id']) is True:
180 return self.params['org_id']
181 org_count = self.is_org_valid(orgs, org_name=org_name)
182 if org_count == 0:
183 self.fail_json(msg='There are no organizations with the name {org_name}'.format(org_name=org_name))
184 if org_count > 1:
185 self.fail_json(msg='There are multiple organizations with the name {org_name}'.format(org_name=org_name))
186 elif org_count == 1:
187 for i in orgs:
188 if org_name == i['name']:
189 # self.fail_json(msg=i['id'])
190 return str(i['id'])
191
192 def get_nets(self, org_name=None, org_id=None):
193 """Downloads all networks in an organization."""
194 if org_name:
195 org_id = self.get_org_id(org_name)
196 path = self.construct_path('get_all', org_id=org_id, function='network')
197 r = self.request(path, method='GET')
198 return r
199
200 def get_net(self, org_name, net_name, data=None):
201 """Return network information about a particular network."""
202 # TODO: Allow method to download data on its own
203 # if not data:
204 # org_id = self.get_org_id(org_name)
205 # path = '/organizations/{org_id}/networks/{net_id}'.format(
206 # org_id=org_id,
207 # net_id=self.get_net_id(
208 # org_name=org_name,
209 # net_name=net_name,
210 # data=data)
211 # )
212 # return json.loads(self.request('GET', path))
213 # else:
214 for n in data:
215 if n['name'] == net_name:
216 return n
217 return False
218
219 def get_net_id(self, org_name=None, net_name=None, data=None):
220 """Return network id from lookup or existing data."""
221 if data is None:
222 self.fail_json(msg='Must implement lookup')
223 for n in data:
224 if n['name'] == net_name:
225 return n['id']
226 self.fail_json(msg='No network found with the name {0}'.format(net_name))
227
228 def construct_path(self, action, function=None, org_id=None, net_id=None, org_name=None):
229 """Build a path from the URL catalog.
230
231 Uses function property from class for catalog lookup.
232 """
233 built_path = None
234 if function is None:
235 built_path = self.url_catalog[action][self.function]
236 else:
237 built_path = self.url_catalog[action][function]
238 if org_name:
239 org_id = self.get_org_id(org_name)
240
241 built_path = built_path.format(org_id=org_id, net_id=net_id)
242 return built_path
243
244 def request(self, path, method=None, payload=None):
245 """Generic HTTP method for Meraki requests."""
246 self.path = path
247 self.define_protocol()
248
249 if method is not None:
250 self.method = method
251 self.url = '{protocol}://{host}/api/v0/{path}'.format(path=self.path.lstrip('/'), **self.params)
252 resp, info = fetch_url(self.module, self.url,
253 headers=self.headers,
254 data=payload,
255 method=self.method,
256 timeout=self.params['timeout'],
257 use_proxy=self.params['use_proxy'],
258 )
259 self.response = info['msg']
260 self.status = info['status']
261
262 if self.status >= 300:
263 self.fail_json(msg='Request failed for {url}: {status} - {msg}'.format(**info))
264 try:
265 return json.loads(to_native(resp.read()))
266 except:
267 pass
268
269 def exit_json(self, **kwargs):
270 """Custom written method to exit from module."""
271 self.result['response'] = self.response
272 self.result['status'] = self.status
273 # Return the gory details when we need it
274 if self.params['output_level'] == 'debug':
275 self.result['method'] = self.method
276 self.result['url'] = self.url
277
278 self.result.update(**kwargs)
279 self.module.exit_json(**self.result)
280
281 def fail_json(self, msg, **kwargs):
282 """Custom written method to return info on failure."""
283 self.result['response'] = self.response
284 self.result['status'] = self.status
285
286 if self.params['output_level'] == 'debug':
287 if self.url is not None:
288 self.result['method'] = self.method
289 self.result['url'] = self.url
290
291 self.result.update(**kwargs)
292 self.module.fail_json(msg=msg, **self.result)
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/ansible/module_utils/network/meraki/meraki.py b/lib/ansible/module_utils/network/meraki/meraki.py
--- a/lib/ansible/module_utils/network/meraki/meraki.py
+++ b/lib/ansible/module_utils/network/meraki/meraki.py
@@ -130,14 +130,14 @@
if not optional_ignore:
optional_ignore = ('')
- for k, v in original.items():
- try:
- if k not in ignored_keys and k not in optional_ignore:
- if v != proposed[k]:
- is_changed = True
- except KeyError:
- if v != '':
- is_changed = True
+ # for k, v in original.items():
+ # try:
+ # if k not in ignored_keys and k not in optional_ignore:
+ # if v != proposed[k]:
+ # is_changed = True
+ # except KeyError:
+ # if v != '':
+ # is_changed = True
for k, v in proposed.items():
try:
if k not in ignored_keys and k not in optional_ignore:
|
{"golden_diff": "diff --git a/lib/ansible/module_utils/network/meraki/meraki.py b/lib/ansible/module_utils/network/meraki/meraki.py\n--- a/lib/ansible/module_utils/network/meraki/meraki.py\n+++ b/lib/ansible/module_utils/network/meraki/meraki.py\n@@ -130,14 +130,14 @@\n if not optional_ignore:\n optional_ignore = ('')\n \n- for k, v in original.items():\n- try:\n- if k not in ignored_keys and k not in optional_ignore:\n- if v != proposed[k]:\n- is_changed = True\n- except KeyError:\n- if v != '':\n- is_changed = True\n+ # for k, v in original.items():\n+ # try:\n+ # if k not in ignored_keys and k not in optional_ignore:\n+ # if v != proposed[k]:\n+ # is_changed = True\n+ # except KeyError:\n+ # if v != '':\n+ # is_changed = True\n for k, v in proposed.items():\n try:\n if k not in ignored_keys and k not in optional_ignore:\n", "issue": "meraki - Idempotency check is broken\n##### SUMMARY\r\nCurrent: The Meraki module will do a two pass comparison between the original and proposed configuration. First, it looks at the original and compares that data to the proposed. Second, it looks at the proposed data and compares to the original. This makes sure keys from both data structures are compared.\r\n\r\nThis has worked fine since most modules have a minimal number of parameters. However, Meraki's SSID API for wireless network configuration has around 2 dozen parameters. The API also returns a lot of information about the SSID. Both of these conditions increases the chance of idempotency problems with the current state.\r\n\r\nA likely fix is to make the comparison only one pass. The one pass should compare the data in the proposed to the data in the comparison.\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\nmeraki\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.7.0.dev0 (meraki/meraki_ssid f7ef022171) last updated 2018/06/24 19:45:10 (GMT -500)\r\n config file = None\r\n configured module search path = ['/Users/kbreit/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /Users/kbreit/Documents/Programming/ansible/lib/ansible\r\n executable location = /Users/kbreit/Documents/Programming/ansible/bin/ansible\r\n python version = 3.5.4 (default, Feb 25 2018, 14:56:02) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Ansible, but is an independent component\n\n# This particular file snippet, and this file snippet only, is BSD licensed.\n# Modules you write using this snippet, which is embedded dynamically by Ansible\n# still belong to the author of the module, and may assign their own license\n# to the complete work.\n\n# Copyright: (c) 2018, Kevin Breit <[email protected]>\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
DISCLAIMED.\n# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nfrom ansible.module_utils.basic import AnsibleModule, json, env_fallback\nfrom ansible.module_utils.urls import fetch_url\nfrom ansible.module_utils._text import to_native, to_bytes, to_text\n\n\ndef meraki_argument_spec():\n return dict(auth_key=dict(type='str', no_log=True, fallback=(env_fallback, ['MERAKI_KEY'])),\n host=dict(type='str', default='api.meraki.com'),\n use_proxy=dict(type='bool', default=False),\n use_https=dict(type='bool', default=True),\n validate_certs=dict(type='bool', default=True),\n output_level=dict(type='str', default='normal', choices=['normal', 'debug']),\n timeout=dict(type='int', default=30),\n org_name=dict(type='str', aliases=['organization']),\n org_id=dict(type='str'),\n )\n\n\nclass MerakiModule(object):\n\n def __init__(self, module, function=None):\n self.module = module\n self.params = module.params\n self.result = dict(changed=False)\n self.headers = dict()\n self.function = function\n\n # normal output\n self.existing = None\n\n # info output\n self.config = dict()\n self.original = None\n self.proposed = dict()\n self.merged = None\n\n # debug output\n self.filter_string = ''\n self.method = None\n self.path = None\n self.response = None\n self.status = None\n self.url = None\n\n # If URLs need to be modified or added for specific purposes, use .update() on the url_catalog dictionary\n self.get_urls = {'organizations': '/organizations',\n 'network': '/organizations/{org_id}/networks',\n 'admins': '/organizations/{org_id}/admins',\n 'configTemplates': '/organizations/{org_id}/configTemplates',\n 'samlRoles': '/organizations/{org_id}/samlRoles',\n 'ssids': '/networks/{net_id}/ssids',\n 'groupPolicies': '/networks/{net_id}/groupPolicies',\n 'staticRoutes': '/networks/{net_id}/staticRoutes',\n 'vlans': '/networks/{net_id}/vlans',\n 'devices': '/networks/{net_id}/devices',\n }\n\n # Used to retrieve only one item\n self.get_one_urls = {'organizations': '/organizations/{org_id}',\n 'network': '/networks/{net_id}',\n }\n\n # Module should add URLs which are required by the module\n self.url_catalog = {'get_all': self.get_urls,\n 'get_one': self.get_one_urls,\n 'create': None,\n 'update': None,\n 'delete': None,\n 'misc': None,\n }\n\n if self.module._debug or self.params['output_level'] == 'debug':\n self.module.warn('Enable debug output because ANSIBLE_DEBUG was set or output_level is set to debug.')\n\n # TODO: This should be removed as org_name isn't always required\n self.module.required_if = [('state', 'present', ['org_name']),\n ('state', 'absent', ['org_name']),\n ]\n # self.module.mutually_exclusive = [('org_id', 'org_name'),\n # ]\n self.modifiable_methods = ['POST', 'PUT', 'DELETE']\n\n self.headers = {'Content-Type': 'application/json',\n 'X-Cisco-Meraki-API-Key': module.params['auth_key'],\n }\n\n def define_protocol(self):\n \"\"\"Set protocol based on use_https parameters.\"\"\"\n if self.params['use_https'] is True:\n self.params['protocol'] = 'https'\n else:\n self.params['protocol'] = 
'http'\n\n def is_update_required(self, original, proposed, optional_ignore=None):\n \"\"\"Compare original and proposed data to see if an update is needed.\"\"\"\n is_changed = False\n ignored_keys = ('id', 'organizationId')\n if not optional_ignore:\n optional_ignore = ('')\n\n for k, v in original.items():\n try:\n if k not in ignored_keys and k not in optional_ignore:\n if v != proposed[k]:\n is_changed = True\n except KeyError:\n if v != '':\n is_changed = True\n for k, v in proposed.items():\n try:\n if k not in ignored_keys and k not in optional_ignore:\n if v != original[k]:\n is_changed = True\n except KeyError:\n if v != '':\n is_changed = True\n return is_changed\n\n def get_orgs(self):\n \"\"\"Downloads all organizations for a user.\"\"\"\n return self.request('/organizations', method='GET')\n\n def is_org_valid(self, data, org_name=None, org_id=None):\n \"\"\"Checks whether a specific org exists and is duplicated.\n\n If 0, doesn't exist. 1, exists and not duplicated. >1 duplicated.\n \"\"\"\n org_count = 0\n if org_name is not None:\n for o in data:\n if o['name'] == org_name:\n org_count += 1\n if org_id is not None:\n for o in data:\n if o['id'] == org_id:\n org_count += 1\n return org_count\n\n def get_org_id(self, org_name):\n \"\"\"Returns an organization id based on organization name, only if unique.\n\n If org_id is specified as parameter, return that instead of a lookup.\n \"\"\"\n orgs = self.get_orgs()\n # self.fail_json(msg='ogs', orgs=orgs)\n if self.params['org_id'] is not None:\n if self.is_org_valid(orgs, org_id=self.params['org_id']) is True:\n return self.params['org_id']\n org_count = self.is_org_valid(orgs, org_name=org_name)\n if org_count == 0:\n self.fail_json(msg='There are no organizations with the name {org_name}'.format(org_name=org_name))\n if org_count > 1:\n self.fail_json(msg='There are multiple organizations with the name {org_name}'.format(org_name=org_name))\n elif org_count == 1:\n for i in orgs:\n if org_name == i['name']:\n # self.fail_json(msg=i['id'])\n return str(i['id'])\n\n def get_nets(self, org_name=None, org_id=None):\n \"\"\"Downloads all networks in an organization.\"\"\"\n if org_name:\n org_id = self.get_org_id(org_name)\n path = self.construct_path('get_all', org_id=org_id, function='network')\n r = self.request(path, method='GET')\n return r\n\n def get_net(self, org_name, net_name, data=None):\n \"\"\"Return network information about a particular network.\"\"\"\n # TODO: Allow method to download data on its own\n # if not data:\n # org_id = self.get_org_id(org_name)\n # path = '/organizations/{org_id}/networks/{net_id}'.format(\n # org_id=org_id,\n # net_id=self.get_net_id(\n # org_name=org_name,\n # net_name=net_name,\n # data=data)\n # )\n # return json.loads(self.request('GET', path))\n # else:\n for n in data:\n if n['name'] == net_name:\n return n\n return False\n\n def get_net_id(self, org_name=None, net_name=None, data=None):\n \"\"\"Return network id from lookup or existing data.\"\"\"\n if data is None:\n self.fail_json(msg='Must implement lookup')\n for n in data:\n if n['name'] == net_name:\n return n['id']\n self.fail_json(msg='No network found with the name {0}'.format(net_name))\n\n def construct_path(self, action, function=None, org_id=None, net_id=None, org_name=None):\n \"\"\"Build a path from the URL catalog.\n\n Uses function property from class for catalog lookup.\n \"\"\"\n built_path = None\n if function is None:\n built_path = self.url_catalog[action][self.function]\n else:\n built_path = 
self.url_catalog[action][function]\n if org_name:\n org_id = self.get_org_id(org_name)\n\n built_path = built_path.format(org_id=org_id, net_id=net_id)\n return built_path\n\n def request(self, path, method=None, payload=None):\n \"\"\"Generic HTTP method for Meraki requests.\"\"\"\n self.path = path\n self.define_protocol()\n\n if method is not None:\n self.method = method\n self.url = '{protocol}://{host}/api/v0/{path}'.format(path=self.path.lstrip('/'), **self.params)\n resp, info = fetch_url(self.module, self.url,\n headers=self.headers,\n data=payload,\n method=self.method,\n timeout=self.params['timeout'],\n use_proxy=self.params['use_proxy'],\n )\n self.response = info['msg']\n self.status = info['status']\n\n if self.status >= 300:\n self.fail_json(msg='Request failed for {url}: {status} - {msg}'.format(**info))\n try:\n return json.loads(to_native(resp.read()))\n except:\n pass\n\n def exit_json(self, **kwargs):\n \"\"\"Custom written method to exit from module.\"\"\"\n self.result['response'] = self.response\n self.result['status'] = self.status\n # Return the gory details when we need it\n if self.params['output_level'] == 'debug':\n self.result['method'] = self.method\n self.result['url'] = self.url\n\n self.result.update(**kwargs)\n self.module.exit_json(**self.result)\n\n def fail_json(self, msg, **kwargs):\n \"\"\"Custom written method to return info on failure.\"\"\"\n self.result['response'] = self.response\n self.result['status'] = self.status\n\n if self.params['output_level'] == 'debug':\n if self.url is not None:\n self.result['method'] = self.method\n self.result['url'] = self.url\n\n self.result.update(**kwargs)\n self.module.fail_json(msg=msg, **self.result)\n", "path": "lib/ansible/module_utils/network/meraki/meraki.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Ansible, but is an independent component\n\n# This particular file snippet, and this file snippet only, is BSD licensed.\n# Modules you write using this snippet, which is embedded dynamically by Ansible\n# still belong to the author of the module, and may assign their own license\n# to the complete work.\n\n# Copyright: (c) 2018, Kevin Breit <[email protected]>\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\n# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nfrom 
ansible.module_utils.basic import AnsibleModule, json, env_fallback\nfrom ansible.module_utils.urls import fetch_url\nfrom ansible.module_utils._text import to_native, to_bytes, to_text\n\n\ndef meraki_argument_spec():\n return dict(auth_key=dict(type='str', no_log=True, fallback=(env_fallback, ['MERAKI_KEY'])),\n host=dict(type='str', default='api.meraki.com'),\n use_proxy=dict(type='bool', default=False),\n use_https=dict(type='bool', default=True),\n validate_certs=dict(type='bool', default=True),\n output_level=dict(type='str', default='normal', choices=['normal', 'debug']),\n timeout=dict(type='int', default=30),\n org_name=dict(type='str', aliases=['organization']),\n org_id=dict(type='str'),\n )\n\n\nclass MerakiModule(object):\n\n def __init__(self, module, function=None):\n self.module = module\n self.params = module.params\n self.result = dict(changed=False)\n self.headers = dict()\n self.function = function\n\n # normal output\n self.existing = None\n\n # info output\n self.config = dict()\n self.original = None\n self.proposed = dict()\n self.merged = None\n\n # debug output\n self.filter_string = ''\n self.method = None\n self.path = None\n self.response = None\n self.status = None\n self.url = None\n\n # If URLs need to be modified or added for specific purposes, use .update() on the url_catalog dictionary\n self.get_urls = {'organizations': '/organizations',\n 'network': '/organizations/{org_id}/networks',\n 'admins': '/organizations/{org_id}/admins',\n 'configTemplates': '/organizations/{org_id}/configTemplates',\n 'samlRoles': '/organizations/{org_id}/samlRoles',\n 'ssids': '/networks/{net_id}/ssids',\n 'groupPolicies': '/networks/{net_id}/groupPolicies',\n 'staticRoutes': '/networks/{net_id}/staticRoutes',\n 'vlans': '/networks/{net_id}/vlans',\n 'devices': '/networks/{net_id}/devices',\n }\n\n # Used to retrieve only one item\n self.get_one_urls = {'organizations': '/organizations/{org_id}',\n 'network': '/networks/{net_id}',\n }\n\n # Module should add URLs which are required by the module\n self.url_catalog = {'get_all': self.get_urls,\n 'get_one': self.get_one_urls,\n 'create': None,\n 'update': None,\n 'delete': None,\n 'misc': None,\n }\n\n if self.module._debug or self.params['output_level'] == 'debug':\n self.module.warn('Enable debug output because ANSIBLE_DEBUG was set or output_level is set to debug.')\n\n # TODO: This should be removed as org_name isn't always required\n self.module.required_if = [('state', 'present', ['org_name']),\n ('state', 'absent', ['org_name']),\n ]\n # self.module.mutually_exclusive = [('org_id', 'org_name'),\n # ]\n self.modifiable_methods = ['POST', 'PUT', 'DELETE']\n\n self.headers = {'Content-Type': 'application/json',\n 'X-Cisco-Meraki-API-Key': module.params['auth_key'],\n }\n\n def define_protocol(self):\n \"\"\"Set protocol based on use_https parameters.\"\"\"\n if self.params['use_https'] is True:\n self.params['protocol'] = 'https'\n else:\n self.params['protocol'] = 'http'\n\n def is_update_required(self, original, proposed, optional_ignore=None):\n \"\"\"Compare original and proposed data to see if an update is needed.\"\"\"\n is_changed = False\n ignored_keys = ('id', 'organizationId')\n if not optional_ignore:\n optional_ignore = ('')\n\n # for k, v in original.items():\n # try:\n # if k not in ignored_keys and k not in optional_ignore:\n # if v != proposed[k]:\n # is_changed = True\n # except KeyError:\n # if v != '':\n # is_changed = True\n for k, v in proposed.items():\n try:\n if k not in ignored_keys and k not in 
optional_ignore:\n if v != original[k]:\n is_changed = True\n except KeyError:\n if v != '':\n is_changed = True\n return is_changed\n\n def get_orgs(self):\n \"\"\"Downloads all organizations for a user.\"\"\"\n return self.request('/organizations', method='GET')\n\n def is_org_valid(self, data, org_name=None, org_id=None):\n \"\"\"Checks whether a specific org exists and is duplicated.\n\n If 0, doesn't exist. 1, exists and not duplicated. >1 duplicated.\n \"\"\"\n org_count = 0\n if org_name is not None:\n for o in data:\n if o['name'] == org_name:\n org_count += 1\n if org_id is not None:\n for o in data:\n if o['id'] == org_id:\n org_count += 1\n return org_count\n\n def get_org_id(self, org_name):\n \"\"\"Returns an organization id based on organization name, only if unique.\n\n If org_id is specified as parameter, return that instead of a lookup.\n \"\"\"\n orgs = self.get_orgs()\n # self.fail_json(msg='ogs', orgs=orgs)\n if self.params['org_id'] is not None:\n if self.is_org_valid(orgs, org_id=self.params['org_id']) is True:\n return self.params['org_id']\n org_count = self.is_org_valid(orgs, org_name=org_name)\n if org_count == 0:\n self.fail_json(msg='There are no organizations with the name {org_name}'.format(org_name=org_name))\n if org_count > 1:\n self.fail_json(msg='There are multiple organizations with the name {org_name}'.format(org_name=org_name))\n elif org_count == 1:\n for i in orgs:\n if org_name == i['name']:\n # self.fail_json(msg=i['id'])\n return str(i['id'])\n\n def get_nets(self, org_name=None, org_id=None):\n \"\"\"Downloads all networks in an organization.\"\"\"\n if org_name:\n org_id = self.get_org_id(org_name)\n path = self.construct_path('get_all', org_id=org_id, function='network')\n r = self.request(path, method='GET')\n return r\n\n def get_net(self, org_name, net_name, data=None):\n \"\"\"Return network information about a particular network.\"\"\"\n # TODO: Allow method to download data on its own\n # if not data:\n # org_id = self.get_org_id(org_name)\n # path = '/organizations/{org_id}/networks/{net_id}'.format(\n # org_id=org_id,\n # net_id=self.get_net_id(\n # org_name=org_name,\n # net_name=net_name,\n # data=data)\n # )\n # return json.loads(self.request('GET', path))\n # else:\n for n in data:\n if n['name'] == net_name:\n return n\n return False\n\n def get_net_id(self, org_name=None, net_name=None, data=None):\n \"\"\"Return network id from lookup or existing data.\"\"\"\n if data is None:\n self.fail_json(msg='Must implement lookup')\n for n in data:\n if n['name'] == net_name:\n return n['id']\n self.fail_json(msg='No network found with the name {0}'.format(net_name))\n\n def construct_path(self, action, function=None, org_id=None, net_id=None, org_name=None):\n \"\"\"Build a path from the URL catalog.\n\n Uses function property from class for catalog lookup.\n \"\"\"\n built_path = None\n if function is None:\n built_path = self.url_catalog[action][self.function]\n else:\n built_path = self.url_catalog[action][function]\n if org_name:\n org_id = self.get_org_id(org_name)\n\n built_path = built_path.format(org_id=org_id, net_id=net_id)\n return built_path\n\n def request(self, path, method=None, payload=None):\n \"\"\"Generic HTTP method for Meraki requests.\"\"\"\n self.path = path\n self.define_protocol()\n\n if method is not None:\n self.method = method\n self.url = '{protocol}://{host}/api/v0/{path}'.format(path=self.path.lstrip('/'), **self.params)\n resp, info = fetch_url(self.module, self.url,\n headers=self.headers,\n 
data=payload,\n method=self.method,\n timeout=self.params['timeout'],\n use_proxy=self.params['use_proxy'],\n )\n self.response = info['msg']\n self.status = info['status']\n\n if self.status >= 300:\n self.fail_json(msg='Request failed for {url}: {status} - {msg}'.format(**info))\n try:\n return json.loads(to_native(resp.read()))\n except:\n pass\n\n def exit_json(self, **kwargs):\n \"\"\"Custom written method to exit from module.\"\"\"\n self.result['response'] = self.response\n self.result['status'] = self.status\n # Return the gory details when we need it\n if self.params['output_level'] == 'debug':\n self.result['method'] = self.method\n self.result['url'] = self.url\n\n self.result.update(**kwargs)\n self.module.exit_json(**self.result)\n\n def fail_json(self, msg, **kwargs):\n \"\"\"Custom written method to return info on failure.\"\"\"\n self.result['response'] = self.response\n self.result['status'] = self.status\n\n if self.params['output_level'] == 'debug':\n if self.url is not None:\n self.result['method'] = self.method\n self.result['url'] = self.url\n\n self.result.update(**kwargs)\n self.module.fail_json(msg=msg, **self.result)\n", "path": "lib/ansible/module_utils/network/meraki/meraki.py"}]}
| 3,993 | 258 |
gh_patches_debug_29059
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-1477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_requires_for_build_sdist should not require wheel
[get_requires_for_build_sdist](https://github.com/pypa/setuptools/blob/master/setuptools/build_meta.py#L120) also implies [wheel](https://github.com/pypa/setuptools/blob/master/setuptools/build_meta.py#L96) as a dependency, even though a wheel is not required to build a source distribution.
--- END ISSUE ---
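For context, this is roughly how a PEP 517 frontend exercises the hook in question. The import path is the real `setuptools.build_meta` backend, but the expectation in the comments reflects the behaviour requested above, not current output, and it assumes a project whose `setup_requires` does not itself pull in `wheel`.
```python
# Minimal sketch of a PEP 517 frontend querying build requirements.
# Assumes it is run from a project directory containing a setup.py,
# since the hooks shell into "setup.py egg_info".
import setuptools.build_meta as backend

sdist_reqs = backend.get_requires_for_build_sdist()
wheel_reqs = backend.get_requires_for_build_wheel()

# Requested behaviour: 'wheel' should only show up for wheel builds.
print("sdist requires:", sdist_reqs)
print("wheel requires:", wheel_reqs)
```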
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/build_meta.py`
Content:
```
1 """A PEP 517 interface to setuptools
2
3 Previously, when a user or a command line tool (let's call it a "frontend")
4 needed to make a request of setuptools to take a certain action, for
5 example, generating a list of installation requirements, the frontend would
6 would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
7
8 PEP 517 defines a different method of interfacing with setuptools. Rather
9 than calling "setup.py" directly, the frontend should:
10
11 1. Set the current directory to the directory with a setup.py file
12 2. Import this module into a safe python interpreter (one in which
13 setuptools can potentially set global variables or crash hard).
14 3. Call one of the functions defined in PEP 517.
15
16 What each function does is defined in PEP 517. However, here is a "casual"
17 definition of the functions (this definition should not be relied on for
18 bug reports or API stability):
19
20 - `build_wheel`: build a wheel in the folder and return the basename
21 - `get_requires_for_build_wheel`: get the `setup_requires` to build
22 - `prepare_metadata_for_build_wheel`: get the `install_requires`
23 - `build_sdist`: build an sdist in the folder and return the basename
24 - `get_requires_for_build_sdist`: get the `setup_requires` to build
25
26 Again, this is not a formal definition! Just a "taste" of the module.
27 """
28
29 import os
30 import sys
31 import tokenize
32 import shutil
33 import contextlib
34
35 import setuptools
36 import distutils
37
38
39 class SetupRequirementsError(BaseException):
40 def __init__(self, specifiers):
41 self.specifiers = specifiers
42
43
44 class Distribution(setuptools.dist.Distribution):
45 def fetch_build_eggs(self, specifiers):
46 raise SetupRequirementsError(specifiers)
47
48 @classmethod
49 @contextlib.contextmanager
50 def patch(cls):
51 """
52 Replace
53 distutils.dist.Distribution with this class
54 for the duration of this context.
55 """
56 orig = distutils.core.Distribution
57 distutils.core.Distribution = cls
58 try:
59 yield
60 finally:
61 distutils.core.Distribution = orig
62
63
64 def _to_str(s):
65 """
66 Convert a filename to a string (on Python 2, explicitly
67 a byte string, not Unicode) as distutils checks for the
68 exact type str.
69 """
70 if sys.version_info[0] == 2 and not isinstance(s, str):
71 # Assume it's Unicode, as that's what the PEP says
72 # should be provided.
73 return s.encode(sys.getfilesystemencoding())
74 return s
75
76
77 def _run_setup(setup_script='setup.py'):
78 # Note that we can reuse our build directory between calls
79 # Correctness comes first, then optimization later
80 __file__ = setup_script
81 __name__ = '__main__'
82 f = getattr(tokenize, 'open', open)(__file__)
83 code = f.read().replace('\\r\\n', '\\n')
84 f.close()
85 exec(compile(code, __file__, 'exec'), locals())
86
87
88 def _fix_config(config_settings):
89 config_settings = config_settings or {}
90 config_settings.setdefault('--global-option', [])
91 return config_settings
92
93
94 def _get_build_requires(config_settings):
95 config_settings = _fix_config(config_settings)
96 requirements = ['setuptools', 'wheel']
97
98 sys.argv = sys.argv[:1] + ['egg_info'] + \
99 config_settings["--global-option"]
100 try:
101 with Distribution.patch():
102 _run_setup()
103 except SetupRequirementsError as e:
104 requirements += e.specifiers
105
106 return requirements
107
108
109 def _get_immediate_subdirectories(a_dir):
110 return [name for name in os.listdir(a_dir)
111 if os.path.isdir(os.path.join(a_dir, name))]
112
113
114 def get_requires_for_build_wheel(config_settings=None):
115 config_settings = _fix_config(config_settings)
116 return _get_build_requires(config_settings)
117
118
119 def get_requires_for_build_sdist(config_settings=None):
120 config_settings = _fix_config(config_settings)
121 return _get_build_requires(config_settings)
122
123
124 def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
125 sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]
126 _run_setup()
127
128 dist_info_directory = metadata_directory
129 while True:
130 dist_infos = [f for f in os.listdir(dist_info_directory)
131 if f.endswith('.dist-info')]
132
133 if len(dist_infos) == 0 and \
134 len(_get_immediate_subdirectories(dist_info_directory)) == 1:
135 dist_info_directory = os.path.join(
136 dist_info_directory, os.listdir(dist_info_directory)[0])
137 continue
138
139 assert len(dist_infos) == 1
140 break
141
142 # PEP 517 requires that the .dist-info directory be placed in the
143 # metadata_directory. To comply, we MUST copy the directory to the root
144 if dist_info_directory != metadata_directory:
145 shutil.move(
146 os.path.join(dist_info_directory, dist_infos[0]),
147 metadata_directory)
148 shutil.rmtree(dist_info_directory, ignore_errors=True)
149
150 return dist_infos[0]
151
152
153 def build_wheel(wheel_directory, config_settings=None,
154 metadata_directory=None):
155 config_settings = _fix_config(config_settings)
156 wheel_directory = os.path.abspath(wheel_directory)
157 sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
158 config_settings["--global-option"]
159 _run_setup()
160 if wheel_directory != 'dist':
161 shutil.rmtree(wheel_directory)
162 shutil.copytree('dist', wheel_directory)
163
164 wheels = [f for f in os.listdir(wheel_directory)
165 if f.endswith('.whl')]
166
167 assert len(wheels) == 1
168 return wheels[0]
169
170
171 def build_sdist(sdist_directory, config_settings=None):
172 config_settings = _fix_config(config_settings)
173 sdist_directory = os.path.abspath(sdist_directory)
174 sys.argv = sys.argv[:1] + ['sdist'] + \
175 config_settings["--global-option"]
176 _run_setup()
177 if sdist_directory != 'dist':
178 shutil.rmtree(sdist_directory)
179 shutil.copytree('dist', sdist_directory)
180
181 sdists = [f for f in os.listdir(sdist_directory)
182 if f.endswith('.tar.gz')]
183
184 assert len(sdists) == 1
185 return sdists[0]
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py
--- a/setuptools/build_meta.py
+++ b/setuptools/build_meta.py
@@ -91,9 +91,8 @@
return config_settings
-def _get_build_requires(config_settings):
+def _get_build_requires(config_settings, requirements):
config_settings = _fix_config(config_settings)
- requirements = ['setuptools', 'wheel']
sys.argv = sys.argv[:1] + ['egg_info'] + \
config_settings["--global-option"]
@@ -113,20 +112,20 @@
def get_requires_for_build_wheel(config_settings=None):
config_settings = _fix_config(config_settings)
- return _get_build_requires(config_settings)
+ return _get_build_requires(config_settings, requirements=['setuptools', 'wheel'])
def get_requires_for_build_sdist(config_settings=None):
config_settings = _fix_config(config_settings)
- return _get_build_requires(config_settings)
+ return _get_build_requires(config_settings, requirements=['setuptools'])
def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):
sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]
_run_setup()
-
+
dist_info_directory = metadata_directory
- while True:
+ while True:
dist_infos = [f for f in os.listdir(dist_info_directory)
if f.endswith('.dist-info')]
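The patch threads an explicit `requirements` list into `_get_build_requires` instead of hard-coding `['setuptools', 'wheel']` for both hooks. A rough way to state the intended post-patch behaviour as tests, assuming a project whose `setup_requires` does not add `wheel` on its own (the test names are invented):
```python
# Hypothetical checks for the patched hooks; run from a directory with a setup.py.
import setuptools.build_meta as backend


def test_sdist_requirements_exclude_wheel():
    assert "wheel" not in backend.get_requires_for_build_sdist()


def test_wheel_requirements_include_wheel():
    assert "wheel" in backend.get_requires_for_build_wheel()
```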
|
{"golden_diff": "diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py\n--- a/setuptools/build_meta.py\n+++ b/setuptools/build_meta.py\n@@ -91,9 +91,8 @@\n return config_settings\n \n \n-def _get_build_requires(config_settings):\n+def _get_build_requires(config_settings, requirements):\n config_settings = _fix_config(config_settings)\n- requirements = ['setuptools', 'wheel']\n \n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n@@ -113,20 +112,20 @@\n \n def get_requires_for_build_wheel(config_settings=None):\n config_settings = _fix_config(config_settings)\n- return _get_build_requires(config_settings)\n+ return _get_build_requires(config_settings, requirements=['setuptools', 'wheel'])\n \n \n def get_requires_for_build_sdist(config_settings=None):\n config_settings = _fix_config(config_settings)\n- return _get_build_requires(config_settings)\n+ return _get_build_requires(config_settings, requirements=['setuptools'])\n \n \n def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]\n _run_setup()\n- \n+\n dist_info_directory = metadata_directory\n- while True: \n+ while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n", "issue": "get_requires_for_build_sdist should not require wheel\n[get_requires_for_build_sdist](https://github.com/pypa/setuptools/blob/master/setuptools/build_meta.py#L120) also implies [wheel](https://github.com/pypa/setuptools/blob/master/setuptools/build_meta.py#L96) as dependency, even though a wheel is not required to build a source distribution.\n", "before_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! 
Just a \"taste\" of the module.\n\"\"\"\n\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\n\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n raise SetupRequirementsError(specifiers)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _run_setup(setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n f = getattr(tokenize, 'open', open)(__file__)\n code = f.read().replace('\\\\r\\\\n', '\\\\n')\n f.close()\n exec(compile(code, __file__, 'exec'), locals())\n\n\ndef _fix_config(config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n\ndef _get_build_requires(config_settings):\n config_settings = _fix_config(config_settings)\n requirements = ['setuptools', 'wheel']\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n _run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef get_requires_for_build_wheel(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings)\n\n\ndef get_requires_for_build_sdist(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings)\n\n\ndef prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]\n _run_setup()\n \n dist_info_directory = metadata_directory\n while True: \n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if len(dist_infos) == 0 and \\\n len(_get_immediate_subdirectories(dist_info_directory)) == 1:\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. 
To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n\ndef build_wheel(wheel_directory, config_settings=None,\n metadata_directory=None):\n config_settings = _fix_config(config_settings)\n wheel_directory = os.path.abspath(wheel_directory)\n sys.argv = sys.argv[:1] + ['bdist_wheel'] + \\\n config_settings[\"--global-option\"]\n _run_setup()\n if wheel_directory != 'dist':\n shutil.rmtree(wheel_directory)\n shutil.copytree('dist', wheel_directory)\n\n wheels = [f for f in os.listdir(wheel_directory)\n if f.endswith('.whl')]\n\n assert len(wheels) == 1\n return wheels[0]\n\n\ndef build_sdist(sdist_directory, config_settings=None):\n config_settings = _fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n sys.argv = sys.argv[:1] + ['sdist'] + \\\n config_settings[\"--global-option\"]\n _run_setup()\n if sdist_directory != 'dist':\n shutil.rmtree(sdist_directory)\n shutil.copytree('dist', sdist_directory)\n\n sdists = [f for f in os.listdir(sdist_directory)\n if f.endswith('.tar.gz')]\n\n assert len(sdists) == 1\n return sdists[0]\n", "path": "setuptools/build_meta.py"}], "after_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! 
Just a \"taste\" of the module.\n\"\"\"\n\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\n\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n raise SetupRequirementsError(specifiers)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _run_setup(setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n f = getattr(tokenize, 'open', open)(__file__)\n code = f.read().replace('\\\\r\\\\n', '\\\\n')\n f.close()\n exec(compile(code, __file__, 'exec'), locals())\n\n\ndef _fix_config(config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n\ndef _get_build_requires(config_settings, requirements):\n config_settings = _fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n _run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef get_requires_for_build_wheel(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings, requirements=['setuptools', 'wheel'])\n\n\ndef get_requires_for_build_sdist(config_settings=None):\n config_settings = _fix_config(config_settings)\n return _get_build_requires(config_settings, requirements=['setuptools'])\n\n\ndef prepare_metadata_for_build_wheel(metadata_directory, config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', _to_str(metadata_directory)]\n _run_setup()\n\n dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if len(dist_infos) == 0 and \\\n len(_get_immediate_subdirectories(dist_info_directory)) == 1:\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. 
To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n\ndef build_wheel(wheel_directory, config_settings=None,\n metadata_directory=None):\n config_settings = _fix_config(config_settings)\n wheel_directory = os.path.abspath(wheel_directory)\n sys.argv = sys.argv[:1] + ['bdist_wheel'] + \\\n config_settings[\"--global-option\"]\n _run_setup()\n if wheel_directory != 'dist':\n shutil.rmtree(wheel_directory)\n shutil.copytree('dist', wheel_directory)\n\n wheels = [f for f in os.listdir(wheel_directory)\n if f.endswith('.whl')]\n\n assert len(wheels) == 1\n return wheels[0]\n\n\ndef build_sdist(sdist_directory, config_settings=None):\n config_settings = _fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n sys.argv = sys.argv[:1] + ['sdist'] + \\\n config_settings[\"--global-option\"]\n _run_setup()\n if sdist_directory != 'dist':\n shutil.rmtree(sdist_directory)\n shutil.copytree('dist', sdist_directory)\n\n sdists = [f for f in os.listdir(sdist_directory)\n if f.endswith('.tar.gz')]\n\n assert len(sdists) == 1\n return sdists[0]\n", "path": "setuptools/build_meta.py"}]}
| 2,204 | 315 |
gh_patches_debug_31993
|
rasdani/github-patches
|
git_diff
|
nonebot__nonebot2-149
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automatically generate API documentation
**Did you run into a problem during use that calls for a new feature? Please describe:**
```python
self._server_app = FastAPI(
debug=config.debug,
openapi_url=None,
docs_url=None,
redoc_url=None,
)
```
The source code seems to disable automatic API documentation generation. Could this be decided by configuration instead, for example by adding a config option?
Since API requests need to be debugged during development, this feature still feels quite necessary.
--- END ISSUE ---
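The request boils down to exposing FastAPI's own constructor switches. As a standalone illustration (plain FastAPI, not NoneBot code), passing `None` disables an endpoint while passing a path string enables it:
```python
# Standalone FastAPI example showing the three documentation switches.
from typing import Optional

from fastapi import FastAPI


def build_app(openapi_url: Optional[str] = None,
              docs_url: Optional[str] = None,
              redoc_url: Optional[str] = None,
              debug: bool = False) -> FastAPI:
    # None disables the corresponding endpoint; a path string enables it.
    return FastAPI(debug=debug, openapi_url=openapi_url,
                   docs_url=docs_url, redoc_url=redoc_url)


dev_app = build_app("/openapi.json", "/docs", "/redoc", debug=True)  # docs available
prod_app = build_app()  # everything disabled, matching the current hard-coded behaviour
```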
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/drivers/fastapi.py`
Content:
```
1 """
2 FastAPI 驱动适配
3 ================
4
5 后端使用方法请参考: `FastAPI 文档`_
6
7 .. _FastAPI 文档:
8 https://fastapi.tiangolo.com/
9 """
10
11 import json
12 import asyncio
13 import logging
14 from typing import Optional, Callable
15
16 import uvicorn
17 from fastapi.responses import Response
18 from fastapi import Body, status, Request, FastAPI, HTTPException
19 from starlette.websockets import WebSocketDisconnect, WebSocket as FastAPIWebSocket
20
21 from nonebot.log import logger
22 from nonebot.typing import overrides
23 from nonebot.config import Env, Config
24 from nonebot.utils import DataclassEncoder
25 from nonebot.exception import RequestDenied
26 from nonebot.drivers import Driver as BaseDriver, WebSocket as BaseWebSocket
27
28
29 class Driver(BaseDriver):
30 """
31 FastAPI 驱动框架
32
33 :上报地址:
34
35 * ``/{adapter name}/``: HTTP POST 上报
36 * ``/{adapter name}/http/``: HTTP POST 上报
37 * ``/{adapter name}/ws``: WebSocket 上报
38 * ``/{adapter name}/ws/``: WebSocket 上报
39 """
40
41 def __init__(self, env: Env, config: Config):
42 super().__init__(env, config)
43
44 self._server_app = FastAPI(
45 debug=config.debug,
46 openapi_url=None,
47 docs_url=None,
48 redoc_url=None,
49 )
50
51 self._server_app.post("/{adapter}/")(self._handle_http)
52 self._server_app.post("/{adapter}/http")(self._handle_http)
53 self._server_app.websocket("/{adapter}/ws")(self._handle_ws_reverse)
54 self._server_app.websocket("/{adapter}/ws/")(self._handle_ws_reverse)
55
56 @property
57 @overrides(BaseDriver)
58 def type(self) -> str:
59 """驱动名称: ``fastapi``"""
60 return "fastapi"
61
62 @property
63 @overrides(BaseDriver)
64 def server_app(self) -> FastAPI:
65 """``FastAPI APP`` 对象"""
66 return self._server_app
67
68 @property
69 @overrides(BaseDriver)
70 def asgi(self):
71 """``FastAPI APP`` 对象"""
72 return self._server_app
73
74 @property
75 @overrides(BaseDriver)
76 def logger(self) -> logging.Logger:
77 """fastapi 使用的 logger"""
78 return logging.getLogger("fastapi")
79
80 @overrides(BaseDriver)
81 def on_startup(self, func: Callable) -> Callable:
82 """参考文档: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_"""
83 return self.server_app.on_event("startup")(func)
84
85 @overrides(BaseDriver)
86 def on_shutdown(self, func: Callable) -> Callable:
87 """参考文档: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_"""
88 return self.server_app.on_event("shutdown")(func)
89
90 @overrides(BaseDriver)
91 def run(self,
92 host: Optional[str] = None,
93 port: Optional[int] = None,
94 *,
95 app: Optional[str] = None,
96 **kwargs):
97 """使用 ``uvicorn`` 启动 FastAPI"""
98 super().run(host, port, app, **kwargs)
99 LOGGING_CONFIG = {
100 "version": 1,
101 "disable_existing_loggers": False,
102 "handlers": {
103 "default": {
104 "class": "nonebot.log.LoguruHandler",
105 },
106 },
107 "loggers": {
108 "uvicorn.error": {
109 "handlers": ["default"],
110 "level": "INFO"
111 },
112 "uvicorn.access": {
113 "handlers": ["default"],
114 "level": "INFO",
115 },
116 },
117 }
118 uvicorn.run(app or self.server_app,
119 host=host or str(self.config.host),
120 port=port or self.config.port,
121 reload=bool(app) and self.config.debug,
122 debug=self.config.debug,
123 log_config=LOGGING_CONFIG,
124 **kwargs)
125
126 @overrides(BaseDriver)
127 async def _handle_http(self,
128 adapter: str,
129 request: Request,
130 data: dict = Body(...)):
131 if not isinstance(data, dict):
132 logger.warning("Data received is invalid")
133 raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)
134
135 if adapter not in self._adapters:
136 logger.warning(
137 f"Unknown adapter {adapter}. Please register the adapter before use."
138 )
139 raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
140 detail="adapter not found")
141
142 # 创建 Bot 对象
143 BotClass = self._adapters[adapter]
144 headers = dict(request.headers)
145 try:
146 x_self_id = await BotClass.check_permission(self, "http", headers,
147 data)
148 except RequestDenied as e:
149 raise HTTPException(status_code=e.status_code,
150 detail=e.reason) from None
151
152 if x_self_id in self._clients:
153 logger.warning("There's already a reverse websocket connection,"
154 "so the event may be handled twice.")
155
156 bot = BotClass(self, "http", self.config, x_self_id)
157
158 asyncio.create_task(bot.handle_message(data))
159 return Response("", 204)
160
161 @overrides(BaseDriver)
162 async def _handle_ws_reverse(self, adapter: str,
163 websocket: FastAPIWebSocket):
164 ws = WebSocket(websocket)
165
166 if adapter not in self._adapters:
167 logger.warning(
168 f"Unknown adapter {adapter}. Please register the adapter before use."
169 )
170 await ws.close(code=status.WS_1008_POLICY_VIOLATION)
171 return
172
173 # Create Bot Object
174 BotClass = self._adapters[adapter]
175 headers = dict(websocket.headers)
176 try:
177 x_self_id = await BotClass.check_permission(self, "websocket",
178 headers, None)
179 except RequestDenied:
180 await ws.close(code=status.WS_1008_POLICY_VIOLATION)
181 return
182
183 if x_self_id in self._clients:
184 logger.warning("There's already a reverse websocket connection, "
185 f"<y>{adapter.upper()} Bot {x_self_id}</y> ignored.")
186 await ws.close(code=status.WS_1008_POLICY_VIOLATION)
187 return
188
189 bot = BotClass(self, "websocket", self.config, x_self_id, websocket=ws)
190
191 await ws.accept()
192 logger.opt(colors=True).info(
193 f"WebSocket Connection from <y>{adapter.upper()} "
194 f"Bot {x_self_id}</y> Accepted!")
195
196 self._bot_connect(bot)
197
198 try:
199 while not ws.closed:
200 data = await ws.receive()
201
202 if not data:
203 continue
204
205 asyncio.create_task(bot.handle_message(data))
206 finally:
207 self._bot_disconnect(bot)
208
209
210 class WebSocket(BaseWebSocket):
211
212 def __init__(self, websocket: FastAPIWebSocket):
213 super().__init__(websocket)
214 self._closed = False
215
216 @property
217 @overrides(BaseWebSocket)
218 def closed(self):
219 return self._closed
220
221 @overrides(BaseWebSocket)
222 async def accept(self):
223 await self.websocket.accept()
224 self._closed = False
225
226 @overrides(BaseWebSocket)
227 async def close(self, code: int = status.WS_1000_NORMAL_CLOSURE):
228 await self.websocket.close(code=code)
229 self._closed = True
230
231 @overrides(BaseWebSocket)
232 async def receive(self) -> Optional[dict]:
233 data = None
234 try:
235 data = await self.websocket.receive_json()
236 if not isinstance(data, dict):
237 data = None
238 raise ValueError
239 except ValueError:
240 logger.warning("Received an invalid json message.")
241 except WebSocketDisconnect:
242 self._closed = True
243 logger.error("WebSocket disconnected by peer.")
244
245 return data
246
247 @overrides(BaseWebSocket)
248 async def send(self, data: dict) -> None:
249 text = json.dumps(data, cls=DataclassEncoder)
250 await self.websocket.send({"type": "websocket.send", "text": text})
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nonebot/drivers/fastapi.py b/nonebot/drivers/fastapi.py
--- a/nonebot/drivers/fastapi.py
+++ b/nonebot/drivers/fastapi.py
@@ -14,18 +14,28 @@
from typing import Optional, Callable
import uvicorn
+from pydantic import BaseSettings
from fastapi.responses import Response
from fastapi import Body, status, Request, FastAPI, HTTPException
from starlette.websockets import WebSocketDisconnect, WebSocket as FastAPIWebSocket
from nonebot.log import logger
from nonebot.typing import overrides
-from nonebot.config import Env, Config
from nonebot.utils import DataclassEncoder
from nonebot.exception import RequestDenied
+from nonebot.config import Env, Config as NoneBotConfig
from nonebot.drivers import Driver as BaseDriver, WebSocket as BaseWebSocket
+class Config(BaseSettings):
+ fastapi_openapi_url: Optional[str] = None
+ fastapi_docs_url: Optional[str] = None
+ fastapi_redoc_url: Optional[str] = None
+
+ class Config:
+ extra = "ignore"
+
+
class Driver(BaseDriver):
"""
FastAPI 驱动框架
@@ -38,14 +48,16 @@
* ``/{adapter name}/ws/``: WebSocket 上报
"""
- def __init__(self, env: Env, config: Config):
+ def __init__(self, env: Env, config: NoneBotConfig):
super().__init__(env, config)
+ self.fastapi_config = Config(**config.dict())
+
self._server_app = FastAPI(
debug=config.debug,
- openapi_url=None,
- docs_url=None,
- redoc_url=None,
+ openapi_url=self.fastapi_config.fastapi_openapi_url,
+ docs_url=self.fastapi_config.fastapi_docs_url,
+ redoc_url=self.fastapi_config.fastapi_redoc_url,
)
self._server_app.post("/{adapter}/")(self._handle_http)
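With the new `Config` settings class, the three URLs can come in through NoneBot's normal configuration channels. A sketch of enabling the docs once the patch lands; only the `fastapi_*` key names come from the diff, and the bootstrap calls are assumed to behave as in a typical NoneBot 2 project:
```python
# Assumed usage: extra keyword arguments to nonebot.init() are merged into the
# global config, so the driver's pydantic Config picks up the fastapi_* keys.
import nonebot

nonebot.init(
    fastapi_openapi_url="/openapi.json",
    fastapi_docs_url="/docs",
    fastapi_redoc_url="/redoc",
)
app = nonebot.get_asgi()  # the FastAPI app should now serve /docs and /redoc
```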
|
{"golden_diff": "diff --git a/nonebot/drivers/fastapi.py b/nonebot/drivers/fastapi.py\n--- a/nonebot/drivers/fastapi.py\n+++ b/nonebot/drivers/fastapi.py\n@@ -14,18 +14,28 @@\n from typing import Optional, Callable\n \n import uvicorn\n+from pydantic import BaseSettings\n from fastapi.responses import Response\n from fastapi import Body, status, Request, FastAPI, HTTPException\n from starlette.websockets import WebSocketDisconnect, WebSocket as FastAPIWebSocket\n \n from nonebot.log import logger\n from nonebot.typing import overrides\n-from nonebot.config import Env, Config\n from nonebot.utils import DataclassEncoder\n from nonebot.exception import RequestDenied\n+from nonebot.config import Env, Config as NoneBotConfig\n from nonebot.drivers import Driver as BaseDriver, WebSocket as BaseWebSocket\n \n \n+class Config(BaseSettings):\n+ fastapi_openapi_url: Optional[str] = None\n+ fastapi_docs_url: Optional[str] = None\n+ fastapi_redoc_url: Optional[str] = None\n+\n+ class Config:\n+ extra = \"ignore\"\n+\n+\n class Driver(BaseDriver):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\n@@ -38,14 +48,16 @@\n * ``/{adapter name}/ws/``: WebSocket \u4e0a\u62a5\n \"\"\"\n \n- def __init__(self, env: Env, config: Config):\n+ def __init__(self, env: Env, config: NoneBotConfig):\n super().__init__(env, config)\n \n+ self.fastapi_config = Config(**config.dict())\n+\n self._server_app = FastAPI(\n debug=config.debug,\n- openapi_url=None,\n- docs_url=None,\n- redoc_url=None,\n+ openapi_url=self.fastapi_config.fastapi_openapi_url,\n+ docs_url=self.fastapi_config.fastapi_docs_url,\n+ redoc_url=self.fastapi_config.fastapi_redoc_url,\n )\n \n self._server_app.post(\"/{adapter}/\")(self._handle_http)\n", "issue": "\u81ea\u52a8\u751f\u6210 api \u6587\u6863\n**\u662f\u5426\u5728\u4f7f\u7528\u4e2d\u9047\u5230\u67d0\u4e9b\u95ee\u9898\u800c\u9700\u8981\u65b0\u7684\u7279\u6027\uff1f\u8bf7\u63cf\u8ff0\uff1a**\r\n\r\n```python\r\nself._server_app = FastAPI(\r\n debug=config.debug,\r\n openapi_url=None,\r\n docs_url=None,\r\n redoc_url=None,\r\n )\r\n```\r\n\r\n\u6e90\u7801\u597d\u50cf\u7981\u6b62\u4e86API\u6587\u6863\u81ea\u52a8\u751f\u6210\u7684\u529f\u80fd\uff0c\u80fd\u5426\u6539\u4e3a\u6839\u636e\u914d\u7f6e\u51b3\u5b9a\uff1f\u4f8b\u5982\u6dfb\u52a0\u4e00\u4e2a\u914d\u7f6e\u9879\u3002\r\n\r\n\u56e0\u4e3a\u5f00\u53d1\u65f6\u8981\u8fdb\u884c API \u8bf7\u6c42\u8c03\u8bd5\uff0c\u6240\u4ee5\u611f\u89c9\u8fd9\u4e2a\u529f\u80fd\u8fd8\u662f\u633a\u6709\u5fc5\u8981\u7684\r\n\n", "before_files": [{"content": "\"\"\"\nFastAPI \u9a71\u52a8\u9002\u914d\n================\n\n\u540e\u7aef\u4f7f\u7528\u65b9\u6cd5\u8bf7\u53c2\u8003: `FastAPI \u6587\u6863`_\n\n.. 
_FastAPI \u6587\u6863:\n https://fastapi.tiangolo.com/\n\"\"\"\n\nimport json\nimport asyncio\nimport logging\nfrom typing import Optional, Callable\n\nimport uvicorn\nfrom fastapi.responses import Response\nfrom fastapi import Body, status, Request, FastAPI, HTTPException\nfrom starlette.websockets import WebSocketDisconnect, WebSocket as FastAPIWebSocket\n\nfrom nonebot.log import logger\nfrom nonebot.typing import overrides\nfrom nonebot.config import Env, Config\nfrom nonebot.utils import DataclassEncoder\nfrom nonebot.exception import RequestDenied\nfrom nonebot.drivers import Driver as BaseDriver, WebSocket as BaseWebSocket\n\n\nclass Driver(BaseDriver):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\n\n :\u4e0a\u62a5\u5730\u5740:\n\n * ``/{adapter name}/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/http/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/ws``: WebSocket \u4e0a\u62a5\n * ``/{adapter name}/ws/``: WebSocket \u4e0a\u62a5\n \"\"\"\n\n def __init__(self, env: Env, config: Config):\n super().__init__(env, config)\n\n self._server_app = FastAPI(\n debug=config.debug,\n openapi_url=None,\n docs_url=None,\n redoc_url=None,\n )\n\n self._server_app.post(\"/{adapter}/\")(self._handle_http)\n self._server_app.post(\"/{adapter}/http\")(self._handle_http)\n self._server_app.websocket(\"/{adapter}/ws\")(self._handle_ws_reverse)\n self._server_app.websocket(\"/{adapter}/ws/\")(self._handle_ws_reverse)\n\n @property\n @overrides(BaseDriver)\n def type(self) -> str:\n \"\"\"\u9a71\u52a8\u540d\u79f0: ``fastapi``\"\"\"\n return \"fastapi\"\n\n @property\n @overrides(BaseDriver)\n def server_app(self) -> FastAPI:\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(BaseDriver)\n def asgi(self):\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(BaseDriver)\n def logger(self) -> logging.Logger:\n \"\"\"fastapi \u4f7f\u7528\u7684 logger\"\"\"\n return logging.getLogger(\"fastapi\")\n\n @overrides(BaseDriver)\n def on_startup(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"startup\")(func)\n\n @overrides(BaseDriver)\n def on_shutdown(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"shutdown\")(func)\n\n @overrides(BaseDriver)\n def run(self,\n host: Optional[str] = None,\n port: Optional[int] = None,\n *,\n app: Optional[str] = None,\n **kwargs):\n \"\"\"\u4f7f\u7528 ``uvicorn`` \u542f\u52a8 FastAPI\"\"\"\n super().run(host, port, app, **kwargs)\n LOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"default\": {\n \"class\": \"nonebot.log.LoguruHandler\",\n },\n },\n \"loggers\": {\n \"uvicorn.error\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\"\n },\n \"uvicorn.access\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\",\n },\n },\n }\n uvicorn.run(app or self.server_app,\n host=host or str(self.config.host),\n port=port or self.config.port,\n reload=bool(app) and self.config.debug,\n debug=self.config.debug,\n log_config=LOGGING_CONFIG,\n **kwargs)\n\n @overrides(BaseDriver)\n async def _handle_http(self,\n adapter: str,\n request: Request,\n data: dict = Body(...)):\n if not isinstance(data, dict):\n logger.warning(\"Data received is invalid\")\n raise 
HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. Please register the adapter before use.\"\n )\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"adapter not found\")\n\n # \u521b\u5efa Bot \u5bf9\u8c61\n BotClass = self._adapters[adapter]\n headers = dict(request.headers)\n try:\n x_self_id = await BotClass.check_permission(self, \"http\", headers,\n data)\n except RequestDenied as e:\n raise HTTPException(status_code=e.status_code,\n detail=e.reason) from None\n\n if x_self_id in self._clients:\n logger.warning(\"There's already a reverse websocket connection,\"\n \"so the event may be handled twice.\")\n\n bot = BotClass(self, \"http\", self.config, x_self_id)\n\n asyncio.create_task(bot.handle_message(data))\n return Response(\"\", 204)\n\n @overrides(BaseDriver)\n async def _handle_ws_reverse(self, adapter: str,\n websocket: FastAPIWebSocket):\n ws = WebSocket(websocket)\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. Please register the adapter before use.\"\n )\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n # Create Bot Object\n BotClass = self._adapters[adapter]\n headers = dict(websocket.headers)\n try:\n x_self_id = await BotClass.check_permission(self, \"websocket\",\n headers, None)\n except RequestDenied:\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n if x_self_id in self._clients:\n logger.warning(\"There's already a reverse websocket connection, \"\n f\"<y>{adapter.upper()} Bot {x_self_id}</y> ignored.\")\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n bot = BotClass(self, \"websocket\", self.config, x_self_id, websocket=ws)\n\n await ws.accept()\n logger.opt(colors=True).info(\n f\"WebSocket Connection from <y>{adapter.upper()} \"\n f\"Bot {x_self_id}</y> Accepted!\")\n\n self._bot_connect(bot)\n\n try:\n while not ws.closed:\n data = await ws.receive()\n\n if not data:\n continue\n\n asyncio.create_task(bot.handle_message(data))\n finally:\n self._bot_disconnect(bot)\n\n\nclass WebSocket(BaseWebSocket):\n\n def __init__(self, websocket: FastAPIWebSocket):\n super().__init__(websocket)\n self._closed = False\n\n @property\n @overrides(BaseWebSocket)\n def closed(self):\n return self._closed\n\n @overrides(BaseWebSocket)\n async def accept(self):\n await self.websocket.accept()\n self._closed = False\n\n @overrides(BaseWebSocket)\n async def close(self, code: int = status.WS_1000_NORMAL_CLOSURE):\n await self.websocket.close(code=code)\n self._closed = True\n\n @overrides(BaseWebSocket)\n async def receive(self) -> Optional[dict]:\n data = None\n try:\n data = await self.websocket.receive_json()\n if not isinstance(data, dict):\n data = None\n raise ValueError\n except ValueError:\n logger.warning(\"Received an invalid json message.\")\n except WebSocketDisconnect:\n self._closed = True\n logger.error(\"WebSocket disconnected by peer.\")\n\n return data\n\n @overrides(BaseWebSocket)\n async def send(self, data: dict) -> None:\n text = json.dumps(data, cls=DataclassEncoder)\n await self.websocket.send({\"type\": \"websocket.send\", \"text\": text})\n", "path": "nonebot/drivers/fastapi.py"}], "after_files": [{"content": "\"\"\"\nFastAPI \u9a71\u52a8\u9002\u914d\n================\n\n\u540e\u7aef\u4f7f\u7528\u65b9\u6cd5\u8bf7\u53c2\u8003: `FastAPI \u6587\u6863`_\n\n.. 
_FastAPI \u6587\u6863:\n https://fastapi.tiangolo.com/\n\"\"\"\n\nimport json\nimport asyncio\nimport logging\nfrom typing import Optional, Callable\n\nimport uvicorn\nfrom pydantic import BaseSettings\nfrom fastapi.responses import Response\nfrom fastapi import Body, status, Request, FastAPI, HTTPException\nfrom starlette.websockets import WebSocketDisconnect, WebSocket as FastAPIWebSocket\n\nfrom nonebot.log import logger\nfrom nonebot.typing import overrides\nfrom nonebot.utils import DataclassEncoder\nfrom nonebot.exception import RequestDenied\nfrom nonebot.config import Env, Config as NoneBotConfig\nfrom nonebot.drivers import Driver as BaseDriver, WebSocket as BaseWebSocket\n\n\nclass Config(BaseSettings):\n fastapi_openapi_url: Optional[str] = None\n fastapi_docs_url: Optional[str] = None\n fastapi_redoc_url: Optional[str] = None\n\n class Config:\n extra = \"ignore\"\n\n\nclass Driver(BaseDriver):\n \"\"\"\n FastAPI \u9a71\u52a8\u6846\u67b6\n\n :\u4e0a\u62a5\u5730\u5740:\n\n * ``/{adapter name}/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/http/``: HTTP POST \u4e0a\u62a5\n * ``/{adapter name}/ws``: WebSocket \u4e0a\u62a5\n * ``/{adapter name}/ws/``: WebSocket \u4e0a\u62a5\n \"\"\"\n\n def __init__(self, env: Env, config: NoneBotConfig):\n super().__init__(env, config)\n\n self.fastapi_config = Config(**config.dict())\n\n self._server_app = FastAPI(\n debug=config.debug,\n openapi_url=self.fastapi_config.fastapi_openapi_url,\n docs_url=self.fastapi_config.fastapi_docs_url,\n redoc_url=self.fastapi_config.fastapi_redoc_url,\n )\n\n self._server_app.post(\"/{adapter}/\")(self._handle_http)\n self._server_app.post(\"/{adapter}/http\")(self._handle_http)\n self._server_app.websocket(\"/{adapter}/ws\")(self._handle_ws_reverse)\n self._server_app.websocket(\"/{adapter}/ws/\")(self._handle_ws_reverse)\n\n @property\n @overrides(BaseDriver)\n def type(self) -> str:\n \"\"\"\u9a71\u52a8\u540d\u79f0: ``fastapi``\"\"\"\n return \"fastapi\"\n\n @property\n @overrides(BaseDriver)\n def server_app(self) -> FastAPI:\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(BaseDriver)\n def asgi(self):\n \"\"\"``FastAPI APP`` \u5bf9\u8c61\"\"\"\n return self._server_app\n\n @property\n @overrides(BaseDriver)\n def logger(self) -> logging.Logger:\n \"\"\"fastapi \u4f7f\u7528\u7684 logger\"\"\"\n return logging.getLogger(\"fastapi\")\n\n @overrides(BaseDriver)\n def on_startup(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"startup\")(func)\n\n @overrides(BaseDriver)\n def on_shutdown(self, func: Callable) -> Callable:\n \"\"\"\u53c2\u8003\u6587\u6863: `Events <https://fastapi.tiangolo.com/advanced/events/#startup-event>`_\"\"\"\n return self.server_app.on_event(\"shutdown\")(func)\n\n @overrides(BaseDriver)\n def run(self,\n host: Optional[str] = None,\n port: Optional[int] = None,\n *,\n app: Optional[str] = None,\n **kwargs):\n \"\"\"\u4f7f\u7528 ``uvicorn`` \u542f\u52a8 FastAPI\"\"\"\n super().run(host, port, app, **kwargs)\n LOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"default\": {\n \"class\": \"nonebot.log.LoguruHandler\",\n },\n },\n \"loggers\": {\n \"uvicorn.error\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\"\n },\n \"uvicorn.access\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\",\n },\n },\n }\n uvicorn.run(app or self.server_app,\n 
host=host or str(self.config.host),\n port=port or self.config.port,\n reload=bool(app) and self.config.debug,\n debug=self.config.debug,\n log_config=LOGGING_CONFIG,\n **kwargs)\n\n @overrides(BaseDriver)\n async def _handle_http(self,\n adapter: str,\n request: Request,\n data: dict = Body(...)):\n if not isinstance(data, dict):\n logger.warning(\"Data received is invalid\")\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. Please register the adapter before use.\"\n )\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=\"adapter not found\")\n\n # \u521b\u5efa Bot \u5bf9\u8c61\n BotClass = self._adapters[adapter]\n headers = dict(request.headers)\n try:\n x_self_id = await BotClass.check_permission(self, \"http\", headers,\n data)\n except RequestDenied as e:\n raise HTTPException(status_code=e.status_code,\n detail=e.reason) from None\n\n if x_self_id in self._clients:\n logger.warning(\"There's already a reverse websocket connection,\"\n \"so the event may be handled twice.\")\n\n bot = BotClass(self, \"http\", self.config, x_self_id)\n\n asyncio.create_task(bot.handle_message(data))\n return Response(\"\", 204)\n\n @overrides(BaseDriver)\n async def _handle_ws_reverse(self, adapter: str,\n websocket: FastAPIWebSocket):\n ws = WebSocket(websocket)\n\n if adapter not in self._adapters:\n logger.warning(\n f\"Unknown adapter {adapter}. Please register the adapter before use.\"\n )\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n # Create Bot Object\n BotClass = self._adapters[adapter]\n headers = dict(websocket.headers)\n try:\n x_self_id = await BotClass.check_permission(self, \"websocket\",\n headers, None)\n except RequestDenied:\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n if x_self_id in self._clients:\n logger.warning(\"There's already a reverse websocket connection, \"\n f\"<y>{adapter.upper()} Bot {x_self_id}</y> ignored.\")\n await ws.close(code=status.WS_1008_POLICY_VIOLATION)\n return\n\n bot = BotClass(self, \"websocket\", self.config, x_self_id, websocket=ws)\n\n await ws.accept()\n logger.opt(colors=True).info(\n f\"WebSocket Connection from <y>{adapter.upper()} \"\n f\"Bot {x_self_id}</y> Accepted!\")\n\n self._bot_connect(bot)\n\n try:\n while not ws.closed:\n data = await ws.receive()\n\n if not data:\n continue\n\n asyncio.create_task(bot.handle_message(data))\n finally:\n self._bot_disconnect(bot)\n\n\nclass WebSocket(BaseWebSocket):\n\n def __init__(self, websocket: FastAPIWebSocket):\n super().__init__(websocket)\n self._closed = False\n\n @property\n @overrides(BaseWebSocket)\n def closed(self):\n return self._closed\n\n @overrides(BaseWebSocket)\n async def accept(self):\n await self.websocket.accept()\n self._closed = False\n\n @overrides(BaseWebSocket)\n async def close(self, code: int = status.WS_1000_NORMAL_CLOSURE):\n await self.websocket.close(code=code)\n self._closed = True\n\n @overrides(BaseWebSocket)\n async def receive(self) -> Optional[dict]:\n data = None\n try:\n data = await self.websocket.receive_json()\n if not isinstance(data, dict):\n data = None\n raise ValueError\n except ValueError:\n logger.warning(\"Received an invalid json message.\")\n except WebSocketDisconnect:\n self._closed = True\n logger.error(\"WebSocket disconnected by peer.\")\n\n return data\n\n @overrides(BaseWebSocket)\n async def send(self, data: dict) -> None:\n text = json.dumps(data, cls=DataclassEncoder)\n await 
self.websocket.send({\"type\": \"websocket.send\", \"text\": text})\n", "path": "nonebot/drivers/fastapi.py"}]}
| 2,753 | 446 |
gh_patches_debug_29603
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-3976
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Suppression comment in Dockerfile skips more checks than it should
**Describe the issue**
I tried to skip CKV_DOCKER_9 by adding a suppression comment into my Dockerfile, but it turned out that CKV_DOCKER_5 was also skipped.
**Examples**
Here's my Dockerfile with a comment to skip CKV_DOCKER_9:
```dockerfile
#checkov:skip=CKV_DOCKER_9
FROM alpine:3.16
RUN ls
```
As you can see, CKV_DOCKER_5 is also marked as SKIPPED in the results:
```
$ docker run -it --rm -v "$(pwd):$(pwd)" -w "$(pwd)" bridgecrew/checkov:2.2.104 --framework dockerfile --skip-download --file Dockerfile
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.2.104
dockerfile scan results:
Passed checks: 2, Failed checks: 2, Skipped checks: 2
Check: CKV_DOCKER_11: "Ensure From Alias are unique for multistage builds."
PASSED for resource: Dockerfile.
File: Dockerfile:1-4
Check: CKV_DOCKER_7: "Ensure the base image uses a non latest version tag"
PASSED for resource: Dockerfile.
File: Dockerfile:1-4
Check: CKV_DOCKER_2: "Ensure that HEALTHCHECK instructions have been added to container images"
FAILED for resource: Dockerfile.
File: Dockerfile:1-4
1 | #checkov:skip=CKV_DOCKER_9
2 | FROM alpine:3.16
3 | RUN ls
4 |
Check: CKV_DOCKER_3: "Ensure that a user for the container has been created"
FAILED for resource: Dockerfile.
File: Dockerfile:1-4
1 | #checkov:skip=CKV_DOCKER_9
2 | FROM alpine:3.16
3 | RUN ls
4 |
Check: CKV_DOCKER_9: "Ensure that APT isn't used"
SKIPPED for resource: Dockerfile.
Suppress comment: No comment provided
File: Dockerfile:1-4
Check: CKV_DOCKER_5: "Ensure update instructions are not use alone in the Dockerfile"
SKIPPED for resource: Dockerfile.
Suppress comment: No comment provided
File: Dockerfile:1-4
```
**Version (please complete the following information):**
- Checkov Version: 2.2.104
--- END ISSUE ---
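The behaviour traces back to `checkov/dockerfile/base_registry.py`, where `skip_info` is initialised once per instruction rather than once per check, so a match for CKV_DOCKER_9 lingers while the remaining RUN checks are evaluated. A self-contained toy loop (no checkov imports; the check IDs are reused purely for illustration) reproduces the pattern:
```python
# Toy reproduction of the stale skip_info pattern.
skipped = [{"id": "CKV_DOCKER_9", "suppress_comment": "No comment provided"}]
run_checks = ["CKV_DOCKER_9", "CKV_DOCKER_5"]  # both registered on the RUN instruction

skip_info = {}  # initialised outside the per-check loop, as in the buggy registry
for check_id in run_checks:
    if check_id in [s["id"] for s in skipped]:
        skip_info = [s for s in skipped if s["id"] == check_id][0]
    print(check_id, "->", "SKIPPED" if skip_info else "evaluated")
# Both IDs print SKIPPED, because skip_info still holds the CKV_DOCKER_9 entry
# when CKV_DOCKER_5 is processed.
```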
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/base_registry.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING, Any, cast
4
5 from checkov.common.bridgecrew.check_type import CheckType
6 from checkov.common.checks.base_check_registry import BaseCheckRegistry
7 from checkov.common.models.enums import CheckResult
8
9 if TYPE_CHECKING:
10 from dockerfile_parse.parser import _Instruction # only in extra_stubs
11 from checkov.common.checks.base_check import BaseCheck
12 from checkov.common.typing import _SkippedCheck, _CheckResult
13 from checkov.runner_filter import RunnerFilter
14
15
16 class Registry(BaseCheckRegistry):
17 def __init__(self) -> None:
18 super().__init__(CheckType.DOCKERFILE)
19
20 def extract_entity_details(self, entity: dict[str, Any]) -> tuple[str, str, dict[str, Any]]:
21 # not needed
22 return "", "", {}
23
24 def scan(
25 self,
26 scanned_file: str,
27 entity: dict[str, list[_Instruction]],
28 skipped_checks: list[_SkippedCheck],
29 runner_filter: RunnerFilter,
30 report_type: str | None = None,
31 ) -> dict[BaseCheck, _CheckResult]:
32
33 results: "dict[BaseCheck, _CheckResult]" = {}
34 if not entity:
35 return results
36 for instruction, checks in self.checks.items():
37 skip_info: _SkippedCheck = {}
38 if instruction in entity:
39
40 for check in checks:
41 if check.id in [x['id'] for x in skipped_checks]:
42 skip_info = [x for x in skipped_checks if x['id'] == check.id][0]
43
44 if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):
45 self.update_result(
46 check=check,
47 entity_configuration=entity[instruction],
48 entity_name=instruction,
49 entity_type=instruction,
50 results=results,
51 scanned_file=scanned_file,
52 skip_info=skip_info,
53 )
54
55 for check in self.wildcard_checks["*"]:
56 skip_info = {}
57 if skipped_checks:
58 if check.id in [x['id'] for x in skipped_checks]:
59 skip_info = [x for x in skipped_checks if x['id'] == check.id][0]
60
61 if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):
62 self.update_result(
63 check=check,
64 entity_configuration=entity,
65 entity_name=scanned_file,
66 entity_type="*",
67 results=results,
68 scanned_file=scanned_file,
69 skip_info=skip_info,
70 )
71 return results
72
73 def update_result(
74 self,
75 check: BaseCheck,
76 entity_configuration: list[_Instruction] | dict[str, list[_Instruction]],
77 entity_name: str,
78 entity_type: str,
79 results: dict[BaseCheck, _CheckResult],
80 scanned_file: str,
81 skip_info: _SkippedCheck
82 ) -> None:
83 result = self.run_check(
84 check=check,
85 entity_configuration=entity_configuration, # type:ignore[arg-type] # special Dockerfile runner behaviour
86 entity_name=entity_name,
87 entity_type=entity_type,
88 scanned_file=scanned_file,
89 skip_info=skip_info,
90 )
91 results[check] = {}
92 if result['result'] == CheckResult.SKIPPED:
93 results[check]['result'] = result['result']
94 results[check]['suppress_comment'] = result['suppress_comment']
95 results[check]['results_configuration'] = None
96 else:
97 results[check]['result'] = cast("CheckResult", result['result'][0])
98 results[check]['results_configuration'] = cast("dict[str, Any]", result['result'][1])
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/dockerfile/base_registry.py b/checkov/dockerfile/base_registry.py
--- a/checkov/dockerfile/base_registry.py
+++ b/checkov/dockerfile/base_registry.py
@@ -33,13 +33,16 @@
results: "dict[BaseCheck, _CheckResult]" = {}
if not entity:
return results
+
+ skipped_check_ids = {skipped_check["id"]: skipped_check for skipped_check in skipped_checks}
+
for instruction, checks in self.checks.items():
- skip_info: _SkippedCheck = {}
if instruction in entity:
-
for check in checks:
- if check.id in [x['id'] for x in skipped_checks]:
- skip_info = [x for x in skipped_checks if x['id'] == check.id][0]
+ skip_info: "_SkippedCheck" = {}
+ if skipped_check_ids:
+ if check.id in skipped_check_ids:
+ skip_info = skipped_check_ids[check.id]
if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):
self.update_result(
@@ -54,9 +57,9 @@
for check in self.wildcard_checks["*"]:
skip_info = {}
- if skipped_checks:
- if check.id in [x['id'] for x in skipped_checks]:
- skip_info = [x for x in skipped_checks if x['id'] == check.id][0]
+ if skipped_check_ids:
+ if check.id in skipped_check_ids:
+ skip_info = skipped_check_ids[check.id]
if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):
self.update_result(
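
Two things change in this patch: the skip directives are indexed once into a `skipped_check_ids` dict instead of being rescanned with a list comprehension for every check, and `skip_info` is re-initialised inside the per-check loop, so a suppression that matches one check id no longer carries over to later checks registered for the same instruction (the CKV_DOCKER_9 / CKV_DOCKER_5 symptom). A minimal, self-contained sketch of that pattern, using hypothetical check ids rather than checkov's real classes:

```python
# Standalone illustration with hypothetical check ids -- not checkov's real classes.
skipped_checks = [{"id": "CKV_DOCKER_9", "suppress_comment": "No comment provided"}]
run_instruction_checks = ["CKV_DOCKER_5", "CKV_DOCKER_9"]

# Index the skip directives once, keyed by check id.
skipped_check_ids = {skip["id"]: skip for skip in skipped_checks}

for check_id in run_instruction_checks:
    skip_info = {}  # reset per check; the pre-patch code kept the previous match here
    if check_id in skipped_check_ids:
        skip_info = skipped_check_ids[check_id]
    print(check_id, "-> SKIPPED" if skip_info else "-> evaluated")
# CKV_DOCKER_5 -> evaluated
# CKV_DOCKER_9 -> SKIPPED
```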
|
{"golden_diff": "diff --git a/checkov/dockerfile/base_registry.py b/checkov/dockerfile/base_registry.py\n--- a/checkov/dockerfile/base_registry.py\n+++ b/checkov/dockerfile/base_registry.py\n@@ -33,13 +33,16 @@\n results: \"dict[BaseCheck, _CheckResult]\" = {}\n if not entity:\n return results\n+\n+ skipped_check_ids = {skipped_check[\"id\"]: skipped_check for skipped_check in skipped_checks}\n+\n for instruction, checks in self.checks.items():\n- skip_info: _SkippedCheck = {}\n if instruction in entity:\n-\n for check in checks:\n- if check.id in [x['id'] for x in skipped_checks]:\n- skip_info = [x for x in skipped_checks if x['id'] == check.id][0]\n+ skip_info: \"_SkippedCheck\" = {}\n+ if skipped_check_ids:\n+ if check.id in skipped_check_ids:\n+ skip_info = skipped_check_ids[check.id]\n \n if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):\n self.update_result(\n@@ -54,9 +57,9 @@\n \n for check in self.wildcard_checks[\"*\"]:\n skip_info = {}\n- if skipped_checks:\n- if check.id in [x['id'] for x in skipped_checks]:\n- skip_info = [x for x in skipped_checks if x['id'] == check.id][0]\n+ if skipped_check_ids:\n+ if check.id in skipped_check_ids:\n+ skip_info = skipped_check_ids[check.id]\n \n if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):\n self.update_result(\n", "issue": "Suppression comment in Dockerfile skips more checks than it should\n**Describe the issue**\r\nI tried to skip CKV_DOCKER_9 by adding a suppression comment into my Dockerfile, but it turned out that CKV_DOCKER_5 was also skipped.\r\n\r\n**Examples**\r\nHere's my Dockerfile with comment to skip CKV_DOCKER_9:\r\n```dockerfile\r\n#checkov:skip=CKV_DOCKER_9\r\nFROM alpine:3.16\r\nRUN ls\r\n```\r\nAs you can see, CKV_DOCKER_5 is also marked as SKIPPED in the results:\r\n```\r\n$ docker run -it --rm -v \"$(pwd):$(pwd)\" -w \"$(pwd)\" bridgecrew/checkov:2.2.104 --framework dockerfile --skip-download --file Dockerfile\r\n\r\n _ _ \r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V / \r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/ \r\n \r\nBy bridgecrew.io | version: 2.2.104 \r\n\r\ndockerfile scan results:\r\n\r\nPassed checks: 2, Failed checks: 2, Skipped checks: 2\r\n\r\nCheck: CKV_DOCKER_11: \"Ensure From Alias are unique for multistage builds.\"\r\n\tPASSED for resource: Dockerfile.\r\n\tFile: Dockerfile:1-4\r\nCheck: CKV_DOCKER_7: \"Ensure the base image uses a non latest version tag\"\r\n\tPASSED for resource: Dockerfile.\r\n\tFile: Dockerfile:1-4\r\nCheck: CKV_DOCKER_2: \"Ensure that HEALTHCHECK instructions have been added to container images\"\r\n\tFAILED for resource: Dockerfile.\r\n\tFile: Dockerfile:1-4\r\n\r\n\t\t1 | #checkov:skip=CKV_DOCKER_9\r\n\t\t2 | FROM alpine:3.16\r\n\t\t3 | RUN ls\r\n\t\t4 | \r\n\r\nCheck: CKV_DOCKER_3: \"Ensure that a user for the container has been created\"\r\n\tFAILED for resource: Dockerfile.\r\n\tFile: Dockerfile:1-4\r\n\r\n\t\t1 | #checkov:skip=CKV_DOCKER_9\r\n\t\t2 | FROM alpine:3.16\r\n\t\t3 | RUN ls\r\n\t\t4 | \r\n\r\nCheck: CKV_DOCKER_9: \"Ensure that APT isn't used\"\r\n\tSKIPPED for resource: Dockerfile.\r\n\tSuppress comment: No comment provided\r\n\tFile: Dockerfile:1-4\r\nCheck: CKV_DOCKER_5: \"Ensure update instructions are not use alone in the Dockerfile\"\r\n\tSKIPPED for resource: Dockerfile.\r\n\tSuppress comment: No comment provided\r\n\tFile: Dockerfile:1-4\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version: 
2.2.104\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom checkov.common.bridgecrew.check_type import CheckType\nfrom checkov.common.checks.base_check_registry import BaseCheckRegistry\nfrom checkov.common.models.enums import CheckResult\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction # only in extra_stubs\n from checkov.common.checks.base_check import BaseCheck\n from checkov.common.typing import _SkippedCheck, _CheckResult\n from checkov.runner_filter import RunnerFilter\n\n\nclass Registry(BaseCheckRegistry):\n def __init__(self) -> None:\n super().__init__(CheckType.DOCKERFILE)\n\n def extract_entity_details(self, entity: dict[str, Any]) -> tuple[str, str, dict[str, Any]]:\n # not needed\n return \"\", \"\", {}\n\n def scan(\n self,\n scanned_file: str,\n entity: dict[str, list[_Instruction]],\n skipped_checks: list[_SkippedCheck],\n runner_filter: RunnerFilter,\n report_type: str | None = None,\n ) -> dict[BaseCheck, _CheckResult]:\n\n results: \"dict[BaseCheck, _CheckResult]\" = {}\n if not entity:\n return results\n for instruction, checks in self.checks.items():\n skip_info: _SkippedCheck = {}\n if instruction in entity:\n\n for check in checks:\n if check.id in [x['id'] for x in skipped_checks]:\n skip_info = [x for x in skipped_checks if x['id'] == check.id][0]\n\n if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):\n self.update_result(\n check=check,\n entity_configuration=entity[instruction],\n entity_name=instruction,\n entity_type=instruction,\n results=results,\n scanned_file=scanned_file,\n skip_info=skip_info,\n )\n\n for check in self.wildcard_checks[\"*\"]:\n skip_info = {}\n if skipped_checks:\n if check.id in [x['id'] for x in skipped_checks]:\n skip_info = [x for x in skipped_checks if x['id'] == check.id][0]\n\n if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):\n self.update_result(\n check=check,\n entity_configuration=entity,\n entity_name=scanned_file,\n entity_type=\"*\",\n results=results,\n scanned_file=scanned_file,\n skip_info=skip_info,\n )\n return results\n\n def update_result(\n self,\n check: BaseCheck,\n entity_configuration: list[_Instruction] | dict[str, list[_Instruction]],\n entity_name: str,\n entity_type: str,\n results: dict[BaseCheck, _CheckResult],\n scanned_file: str,\n skip_info: _SkippedCheck\n ) -> None:\n result = self.run_check(\n check=check,\n entity_configuration=entity_configuration, # type:ignore[arg-type] # special Dockerfile runner behaviour\n entity_name=entity_name,\n entity_type=entity_type,\n scanned_file=scanned_file,\n skip_info=skip_info,\n )\n results[check] = {}\n if result['result'] == CheckResult.SKIPPED:\n results[check]['result'] = result['result']\n results[check]['suppress_comment'] = result['suppress_comment']\n results[check]['results_configuration'] = None\n else:\n results[check]['result'] = cast(\"CheckResult\", result['result'][0])\n results[check]['results_configuration'] = cast(\"dict[str, Any]\", result['result'][1])\n", "path": "checkov/dockerfile/base_registry.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom checkov.common.bridgecrew.check_type import CheckType\nfrom checkov.common.checks.base_check_registry import BaseCheckRegistry\nfrom checkov.common.models.enums import CheckResult\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction # only in extra_stubs\n 
from checkov.common.checks.base_check import BaseCheck\n from checkov.common.typing import _SkippedCheck, _CheckResult\n from checkov.runner_filter import RunnerFilter\n\n\nclass Registry(BaseCheckRegistry):\n def __init__(self) -> None:\n super().__init__(CheckType.DOCKERFILE)\n\n def extract_entity_details(self, entity: dict[str, Any]) -> tuple[str, str, dict[str, Any]]:\n # not needed\n return \"\", \"\", {}\n\n def scan(\n self,\n scanned_file: str,\n entity: dict[str, list[_Instruction]],\n skipped_checks: list[_SkippedCheck],\n runner_filter: RunnerFilter,\n report_type: str | None = None,\n ) -> dict[BaseCheck, _CheckResult]:\n\n results: \"dict[BaseCheck, _CheckResult]\" = {}\n if not entity:\n return results\n\n skipped_check_ids = {skipped_check[\"id\"]: skipped_check for skipped_check in skipped_checks}\n\n for instruction, checks in self.checks.items():\n if instruction in entity:\n for check in checks:\n skip_info: \"_SkippedCheck\" = {}\n if skipped_check_ids:\n if check.id in skipped_check_ids:\n skip_info = skipped_check_ids[check.id]\n\n if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):\n self.update_result(\n check=check,\n entity_configuration=entity[instruction],\n entity_name=instruction,\n entity_type=instruction,\n results=results,\n scanned_file=scanned_file,\n skip_info=skip_info,\n )\n\n for check in self.wildcard_checks[\"*\"]:\n skip_info = {}\n if skipped_check_ids:\n if check.id in skipped_check_ids:\n skip_info = skipped_check_ids[check.id]\n\n if runner_filter.should_run_check(check, report_type=CheckType.DOCKERFILE):\n self.update_result(\n check=check,\n entity_configuration=entity,\n entity_name=scanned_file,\n entity_type=\"*\",\n results=results,\n scanned_file=scanned_file,\n skip_info=skip_info,\n )\n return results\n\n def update_result(\n self,\n check: BaseCheck,\n entity_configuration: list[_Instruction] | dict[str, list[_Instruction]],\n entity_name: str,\n entity_type: str,\n results: dict[BaseCheck, _CheckResult],\n scanned_file: str,\n skip_info: _SkippedCheck\n ) -> None:\n result = self.run_check(\n check=check,\n entity_configuration=entity_configuration, # type:ignore[arg-type] # special Dockerfile runner behaviour\n entity_name=entity_name,\n entity_type=entity_type,\n scanned_file=scanned_file,\n skip_info=skip_info,\n )\n results[check] = {}\n if result['result'] == CheckResult.SKIPPED:\n results[check]['result'] = result['result']\n results[check]['suppress_comment'] = result['suppress_comment']\n results[check]['results_configuration'] = None\n else:\n results[check]['result'] = cast(\"CheckResult\", result['result'][0])\n results[check]['results_configuration'] = cast(\"dict[str, Any]\", result['result'][1])\n", "path": "checkov/dockerfile/base_registry.py"}]}
| 1,908 | 373 |
gh_patches_debug_38791
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-2994
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sensor fails to retrieve kv object after some time.
st2 2.0.1
mongo 3.2.10
I have a polling sensor that queries an ES database and, when a datapoint crosses a threshold, sends an alert. Once alerted, it stores a key with a 24-hour TTL so that the sensor knows to skip alerts for the rest of the day. I also have a mechanism for silencing alerts with the same approach. This used to work well before I upgraded st2, but now, after a few days of running, the sensor keeps alerting even though there’s an `alerted` key in the keystore. When I restart st2, everything goes back to normal for a few days, then the problem comes back.
I can probably get permission to publish my entire pack, but here's the gist of the dispatch_trigger logic in my sensor:
https://gist.github.com/pixelrebel/240ca95c480d1377175b3667de0411cb
And here's the gist of my action chain that sets the `alerted` key.
https://gist.github.com/pixelrebel/6def658dfd2ee43fe71ce7735db76b29
--- END ISSUE ---
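The behaviour described above boils down to a guarded datastore read/write with a TTL. The linked gists aren't reproduced here, so the sketch below is only an illustration of that flow, written against the `get_value`/`set_value` methods of the `DatastoreService` shown further down; all names are hypothetical.

```python
# Illustration only -- hypothetical names, not the code from the linked gists.
ALERT_TTL = 24 * 60 * 60  # seconds; matches the 24-hour window described above


def maybe_alert(datastore, metric_name, value, threshold):
    key = 'alerted.%s' % metric_name
    if value < threshold:
        return False
    if datastore.get_value(key):       # already alerted inside the TTL window
        return False
    # the real sensor would dispatch_trigger(...) here
    datastore.set_value(key, 'true', ttl=ALERT_TTL)
    return True
```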
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2common/st2common/services/datastore.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from st2client.client import Client
17 from st2client.models import KeyValuePair
18 from st2common.services.access import create_token
19 from st2common.util.api import get_full_public_api_url
20 from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE
21
22
23 class DatastoreService(object):
24 """
25 Class provides public methods for accessing datastore items.
26 """
27
28 DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR
29
30 def __init__(self, logger, pack_name, class_name, api_username):
31 self._api_username = api_username
32 self._pack_name = pack_name
33 self._class_name = class_name
34 self._logger = logger
35
36 self._client = None
37
38 ##################################
39 # Methods for datastore management
40 ##################################
41
42 def list_values(self, local=True, prefix=None):
43 """
44 Retrieve all the datastores items.
45
46 :param local: List values from a namespace local to this pack/class. Defaults to True.
47 :type: local: ``bool``
48
49 :param prefix: Optional key name prefix / startswith filter.
50 :type prefix: ``str``
51
52 :rtype: ``list`` of :class:`KeyValuePair`
53 """
54 client = self._get_api_client()
55 self._logger.audit('Retrieving all the value from the datastore')
56
57 key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
58 kvps = client.keys.get_all(prefix=key_prefix)
59 return kvps
60
61 def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
62 """
63 Retrieve a value from the datastore for the provided key.
64
65 By default, value is retrieved from the namespace local to the pack/class. If you want to
66 retrieve a global value from a datastore, pass local=False to this method.
67
68 :param name: Key name.
69 :type name: ``str``
70
71 :param local: Retrieve value from a namespace local to the pack/class. Defaults to True.
72 :type: local: ``bool``
73
74 :param scope: Scope under which item is saved. Defaults to system scope.
75 :type: local: ``str``
76
77 :param encrypt: Return the decrypted value. Defaults to False.
78 :type: local: ``bool``
79
80 :rtype: ``str`` or ``None``
81 """
82 if scope != SYSTEM_SCOPE:
83 raise ValueError('Scope %s is unsupported.' % scope)
84
85 name = self._get_full_key_name(name=name, local=local)
86
87 client = self._get_api_client()
88 self._logger.audit('Retrieving value from the datastore (name=%s)', name)
89
90 try:
91 params = {'decrypt': str(decrypt).lower(), 'scope': scope}
92 kvp = client.keys.get_by_id(id=name, params=params)
93 except Exception:
94 return None
95
96 if kvp:
97 return kvp.value
98
99 return None
100
101 def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
102 """
103 Set a value for the provided key.
104
105 By default, value is set in a namespace local to the pack/class. If you want to
106 set a global value, pass local=False to this method.
107
108 :param name: Key name.
109 :type name: ``str``
110
111 :param value: Key value.
112 :type value: ``str``
113
114 :param ttl: Optional TTL (in seconds).
115 :type ttl: ``int``
116
117 :param local: Set value in a namespace local to the pack/class. Defaults to True.
118 :type: local: ``bool``
119
120 :param scope: Scope under which to place the item. Defaults to system scope.
121 :type: local: ``str``
122
123 :param encrypt: Encrypyt the value when saving. Defaults to False.
124 :type: local: ``bool``
125
126 :return: ``True`` on success, ``False`` otherwise.
127 :rtype: ``bool``
128 """
129 if scope != SYSTEM_SCOPE:
130 raise ValueError('Scope %s is unsupported.', scope)
131
132 name = self._get_full_key_name(name=name, local=local)
133
134 value = str(value)
135 client = self._get_api_client()
136
137 self._logger.audit('Setting value in the datastore (name=%s)', name)
138
139 instance = KeyValuePair()
140 instance.id = name
141 instance.name = name
142 instance.value = value
143 instance.scope = scope
144 if encrypt:
145 instance.secret = True
146
147 if ttl:
148 instance.ttl = ttl
149
150 client.keys.update(instance=instance)
151 return True
152
153 def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
154 """
155 Delete the provided key.
156
157 By default, value is deleted from a namespace local to the pack/class. If you want to
158 delete a global value, pass local=False to this method.
159
160 :param name: Name of the key to delete.
161 :type name: ``str``
162
163 :param local: Delete a value in a namespace local to the pack/class. Defaults to True.
164 :type: local: ``bool``
165
166 :param scope: Scope under which item is saved. Defaults to system scope.
167 :type: local: ``str``
168
169 :return: ``True`` on success, ``False`` otherwise.
170 :rtype: ``bool``
171 """
172 if scope != SYSTEM_SCOPE:
173 raise ValueError('Scope %s is unsupported.', scope)
174
175 name = self._get_full_key_name(name=name, local=local)
176
177 client = self._get_api_client()
178
179 instance = KeyValuePair()
180 instance.id = name
181 instance.name = name
182
183 self._logger.audit('Deleting value from the datastore (name=%s)', name)
184
185 try:
186 params = {'scope': scope}
187 client.keys.delete(instance=instance, params=params)
188 except Exception:
189 return False
190
191 return True
192
193 def _get_api_client(self):
194 """
195 Retrieve API client instance.
196 """
197 if not self._client:
198 ttl = (24 * 60 * 60)
199 temporary_token = create_token(username=self._api_username, ttl=ttl)
200 api_url = get_full_public_api_url()
201 self._client = Client(api_url=api_url, token=temporary_token.token)
202
203 return self._client
204
205 def _get_full_key_name(self, name, local):
206 """
207 Retrieve a full key name.
208
209 :rtype: ``str``
210 """
211 if local:
212 name = self._get_key_name_with_prefix(name=name)
213
214 return name
215
216 def _get_full_key_prefix(self, local, prefix=None):
217 if local:
218 key_prefix = self._get_local_key_name_prefix()
219
220 if prefix:
221 key_prefix += prefix
222 else:
223 key_prefix = prefix
224
225 return key_prefix
226
227 def _get_local_key_name_prefix(self):
228 """
229 Retrieve key prefix which is local to this pack/class.
230 """
231 key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR
232 return key_prefix
233
234 def _get_key_name_with_prefix(self, name):
235 """
236 Retrieve a full key name which is local to the current pack/class.
237
238 :param name: Base datastore key name.
239 :type name: ``str``
240
241 :rtype: ``str``
242 """
243 prefix = self._get_datastore_key_prefix()
244 full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
245 return full_name
246
247 def _get_datastore_key_prefix(self):
248 prefix = '%s.%s' % (self._pack_name, self._class_name)
249 return prefix
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/st2common/st2common/services/datastore.py b/st2common/st2common/services/datastore.py
--- a/st2common/st2common/services/datastore.py
+++ b/st2common/st2common/services/datastore.py
@@ -13,10 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from datetime import timedelta
from st2client.client import Client
from st2client.models import KeyValuePair
from st2common.services.access import create_token
from st2common.util.api import get_full_public_api_url
+from st2common.util.date import get_datetime_utc_now
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE
@@ -34,6 +36,7 @@
self._logger = logger
self._client = None
+ self._token_expire = get_datetime_utc_now()
##################################
# Methods for datastore management
@@ -90,7 +93,12 @@
try:
params = {'decrypt': str(decrypt).lower(), 'scope': scope}
kvp = client.keys.get_by_id(id=name, params=params)
- except Exception:
+ except Exception as e:
+ self._logger.exception(
+ 'Exception retrieving value from datastore (name=%s): %s',
+ name,
+ e
+ )
return None
if kvp:
@@ -185,7 +193,12 @@
try:
params = {'scope': scope}
client.keys.delete(instance=instance, params=params)
- except Exception:
+ except Exception as e:
+ self._logger.exception(
+ 'Exception deleting value from datastore (name=%s): %s',
+ name,
+ e
+ )
return False
return True
@@ -194,8 +207,12 @@
"""
Retrieve API client instance.
"""
- if not self._client:
+ token_expire = self._token_expire <= get_datetime_utc_now()
+
+ if not self._client or token_expire:
+ self._logger.audit('Creating new Client object.')
ttl = (24 * 60 * 60)
+ self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)
temporary_token = create_token(username=self._api_username, ttl=ttl)
api_url = get_full_public_api_url()
self._client = Client(api_url=api_url, token=temporary_token.token)
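
The core of the fix is that the cached `Client` is only trusted for as long as the auth token it was created with (24 hours); once `self._token_expire` passes, a fresh token and client are created instead of reusing the stale one, which lines up with lookups starting to fail a few days after the process starts. A self-contained sketch of that refresh pattern, using the standard library in place of st2's date helper and a hypothetical `make_client` factory standing in for the `create_token()`/`Client()` calls:

```python
# Sketch only: datetime.utcnow() stands in for get_datetime_utc_now(),
# and make_client() stands in for the token/Client creation.
from datetime import datetime, timedelta

TOKEN_TTL_SECONDS = 24 * 60 * 60


class CachedClient(object):
    def __init__(self, make_client):
        self._make_client = make_client          # returns a freshly authenticated client
        self._client = None
        self._token_expire = datetime.utcnow()   # already "expired", so the first call builds one

    def get(self):
        if self._client is None or self._token_expire <= datetime.utcnow():
            self._token_expire = datetime.utcnow() + timedelta(seconds=TOKEN_TTL_SECONDS)
            self._client = self._make_client()
        return self._client


# Usage sketch: any factory works here.
cache = CachedClient(make_client=lambda: object())
client = cache.get()
```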
|
{"golden_diff": "diff --git a/st2common/st2common/services/datastore.py b/st2common/st2common/services/datastore.py\n--- a/st2common/st2common/services/datastore.py\n+++ b/st2common/st2common/services/datastore.py\n@@ -13,10 +13,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from datetime import timedelta\n from st2client.client import Client\n from st2client.models import KeyValuePair\n from st2common.services.access import create_token\n from st2common.util.api import get_full_public_api_url\n+from st2common.util.date import get_datetime_utc_now\n from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE\n \n \n@@ -34,6 +36,7 @@\n self._logger = logger\n \n self._client = None\n+ self._token_expire = get_datetime_utc_now()\n \n ##################################\n # Methods for datastore management\n@@ -90,7 +93,12 @@\n try:\n params = {'decrypt': str(decrypt).lower(), 'scope': scope}\n kvp = client.keys.get_by_id(id=name, params=params)\n- except Exception:\n+ except Exception as e:\n+ self._logger.exception(\n+ 'Exception retrieving value from datastore (name=%s): %s',\n+ name,\n+ e\n+ )\n return None\n \n if kvp:\n@@ -185,7 +193,12 @@\n try:\n params = {'scope': scope}\n client.keys.delete(instance=instance, params=params)\n- except Exception:\n+ except Exception as e:\n+ self._logger.exception(\n+ 'Exception deleting value from datastore (name=%s): %s',\n+ name,\n+ e\n+ )\n return False\n \n return True\n@@ -194,8 +207,12 @@\n \"\"\"\n Retrieve API client instance.\n \"\"\"\n- if not self._client:\n+ token_expire = self._token_expire <= get_datetime_utc_now()\n+\n+ if not self._client or token_expire:\n+ self._logger.audit('Creating new Client object.')\n ttl = (24 * 60 * 60)\n+ self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)\n temporary_token = create_token(username=self._api_username, ttl=ttl)\n api_url = get_full_public_api_url()\n self._client = Client(api_url=api_url, token=temporary_token.token)\n", "issue": "Sensor fails to retrieve kv object after some time.\nst2 2.0.1\r\nmongo 3.2.10\r\n\r\nI have a polling sensor that queries an ES database and when a datapoint crosses a threshold, it sends an alert. Once alerted, it stores a key with a 24-hour ttl so that the sensor knows to skip alerts for the rest of the day. I also have a mechanism for silencing alerts with the same approach. This used to work well before I upgraded st2, but now after a few days of running st2 the sensor keeps alerting even though there\u2019s an `alerted` key in the keystore. When I restart st2 everything goes back to normal for a few days then the problem comes back. \r\n\r\nI can probably get permission to publish my entire pack, but here's the gist of the dispatch_trigger logic in my sensor:\r\n\r\nhttps://gist.github.com/pixelrebel/240ca95c480d1377175b3667de0411cb\r\n\r\nAnd here's the gist of my action chain that sets the `alerted` key.\r\n\r\nhttps://gist.github.com/pixelrebel/6def658dfd2ee43fe71ce7735db76b29\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2client.client import Client\nfrom st2client.models import KeyValuePair\nfrom st2common.services.access import create_token\nfrom st2common.util.api import get_full_public_api_url\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE\n\n\nclass DatastoreService(object):\n \"\"\"\n Class provides public methods for accessing datastore items.\n \"\"\"\n\n DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR\n\n def __init__(self, logger, pack_name, class_name, api_username):\n self._api_username = api_username\n self._pack_name = pack_name\n self._class_name = class_name\n self._logger = logger\n\n self._client = None\n\n ##################################\n # Methods for datastore management\n ##################################\n\n def list_values(self, local=True, prefix=None):\n \"\"\"\n Retrieve all the datastores items.\n\n :param local: List values from a namespace local to this pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param prefix: Optional key name prefix / startswith filter.\n :type prefix: ``str``\n\n :rtype: ``list`` of :class:`KeyValuePair`\n \"\"\"\n client = self._get_api_client()\n self._logger.audit('Retrieving all the value from the datastore')\n\n key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)\n kvps = client.keys.get_all(prefix=key_prefix)\n return kvps\n\n def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):\n \"\"\"\n Retrieve a value from the datastore for the provided key.\n\n By default, value is retrieved from the namespace local to the pack/class. If you want to\n retrieve a global value from a datastore, pass local=False to this method.\n\n :param name: Key name.\n :type name: ``str``\n\n :param local: Retrieve value from a namespace local to the pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param scope: Scope under which item is saved. Defaults to system scope.\n :type: local: ``str``\n\n :param encrypt: Return the decrypted value. Defaults to False.\n :type: local: ``bool``\n\n :rtype: ``str`` or ``None``\n \"\"\"\n if scope != SYSTEM_SCOPE:\n raise ValueError('Scope %s is unsupported.' % scope)\n\n name = self._get_full_key_name(name=name, local=local)\n\n client = self._get_api_client()\n self._logger.audit('Retrieving value from the datastore (name=%s)', name)\n\n try:\n params = {'decrypt': str(decrypt).lower(), 'scope': scope}\n kvp = client.keys.get_by_id(id=name, params=params)\n except Exception:\n return None\n\n if kvp:\n return kvp.value\n\n return None\n\n def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):\n \"\"\"\n Set a value for the provided key.\n\n By default, value is set in a namespace local to the pack/class. If you want to\n set a global value, pass local=False to this method.\n\n :param name: Key name.\n :type name: ``str``\n\n :param value: Key value.\n :type value: ``str``\n\n :param ttl: Optional TTL (in seconds).\n :type ttl: ``int``\n\n :param local: Set value in a namespace local to the pack/class. 
Defaults to True.\n :type: local: ``bool``\n\n :param scope: Scope under which to place the item. Defaults to system scope.\n :type: local: ``str``\n\n :param encrypt: Encrypyt the value when saving. Defaults to False.\n :type: local: ``bool``\n\n :return: ``True`` on success, ``False`` otherwise.\n :rtype: ``bool``\n \"\"\"\n if scope != SYSTEM_SCOPE:\n raise ValueError('Scope %s is unsupported.', scope)\n\n name = self._get_full_key_name(name=name, local=local)\n\n value = str(value)\n client = self._get_api_client()\n\n self._logger.audit('Setting value in the datastore (name=%s)', name)\n\n instance = KeyValuePair()\n instance.id = name\n instance.name = name\n instance.value = value\n instance.scope = scope\n if encrypt:\n instance.secret = True\n\n if ttl:\n instance.ttl = ttl\n\n client.keys.update(instance=instance)\n return True\n\n def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):\n \"\"\"\n Delete the provided key.\n\n By default, value is deleted from a namespace local to the pack/class. If you want to\n delete a global value, pass local=False to this method.\n\n :param name: Name of the key to delete.\n :type name: ``str``\n\n :param local: Delete a value in a namespace local to the pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param scope: Scope under which item is saved. Defaults to system scope.\n :type: local: ``str``\n\n :return: ``True`` on success, ``False`` otherwise.\n :rtype: ``bool``\n \"\"\"\n if scope != SYSTEM_SCOPE:\n raise ValueError('Scope %s is unsupported.', scope)\n\n name = self._get_full_key_name(name=name, local=local)\n\n client = self._get_api_client()\n\n instance = KeyValuePair()\n instance.id = name\n instance.name = name\n\n self._logger.audit('Deleting value from the datastore (name=%s)', name)\n\n try:\n params = {'scope': scope}\n client.keys.delete(instance=instance, params=params)\n except Exception:\n return False\n\n return True\n\n def _get_api_client(self):\n \"\"\"\n Retrieve API client instance.\n \"\"\"\n if not self._client:\n ttl = (24 * 60 * 60)\n temporary_token = create_token(username=self._api_username, ttl=ttl)\n api_url = get_full_public_api_url()\n self._client = Client(api_url=api_url, token=temporary_token.token)\n\n return self._client\n\n def _get_full_key_name(self, name, local):\n \"\"\"\n Retrieve a full key name.\n\n :rtype: ``str``\n \"\"\"\n if local:\n name = self._get_key_name_with_prefix(name=name)\n\n return name\n\n def _get_full_key_prefix(self, local, prefix=None):\n if local:\n key_prefix = self._get_local_key_name_prefix()\n\n if prefix:\n key_prefix += prefix\n else:\n key_prefix = prefix\n\n return key_prefix\n\n def _get_local_key_name_prefix(self):\n \"\"\"\n Retrieve key prefix which is local to this pack/class.\n \"\"\"\n key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR\n return key_prefix\n\n def _get_key_name_with_prefix(self, name):\n \"\"\"\n Retrieve a full key name which is local to the current pack/class.\n\n :param name: Base datastore key name.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n prefix = self._get_datastore_key_prefix()\n full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name\n return full_name\n\n def _get_datastore_key_prefix(self):\n prefix = '%s.%s' % (self._pack_name, self._class_name)\n return prefix\n", "path": "st2common/st2common/services/datastore.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. 
See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import timedelta\nfrom st2client.client import Client\nfrom st2client.models import KeyValuePair\nfrom st2common.services.access import create_token\nfrom st2common.util.api import get_full_public_api_url\nfrom st2common.util.date import get_datetime_utc_now\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE\n\n\nclass DatastoreService(object):\n \"\"\"\n Class provides public methods for accessing datastore items.\n \"\"\"\n\n DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR\n\n def __init__(self, logger, pack_name, class_name, api_username):\n self._api_username = api_username\n self._pack_name = pack_name\n self._class_name = class_name\n self._logger = logger\n\n self._client = None\n self._token_expire = get_datetime_utc_now()\n\n ##################################\n # Methods for datastore management\n ##################################\n\n def list_values(self, local=True, prefix=None):\n \"\"\"\n Retrieve all the datastores items.\n\n :param local: List values from a namespace local to this pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param prefix: Optional key name prefix / startswith filter.\n :type prefix: ``str``\n\n :rtype: ``list`` of :class:`KeyValuePair`\n \"\"\"\n client = self._get_api_client()\n self._logger.audit('Retrieving all the value from the datastore')\n\n key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)\n kvps = client.keys.get_all(prefix=key_prefix)\n return kvps\n\n def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):\n \"\"\"\n Retrieve a value from the datastore for the provided key.\n\n By default, value is retrieved from the namespace local to the pack/class. If you want to\n retrieve a global value from a datastore, pass local=False to this method.\n\n :param name: Key name.\n :type name: ``str``\n\n :param local: Retrieve value from a namespace local to the pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param scope: Scope under which item is saved. Defaults to system scope.\n :type: local: ``str``\n\n :param encrypt: Return the decrypted value. Defaults to False.\n :type: local: ``bool``\n\n :rtype: ``str`` or ``None``\n \"\"\"\n if scope != SYSTEM_SCOPE:\n raise ValueError('Scope %s is unsupported.' 
% scope)\n\n name = self._get_full_key_name(name=name, local=local)\n\n client = self._get_api_client()\n self._logger.audit('Retrieving value from the datastore (name=%s)', name)\n\n try:\n params = {'decrypt': str(decrypt).lower(), 'scope': scope}\n kvp = client.keys.get_by_id(id=name, params=params)\n except Exception as e:\n self._logger.exception(\n 'Exception retrieving value from datastore (name=%s): %s',\n name,\n e\n )\n return None\n\n if kvp:\n return kvp.value\n\n return None\n\n def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):\n \"\"\"\n Set a value for the provided key.\n\n By default, value is set in a namespace local to the pack/class. If you want to\n set a global value, pass local=False to this method.\n\n :param name: Key name.\n :type name: ``str``\n\n :param value: Key value.\n :type value: ``str``\n\n :param ttl: Optional TTL (in seconds).\n :type ttl: ``int``\n\n :param local: Set value in a namespace local to the pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param scope: Scope under which to place the item. Defaults to system scope.\n :type: local: ``str``\n\n :param encrypt: Encrypyt the value when saving. Defaults to False.\n :type: local: ``bool``\n\n :return: ``True`` on success, ``False`` otherwise.\n :rtype: ``bool``\n \"\"\"\n if scope != SYSTEM_SCOPE:\n raise ValueError('Scope %s is unsupported.', scope)\n\n name = self._get_full_key_name(name=name, local=local)\n\n value = str(value)\n client = self._get_api_client()\n\n self._logger.audit('Setting value in the datastore (name=%s)', name)\n\n instance = KeyValuePair()\n instance.id = name\n instance.name = name\n instance.value = value\n instance.scope = scope\n if encrypt:\n instance.secret = True\n\n if ttl:\n instance.ttl = ttl\n\n client.keys.update(instance=instance)\n return True\n\n def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):\n \"\"\"\n Delete the provided key.\n\n By default, value is deleted from a namespace local to the pack/class. If you want to\n delete a global value, pass local=False to this method.\n\n :param name: Name of the key to delete.\n :type name: ``str``\n\n :param local: Delete a value in a namespace local to the pack/class. Defaults to True.\n :type: local: ``bool``\n\n :param scope: Scope under which item is saved. 
Defaults to system scope.\n :type: local: ``str``\n\n :return: ``True`` on success, ``False`` otherwise.\n :rtype: ``bool``\n \"\"\"\n if scope != SYSTEM_SCOPE:\n raise ValueError('Scope %s is unsupported.', scope)\n\n name = self._get_full_key_name(name=name, local=local)\n\n client = self._get_api_client()\n\n instance = KeyValuePair()\n instance.id = name\n instance.name = name\n\n self._logger.audit('Deleting value from the datastore (name=%s)', name)\n\n try:\n params = {'scope': scope}\n client.keys.delete(instance=instance, params=params)\n except Exception as e:\n self._logger.exception(\n 'Exception deleting value from datastore (name=%s): %s',\n name,\n e\n )\n return False\n\n return True\n\n def _get_api_client(self):\n \"\"\"\n Retrieve API client instance.\n \"\"\"\n token_expire = self._token_expire <= get_datetime_utc_now()\n\n if not self._client or token_expire:\n self._logger.audit('Creating new Client object.')\n ttl = (24 * 60 * 60)\n self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)\n temporary_token = create_token(username=self._api_username, ttl=ttl)\n api_url = get_full_public_api_url()\n self._client = Client(api_url=api_url, token=temporary_token.token)\n\n return self._client\n\n def _get_full_key_name(self, name, local):\n \"\"\"\n Retrieve a full key name.\n\n :rtype: ``str``\n \"\"\"\n if local:\n name = self._get_key_name_with_prefix(name=name)\n\n return name\n\n def _get_full_key_prefix(self, local, prefix=None):\n if local:\n key_prefix = self._get_local_key_name_prefix()\n\n if prefix:\n key_prefix += prefix\n else:\n key_prefix = prefix\n\n return key_prefix\n\n def _get_local_key_name_prefix(self):\n \"\"\"\n Retrieve key prefix which is local to this pack/class.\n \"\"\"\n key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR\n return key_prefix\n\n def _get_key_name_with_prefix(self, name):\n \"\"\"\n Retrieve a full key name which is local to the current pack/class.\n\n :param name: Base datastore key name.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n prefix = self._get_datastore_key_prefix()\n full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name\n return full_name\n\n def _get_datastore_key_prefix(self):\n prefix = '%s.%s' % (self._pack_name, self._class_name)\n return prefix\n", "path": "st2common/st2common/services/datastore.py"}]}
| 3,017 | 551 |
gh_patches_debug_59115
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
requests not shown on updates page
The https://bodhi.fedoraproject.org/updates/?user=xxx page shows all updates and their status, but not the requests (e.g. "submitted to stable") made for an update.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/services/updates.py`
Content:
```
1 # This program is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU General Public License
3 # as published by the Free Software Foundation; either version 2
4 # of the License, or (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10 #
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14
15 import math
16
17 from cornice import Service
18 from pyramid.security import has_permission
19 from sqlalchemy import func, distinct
20 from sqlalchemy.sql import or_
21
22 from bodhi import log
23 from bodhi.exceptions import BodhiException, LockedUpdateException
24 from bodhi.models import Update, Build, Bug, CVE, Package, UpdateRequest
25 import bodhi.schemas
26 import bodhi.security
27 from bodhi.validators import (
28 validate_nvrs,
29 validate_uniqueness,
30 validate_build_tags,
31 validate_acls,
32 validate_builds,
33 validate_enums,
34 validate_releases,
35 validate_release,
36 validate_username,
37 validate_update_id,
38 validate_requirements,
39 )
40
41
42 update = Service(name='update', path='/updates/{id}',
43 validators=(validate_update_id,),
44 description='Update submission service',
45 # This acl only checks if the user is an admin or a commiters to the packages,
46 # where as the validate_acls method which is attached to the @post on this
47 # services does this as well as checking against the groups. So, this acl
48 # should be unnecessary at the moment.
49 #acl=bodhi.security.package_maintainers_only_acl,
50 acl=bodhi.security.packagers_allowed_acl,
51 cors_origins=bodhi.security.cors_origins_ro)
52
53 update_edit = Service(name='update_edit', path='/updates/{id}/edit',
54 validators=(validate_update_id,),
55 description='Update submission service',
56 #acl=bodhi.security.package_maintainers_only_acl,
57 acl=bodhi.security.packagers_allowed_acl,
58 cors_origins=bodhi.security.cors_origins_rw)
59
60 updates = Service(name='updates', path='/updates/',
61 acl=bodhi.security.packagers_allowed_acl,
62 description='Update submission service',
63 cors_origins=bodhi.security.cors_origins_ro)
64
65 update_request = Service(name='update_request', path='/updates/{id}/request',
66 description='Update request service',
67 #acl=bodhi.security.package_maintainers_only_acl,
68 acl=bodhi.security.packagers_allowed_acl,
69 cors_origins=bodhi.security.cors_origins_rw)
70
71
72 @update.get(accept=('application/json', 'text/json'), renderer='json')
73 @update.get(accept=('application/javascript'), renderer='jsonp')
74 @update.get(accept="text/html", renderer="update.html")
75 def get_update(request):
76 """Return a single update from an id, title, or alias"""
77 can_edit = has_permission('edit', request.context, request)
78 return dict(update=request.validated['update'], can_edit=can_edit)
79
80
81 @update_edit.get(accept="text/html", renderer="new_update.html")
82 def get_update_for_editing(request):
83 """Return a single update from an id, title, or alias for the edit form"""
84 return dict(
85 update=request.validated['update'],
86 types=reversed(bodhi.models.UpdateType.values()),
87 severities=reversed(bodhi.models.UpdateSeverity.values()),
88 suggestions=reversed(bodhi.models.UpdateSuggestion.values()),
89 )
90
91
92 @update_request.post(schema=bodhi.schemas.UpdateRequestSchema,
93 validators=(
94 validate_enums,
95 validate_update_id,
96 validate_build_tags,
97 validate_acls,
98 ),
99 permission='edit', renderer='json')
100 def set_request(request):
101 """Sets a specific :class:`bodhi.models.UpdateRequest` on a given update"""
102 update = request.validated['update']
103 action = request.validated['request']
104
105 if update.locked:
106 request.errors.add('body', 'request',
107 "Can't change request on a locked update")
108 return
109
110 if action is UpdateRequest.stable:
111 settings = request.registry.settings
112 result, reason = update.check_requirements(request.db, settings)
113 if not result:
114 request.errors.add('body', 'request',
115 'Requirement not met %s' % reason)
116 return
117
118 try:
119 update.set_request(action, request.user.name)
120 except BodhiException as e:
121 request.errors.add('body', 'request', e.message)
122
123 return dict(update=update)
124
125
126 @updates.get(schema=bodhi.schemas.ListUpdateSchema,
127 accept=('application/json', 'text/json'), renderer='json',
128 validators=(validate_release, validate_releases,
129 validate_enums, validate_username))
130 @updates.get(schema=bodhi.schemas.ListUpdateSchema,
131 accept=('application/javascript'), renderer='jsonp',
132 validators=(validate_release, validate_releases,
133 validate_enums, validate_username))
134 @updates.get(schema=bodhi.schemas.ListUpdateSchema,
135 accept=('application/atom+xml'), renderer='rss',
136 validators=(validate_release, validate_releases,
137 validate_enums, validate_username))
138 @updates.get(schema=bodhi.schemas.ListUpdateSchema,
139 accept=('text/html'), renderer='updates.html',
140 validators=(validate_release, validate_releases,
141 validate_enums, validate_username))
142 def query_updates(request):
143 db = request.db
144 data = request.validated
145 query = db.query(Update)
146
147 log.debug('query(%s)' % data)
148
149 approved_since = data.get('approved_since')
150 if approved_since is not None:
151 query = query.filter(Update.date_approved >= approved_since)
152
153 bugs = data.get('bugs')
154 if bugs is not None:
155 query = query.join(Update.bugs)
156 query = query.filter(or_(*[Bug.bug_id==bug_id for bug_id in bugs]))
157
158 critpath = data.get('critpath')
159 if critpath is not None:
160 query = query.filter(Update.critpath==critpath)
161
162 cves = data.get('cves')
163 if cves is not None:
164 query = query.join(Update.cves)
165 query = query.filter(or_(*[CVE.cve_id==cve_id for cve_id in cves]))
166
167 like = data.get('like')
168 if like is not None:
169 query = query.filter(or_(*[
170 Update.title.like('%%%s%%' % like)
171 ]))
172
173 locked = data.get('locked')
174 if locked is not None:
175 query = query.filter(Update.locked==locked)
176
177 modified_since = data.get('modified_since')
178 if modified_since is not None:
179 query = query.filter(Update.date_modified >= modified_since)
180
181 packages = data.get('packages')
182 if packages is not None:
183 query = query.join(Update.builds).join(Build.package)
184 query = query.filter(or_(*[Package.name==pkg for pkg in packages]))
185
186 builds = data.get('builds')
187 if builds is not None:
188 query = query.join(Update.builds)
189 query = query.filter(or_(*[Build.nvr==build for build in builds]))
190
191 pushed = data.get('pushed')
192 if pushed is not None:
193 query = query.filter(Update.pushed==pushed)
194
195 pushed_since = data.get('pushed_since')
196 if pushed_since is not None:
197 query = query.filter(Update.date_pushed >= pushed_since)
198
199 releases = data.get('releases')
200 if releases is not None:
201 query = query.filter(or_(*[Update.release==r for r in releases]))
202
203 # This singular version of the plural "releases" is purely for bodhi1
204 # backwards compat (mostly for RSS feeds) - threebean
205 release = data.get('release')
206 if release is not None:
207 query = query.filter(Update.release==release)
208
209 req = data.get('request')
210 if req is not None:
211 query = query.filter(Update.request==req)
212
213 severity = data.get('severity')
214 if severity is not None:
215 query = query.filter(Update.severity==severity)
216
217 status = data.get('status')
218 if status is not None:
219 query = query.filter(Update.status==status)
220
221 submitted_since = data.get('submitted_since')
222 if submitted_since is not None:
223 query = query.filter(Update.date_submitted >= submitted_since)
224
225 suggest = data.get('suggest')
226 if suggest is not None:
227 query = query.filter(Update.suggest==suggest)
228
229 type = data.get('type')
230 if type is not None:
231 query = query.filter(Update.type==type)
232
233 user = data.get('user')
234 if user is not None:
235 query = query.filter(Update.user==user)
236
237 updateid = data.get('updateid')
238 if updateid is not None:
239 query = query.filter(or_(*[Update.alias==uid for uid in updateid]))
240 alias = data.get('alias')
241 if alias is not None:
242 query = query.filter(or_(*[Update.alias==a for a in alias]))
243
244 query = query.order_by(Update.date_submitted.desc())
245
246 # We can't use ``query.count()`` here because it is naive with respect to
247 # all the joins that we're doing above.
248 count_query = query.statement\
249 .with_only_columns([func.count(distinct(Update.id))])\
250 .order_by(None)
251 total = db.execute(count_query).scalar()
252
253 page = data.get('page')
254 rows_per_page = data.get('rows_per_page')
255 pages = int(math.ceil(total / float(rows_per_page)))
256 query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
257
258 return dict(
259 updates=query.all(),
260 page=page,
261 pages=pages,
262 rows_per_page=rows_per_page,
263 total=total,
264 chrome=data.get('chrome'),
265 display_user=data.get('display_user'),
266 )
267
268
269 @updates.post(schema=bodhi.schemas.SaveUpdateSchema,
270 permission='create', renderer='json',
271 validators=(
272 validate_nvrs,
273 validate_builds,
274 validate_uniqueness,
275 validate_build_tags,
276 validate_acls,
277 validate_enums,
278 validate_requirements,
279 ))
280 def new_update(request):
281 """ Save an update.
282
283 This entails either creating a new update, or editing an existing one. To
284 edit an existing update, the update's original title must be specified in
285 the ``edited`` parameter.
286 """
287 data = request.validated
288 log.debug('validated = %s' % data)
289
290 # This has already been validated at this point, but we need to ditch
291 # it since the models don't care about a csrf argument.
292 data.pop('csrf_token')
293
294 try:
295 if data.get('edited'):
296 log.info('Editing update: %s' % data['edited'])
297 up = Update.edit(request, data)
298 else:
299 log.info('Creating new update: %s' % ' '.join(data['builds']))
300 up = Update.new(request, data)
301 log.debug('update = %r' % up)
302
303 except LockedUpdateException as e:
304 request.errors.add('body', 'builds', "%s" % e)
305 return
306
307 except Exception as e:
308 log.exception(e)
309 request.errors.add('body', 'builds', 'Unable to create update')
310 return
311
312 up.obsolete_older_updates(request)
313
314 return up
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bodhi/services/updates.py b/bodhi/services/updates.py
--- a/bodhi/services/updates.py
+++ b/bodhi/services/updates.py
@@ -262,7 +262,8 @@
rows_per_page=rows_per_page,
total=total,
chrome=data.get('chrome'),
- display_user=data.get('display_user'),
+ display_user=data.get('display_user', False),
+ display_request=data.get('display_request', True),
)
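
The template context gains a `display_request` flag (and `display_user` gets an explicit `False` default); presumably the `updates.html` template, which isn't included in this row, uses it to decide whether to render each update's pending request. A trivial sketch of the defaulting behaviour when neither flag is present in the validated query data:

```python
# dict.get() supplies the fallback when the query string omits the flag,
# so the template context always carries both keys.
data = {}  # hypothetical validated request data with neither flag set
context = dict(
    display_user=data.get('display_user', False),
    display_request=data.get('display_request', True),
)
print(context)  # {'display_user': False, 'display_request': True}
```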
|
{"golden_diff": "diff --git a/bodhi/services/updates.py b/bodhi/services/updates.py\n--- a/bodhi/services/updates.py\n+++ b/bodhi/services/updates.py\n@@ -262,7 +262,8 @@\n rows_per_page=rows_per_page,\n total=total,\n chrome=data.get('chrome'),\n- display_user=data.get('display_user'),\n+ display_user=data.get('display_user', False),\n+ display_request=data.get('display_request', True),\n )\n", "issue": "requests not shown on updates page\nThe https://bodhi.fedoraproject.org/updates/?user=xxx page show all updates and their status, but not the requests (e.g. \"submitted to stable\") made for an update.\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport math\n\nfrom cornice import Service\nfrom pyramid.security import has_permission\nfrom sqlalchemy import func, distinct\nfrom sqlalchemy.sql import or_\n\nfrom bodhi import log\nfrom bodhi.exceptions import BodhiException, LockedUpdateException\nfrom bodhi.models import Update, Build, Bug, CVE, Package, UpdateRequest\nimport bodhi.schemas\nimport bodhi.security\nfrom bodhi.validators import (\n validate_nvrs,\n validate_uniqueness,\n validate_build_tags,\n validate_acls,\n validate_builds,\n validate_enums,\n validate_releases,\n validate_release,\n validate_username,\n validate_update_id,\n validate_requirements,\n)\n\n\nupdate = Service(name='update', path='/updates/{id}',\n validators=(validate_update_id,),\n description='Update submission service',\n # This acl only checks if the user is an admin or a commiters to the packages,\n # where as the validate_acls method which is attached to the @post on this\n # services does this as well as checking against the groups. 
So, this acl\n # should be unnecessary at the moment.\n #acl=bodhi.security.package_maintainers_only_acl,\n acl=bodhi.security.packagers_allowed_acl,\n cors_origins=bodhi.security.cors_origins_ro)\n\nupdate_edit = Service(name='update_edit', path='/updates/{id}/edit',\n validators=(validate_update_id,),\n description='Update submission service',\n #acl=bodhi.security.package_maintainers_only_acl,\n acl=bodhi.security.packagers_allowed_acl,\n cors_origins=bodhi.security.cors_origins_rw)\n\nupdates = Service(name='updates', path='/updates/',\n acl=bodhi.security.packagers_allowed_acl,\n description='Update submission service',\n cors_origins=bodhi.security.cors_origins_ro)\n\nupdate_request = Service(name='update_request', path='/updates/{id}/request',\n description='Update request service',\n #acl=bodhi.security.package_maintainers_only_acl,\n acl=bodhi.security.packagers_allowed_acl,\n cors_origins=bodhi.security.cors_origins_rw)\n\n\[email protected](accept=('application/json', 'text/json'), renderer='json')\[email protected](accept=('application/javascript'), renderer='jsonp')\[email protected](accept=\"text/html\", renderer=\"update.html\")\ndef get_update(request):\n \"\"\"Return a single update from an id, title, or alias\"\"\"\n can_edit = has_permission('edit', request.context, request)\n return dict(update=request.validated['update'], can_edit=can_edit)\n\n\n@update_edit.get(accept=\"text/html\", renderer=\"new_update.html\")\ndef get_update_for_editing(request):\n \"\"\"Return a single update from an id, title, or alias for the edit form\"\"\"\n return dict(\n update=request.validated['update'],\n types=reversed(bodhi.models.UpdateType.values()),\n severities=reversed(bodhi.models.UpdateSeverity.values()),\n suggestions=reversed(bodhi.models.UpdateSuggestion.values()),\n )\n\n\n@update_request.post(schema=bodhi.schemas.UpdateRequestSchema,\n validators=(\n validate_enums,\n validate_update_id,\n validate_build_tags,\n validate_acls,\n ),\n permission='edit', renderer='json')\ndef set_request(request):\n \"\"\"Sets a specific :class:`bodhi.models.UpdateRequest` on a given update\"\"\"\n update = request.validated['update']\n action = request.validated['request']\n\n if update.locked:\n request.errors.add('body', 'request',\n \"Can't change request on a locked update\")\n return\n\n if action is UpdateRequest.stable:\n settings = request.registry.settings\n result, reason = update.check_requirements(request.db, settings)\n if not result:\n request.errors.add('body', 'request',\n 'Requirement not met %s' % reason)\n return\n\n try:\n update.set_request(action, request.user.name)\n except BodhiException as e:\n request.errors.add('body', 'request', e.message)\n\n return dict(update=update)\n\n\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('application/json', 'text/json'), renderer='json',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('application/javascript'), renderer='jsonp',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('application/atom+xml'), renderer='rss',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('text/html'), renderer='updates.html',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\ndef 
query_updates(request):\n db = request.db\n data = request.validated\n query = db.query(Update)\n\n log.debug('query(%s)' % data)\n\n approved_since = data.get('approved_since')\n if approved_since is not None:\n query = query.filter(Update.date_approved >= approved_since)\n\n bugs = data.get('bugs')\n if bugs is not None:\n query = query.join(Update.bugs)\n query = query.filter(or_(*[Bug.bug_id==bug_id for bug_id in bugs]))\n\n critpath = data.get('critpath')\n if critpath is not None:\n query = query.filter(Update.critpath==critpath)\n\n cves = data.get('cves')\n if cves is not None:\n query = query.join(Update.cves)\n query = query.filter(or_(*[CVE.cve_id==cve_id for cve_id in cves]))\n\n like = data.get('like')\n if like is not None:\n query = query.filter(or_(*[\n Update.title.like('%%%s%%' % like)\n ]))\n\n locked = data.get('locked')\n if locked is not None:\n query = query.filter(Update.locked==locked)\n\n modified_since = data.get('modified_since')\n if modified_since is not None:\n query = query.filter(Update.date_modified >= modified_since)\n\n packages = data.get('packages')\n if packages is not None:\n query = query.join(Update.builds).join(Build.package)\n query = query.filter(or_(*[Package.name==pkg for pkg in packages]))\n\n builds = data.get('builds')\n if builds is not None:\n query = query.join(Update.builds)\n query = query.filter(or_(*[Build.nvr==build for build in builds]))\n\n pushed = data.get('pushed')\n if pushed is not None:\n query = query.filter(Update.pushed==pushed)\n\n pushed_since = data.get('pushed_since')\n if pushed_since is not None:\n query = query.filter(Update.date_pushed >= pushed_since)\n\n releases = data.get('releases')\n if releases is not None:\n query = query.filter(or_(*[Update.release==r for r in releases]))\n\n # This singular version of the plural \"releases\" is purely for bodhi1\n # backwards compat (mostly for RSS feeds) - threebean\n release = data.get('release')\n if release is not None:\n query = query.filter(Update.release==release)\n\n req = data.get('request')\n if req is not None:\n query = query.filter(Update.request==req)\n\n severity = data.get('severity')\n if severity is not None:\n query = query.filter(Update.severity==severity)\n\n status = data.get('status')\n if status is not None:\n query = query.filter(Update.status==status)\n\n submitted_since = data.get('submitted_since')\n if submitted_since is not None:\n query = query.filter(Update.date_submitted >= submitted_since)\n\n suggest = data.get('suggest')\n if suggest is not None:\n query = query.filter(Update.suggest==suggest)\n\n type = data.get('type')\n if type is not None:\n query = query.filter(Update.type==type)\n\n user = data.get('user')\n if user is not None:\n query = query.filter(Update.user==user)\n\n updateid = data.get('updateid')\n if updateid is not None:\n query = query.filter(or_(*[Update.alias==uid for uid in updateid]))\n alias = data.get('alias')\n if alias is not None:\n query = query.filter(or_(*[Update.alias==a for a in alias]))\n\n query = query.order_by(Update.date_submitted.desc())\n\n # We can't use ``query.count()`` here because it is naive with respect to\n # all the joins that we're doing above.\n count_query = query.statement\\\n .with_only_columns([func.count(distinct(Update.id))])\\\n .order_by(None)\n total = db.execute(count_query).scalar()\n\n page = data.get('page')\n rows_per_page = data.get('rows_per_page')\n pages = int(math.ceil(total / float(rows_per_page)))\n query = query.offset(rows_per_page * (page - 
1)).limit(rows_per_page)\n\n return dict(\n updates=query.all(),\n page=page,\n pages=pages,\n rows_per_page=rows_per_page,\n total=total,\n chrome=data.get('chrome'),\n display_user=data.get('display_user'),\n )\n\n\[email protected](schema=bodhi.schemas.SaveUpdateSchema,\n permission='create', renderer='json',\n validators=(\n validate_nvrs,\n validate_builds,\n validate_uniqueness,\n validate_build_tags,\n validate_acls,\n validate_enums,\n validate_requirements,\n ))\ndef new_update(request):\n \"\"\" Save an update.\n\n This entails either creating a new update, or editing an existing one. To\n edit an existing update, the update's original title must be specified in\n the ``edited`` parameter.\n \"\"\"\n data = request.validated\n log.debug('validated = %s' % data)\n\n # This has already been validated at this point, but we need to ditch\n # it since the models don't care about a csrf argument.\n data.pop('csrf_token')\n\n try:\n if data.get('edited'):\n log.info('Editing update: %s' % data['edited'])\n up = Update.edit(request, data)\n else:\n log.info('Creating new update: %s' % ' '.join(data['builds']))\n up = Update.new(request, data)\n log.debug('update = %r' % up)\n\n except LockedUpdateException as e:\n request.errors.add('body', 'builds', \"%s\" % e)\n return\n\n except Exception as e:\n log.exception(e)\n request.errors.add('body', 'builds', 'Unable to create update')\n return\n\n up.obsolete_older_updates(request)\n\n return up\n", "path": "bodhi/services/updates.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport math\n\nfrom cornice import Service\nfrom pyramid.security import has_permission\nfrom sqlalchemy import func, distinct\nfrom sqlalchemy.sql import or_\n\nfrom bodhi import log\nfrom bodhi.exceptions import BodhiException, LockedUpdateException\nfrom bodhi.models import Update, Build, Bug, CVE, Package, UpdateRequest\nimport bodhi.schemas\nimport bodhi.security\nfrom bodhi.validators import (\n validate_nvrs,\n validate_uniqueness,\n validate_build_tags,\n validate_acls,\n validate_builds,\n validate_enums,\n validate_releases,\n validate_release,\n validate_username,\n validate_update_id,\n validate_requirements,\n)\n\n\nupdate = Service(name='update', path='/updates/{id}',\n validators=(validate_update_id,),\n description='Update submission service',\n # This acl only checks if the user is an admin or a commiters to the packages,\n # where as the validate_acls method which is attached to the @post on this\n # services does this as well as checking against the groups. 
So, this acl\n # should be unnecessary at the moment.\n #acl=bodhi.security.package_maintainers_only_acl,\n acl=bodhi.security.packagers_allowed_acl,\n cors_origins=bodhi.security.cors_origins_ro)\n\nupdate_edit = Service(name='update_edit', path='/updates/{id}/edit',\n validators=(validate_update_id,),\n description='Update submission service',\n #acl=bodhi.security.package_maintainers_only_acl,\n acl=bodhi.security.packagers_allowed_acl,\n cors_origins=bodhi.security.cors_origins_rw)\n\nupdates = Service(name='updates', path='/updates/',\n acl=bodhi.security.packagers_allowed_acl,\n description='Update submission service',\n cors_origins=bodhi.security.cors_origins_ro)\n\nupdate_request = Service(name='update_request', path='/updates/{id}/request',\n description='Update request service',\n #acl=bodhi.security.package_maintainers_only_acl,\n acl=bodhi.security.packagers_allowed_acl,\n cors_origins=bodhi.security.cors_origins_rw)\n\n\[email protected](accept=('application/json', 'text/json'), renderer='json')\[email protected](accept=('application/javascript'), renderer='jsonp')\[email protected](accept=\"text/html\", renderer=\"update.html\")\ndef get_update(request):\n \"\"\"Return a single update from an id, title, or alias\"\"\"\n can_edit = has_permission('edit', request.context, request)\n return dict(update=request.validated['update'], can_edit=can_edit)\n\n\n@update_edit.get(accept=\"text/html\", renderer=\"new_update.html\")\ndef get_update_for_editing(request):\n \"\"\"Return a single update from an id, title, or alias for the edit form\"\"\"\n return dict(\n update=request.validated['update'],\n types=reversed(bodhi.models.UpdateType.values()),\n severities=reversed(bodhi.models.UpdateSeverity.values()),\n suggestions=reversed(bodhi.models.UpdateSuggestion.values()),\n )\n\n\n@update_request.post(schema=bodhi.schemas.UpdateRequestSchema,\n validators=(\n validate_enums,\n validate_update_id,\n validate_build_tags,\n validate_acls,\n ),\n permission='edit', renderer='json')\ndef set_request(request):\n \"\"\"Sets a specific :class:`bodhi.models.UpdateRequest` on a given update\"\"\"\n update = request.validated['update']\n action = request.validated['request']\n\n if update.locked:\n request.errors.add('body', 'request',\n \"Can't change request on a locked update\")\n return\n\n if action is UpdateRequest.stable:\n settings = request.registry.settings\n result, reason = update.check_requirements(request.db, settings)\n if not result:\n request.errors.add('body', 'request',\n 'Requirement not met %s' % reason)\n return\n\n try:\n update.set_request(action, request.user.name)\n except BodhiException as e:\n request.errors.add('body', 'request', e.message)\n\n return dict(update=update)\n\n\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('application/json', 'text/json'), renderer='json',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('application/javascript'), renderer='jsonp',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('application/atom+xml'), renderer='rss',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\[email protected](schema=bodhi.schemas.ListUpdateSchema,\n accept=('text/html'), renderer='updates.html',\n validators=(validate_release, validate_releases,\n validate_enums, validate_username))\ndef 
query_updates(request):\n db = request.db\n data = request.validated\n query = db.query(Update)\n\n log.debug('query(%s)' % data)\n\n approved_since = data.get('approved_since')\n if approved_since is not None:\n query = query.filter(Update.date_approved >= approved_since)\n\n bugs = data.get('bugs')\n if bugs is not None:\n query = query.join(Update.bugs)\n query = query.filter(or_(*[Bug.bug_id==bug_id for bug_id in bugs]))\n\n critpath = data.get('critpath')\n if critpath is not None:\n query = query.filter(Update.critpath==critpath)\n\n cves = data.get('cves')\n if cves is not None:\n query = query.join(Update.cves)\n query = query.filter(or_(*[CVE.cve_id==cve_id for cve_id in cves]))\n\n like = data.get('like')\n if like is not None:\n query = query.filter(or_(*[\n Update.title.like('%%%s%%' % like)\n ]))\n\n locked = data.get('locked')\n if locked is not None:\n query = query.filter(Update.locked==locked)\n\n modified_since = data.get('modified_since')\n if modified_since is not None:\n query = query.filter(Update.date_modified >= modified_since)\n\n packages = data.get('packages')\n if packages is not None:\n query = query.join(Update.builds).join(Build.package)\n query = query.filter(or_(*[Package.name==pkg for pkg in packages]))\n\n builds = data.get('builds')\n if builds is not None:\n query = query.join(Update.builds)\n query = query.filter(or_(*[Build.nvr==build for build in builds]))\n\n pushed = data.get('pushed')\n if pushed is not None:\n query = query.filter(Update.pushed==pushed)\n\n pushed_since = data.get('pushed_since')\n if pushed_since is not None:\n query = query.filter(Update.date_pushed >= pushed_since)\n\n releases = data.get('releases')\n if releases is not None:\n query = query.filter(or_(*[Update.release==r for r in releases]))\n\n # This singular version of the plural \"releases\" is purely for bodhi1\n # backwards compat (mostly for RSS feeds) - threebean\n release = data.get('release')\n if release is not None:\n query = query.filter(Update.release==release)\n\n req = data.get('request')\n if req is not None:\n query = query.filter(Update.request==req)\n\n severity = data.get('severity')\n if severity is not None:\n query = query.filter(Update.severity==severity)\n\n status = data.get('status')\n if status is not None:\n query = query.filter(Update.status==status)\n\n submitted_since = data.get('submitted_since')\n if submitted_since is not None:\n query = query.filter(Update.date_submitted >= submitted_since)\n\n suggest = data.get('suggest')\n if suggest is not None:\n query = query.filter(Update.suggest==suggest)\n\n type = data.get('type')\n if type is not None:\n query = query.filter(Update.type==type)\n\n user = data.get('user')\n if user is not None:\n query = query.filter(Update.user==user)\n\n updateid = data.get('updateid')\n if updateid is not None:\n query = query.filter(or_(*[Update.alias==uid for uid in updateid]))\n alias = data.get('alias')\n if alias is not None:\n query = query.filter(or_(*[Update.alias==a for a in alias]))\n\n query = query.order_by(Update.date_submitted.desc())\n\n # We can't use ``query.count()`` here because it is naive with respect to\n # all the joins that we're doing above.\n count_query = query.statement\\\n .with_only_columns([func.count(distinct(Update.id))])\\\n .order_by(None)\n total = db.execute(count_query).scalar()\n\n page = data.get('page')\n rows_per_page = data.get('rows_per_page')\n pages = int(math.ceil(total / float(rows_per_page)))\n query = query.offset(rows_per_page * (page - 
1)).limit(rows_per_page)\n\n return dict(\n updates=query.all(),\n page=page,\n pages=pages,\n rows_per_page=rows_per_page,\n total=total,\n chrome=data.get('chrome'),\n display_user=data.get('display_user', False),\n display_request=data.get('display_request', True),\n )\n\n\[email protected](schema=bodhi.schemas.SaveUpdateSchema,\n permission='create', renderer='json',\n validators=(\n validate_nvrs,\n validate_builds,\n validate_uniqueness,\n validate_build_tags,\n validate_acls,\n validate_enums,\n validate_requirements,\n ))\ndef new_update(request):\n \"\"\" Save an update.\n\n This entails either creating a new update, or editing an existing one. To\n edit an existing update, the update's original title must be specified in\n the ``edited`` parameter.\n \"\"\"\n data = request.validated\n log.debug('validated = %s' % data)\n\n # This has already been validated at this point, but we need to ditch\n # it since the models don't care about a csrf argument.\n data.pop('csrf_token')\n\n try:\n if data.get('edited'):\n log.info('Editing update: %s' % data['edited'])\n up = Update.edit(request, data)\n else:\n log.info('Creating new update: %s' % ' '.join(data['builds']))\n up = Update.new(request, data)\n log.debug('update = %r' % up)\n\n except LockedUpdateException as e:\n request.errors.add('body', 'builds', \"%s\" % e)\n return\n\n except Exception as e:\n log.exception(e)\n request.errors.add('body', 'builds', 'Unable to create update')\n return\n\n up.obsolete_older_updates(request)\n\n return up\n", "path": "bodhi/services/updates.py"}]}
| 3,710 | 111 |
gh_patches_debug_20103
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-491
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Base1DPlot doesn't have a index_range trait, doesn't listen to changes upstream
[Base1DPlot](https://github.com/enthought/chaco/blob/021a921a48f317705a86b179a30395b6ccba7b42/chaco/base_1d_plot.py#L21) doesn't have a `index_range`. As a result, if the `index_range` trait is set on a `Plot` that contains a `Plot.plot_1d()`, the notification doesn't reach the 1D plot. In such case, one needs to explicitly set `Base1DPlot.index_mapper.range` to match.
Example:
```python
from numpy import linspace, sin
from traits.api import Button, HasTraits, Instance
from traitsui.api import View, Item, VGroup
from chaco.api import Plot, ArrayPlotData, DataRange1D
from enable.api import ComponentEditor
class MWE(HasTraits):
plot = Instance(Plot)
button1 = Button("zoom X (bug)")
button2 = Button("zoom X (workaround)")
traits_view = View(VGroup(
Item('plot', editor=ComponentEditor()),
Item('button1'),
Item('button2'),
))
def _plot_default(self):
x = linspace(-14, 14, 100)
y = sin(x) * x**3
u = linspace(-14, 14, 10)
plotdata = ArrayPlotData(x=x, y=y, u=u)
plot = Plot(plotdata)
plot.plot(("x", "y"), type="line", color="blue")
plot.plot_1d(("u"), type="scatter_1d", name="dots")
return plot
def _button1_changed(self):
value_range = DataRange1D()
value_range.set_bounds(-5, 5)
self.plot.index_range = value_range # DOESN'T reach the 1D plot!
def _button2_changed(self):
self._button1_changed()
# manually zoom the 1D plot
self.plot.plots["dots"][0].index_mapper.range = self.plot.index_range
if __name__ == "__main__":
MWE().configure_traits()
```

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/base_1d_plot.py`
Content:
```
1 """
2 Abstract base class for 1-D plots which only use one axis
3 """
4
5 from __future__ import absolute_import
6
7 # Standard library imports
8 from numpy import argsort, asarray
9
10 # Enthought library imports
11 from traits.api import (Any, Bool, Enum, Instance, Property, cached_property,
12 on_trait_change)
13
14 # local imports
15 from .abstract_plot_renderer import AbstractPlotRenderer
16 from .abstract_mapper import AbstractMapper
17 from .array_data_source import ArrayDataSource
18 from .base import reverse_map_1d
19
20
21 class Base1DPlot(AbstractPlotRenderer):
22 """ Base class for one-dimensional plots
23
24 This class provides a base for plots such as jitter plots, color bars,
25 single-axis scatter plots, and geophysical horizon and tops plots.
26
27 """
28
29 #: The data source of values
30 index = Instance(ArrayDataSource)
31
32 #: Screen mapper for index data.
33 index_mapper = Instance(AbstractMapper)
34
35 #: Corresponds to either **index_mapper** or None, depending on
36 #: the orientation of the plot.
37 x_mapper = Property(depends_on=['orientation', 'index_mapper'])
38
39 #: Corresponds to either **index_mapper** or None, depending on
40 #: the orientation of the plot.
41 y_mapper = Property(depends_on=['orientation', 'index_mapper'])
42
43 #: The orientation of the index axis.
44 orientation = Enum('v', 'h')
45
46 #: Should the plot go left-to-right or bottom-to-top (normal) or the reverse?
47 direction = Enum('normal', 'flipped')
48
49 #: Faux origin for the axes and other objects to look at
50 origin = Property(
51 Enum('bottom left', 'top left', 'bottom right', 'top right'),
52 depends_on=['orientation', 'direction']
53 )
54
55 #------------------------------------------------------------------------
56 # Private traits
57 #------------------------------------------------------------------------
58
59 #: flag whether the data cache is valid
60 _cache_valid = Bool(False)
61
62 #: cache of the index values in data space
63 _cached_data = Any()
64
65 #: cache of the sorted index values in data space
66 _cached_data_pts_sorted = Any()
67
68 #: cache of the sorted indices of the index values
69 _cached_data_argsort = Any()
70
71 #: flag whether the screen coordinates are valid
72 _screen_cache_valid = Bool(False)
73
74 #: cache holding the screen coordinates of the index values
75 _cached_screen_pts = Any()
76
77 #------------------------------------------------------------------------
78 # AbstractPlotRenderer interface
79 #------------------------------------------------------------------------
80
81 def map_screen(self, data_array):
82 """ Maps a 1D array of data points into screen space and returns it as
83 a 1D array.
84
85 Parameters
86 ----------
87
88 data_array : 1D array
89 An array of data-space values to be mapped to screen coordinates.
90
91 Returns
92 -------
93
94 screen_array : 1D array
95 An array of points in screen space, either x-values (if
96 orientation is 'h') or y-values (if orientation is 'v').
97
98 Notes
99 -----
100
101 Returning a 1D array is experimental, and may break some tools and
102 overlays. If needs be we can refactor so that it returns a 2D array.
103
104 """
105 # data_array is 1D array of length N
106 if len(data_array) == 0:
107 return []
108 return asarray(self.index_mapper.map_screen(data_array))
109
110 def map_data(self, screen_pts):
111 """ Maps 2D screen space points into the 1D index space of the plot.
112
113 Parameters
114 ----------
115
116 screen_pts : tuple of x-array, y-array
117 2 arrays (or values) screen space coordinates.
118
119 Returns
120 -------
121
122 data_array : 1D array
123 An array of points in data space corresponding to the screen-space
124 points.
125
126 """
127 x, y = screen_pts
128 if self.orientation == "v":
129 return asarray(self.index_mapper.map_data(y))
130 else:
131 return asarray(self.index_mapper.map_data(x))
132
133 def map_index(self, screen_pt, threshold=2.0, outside_returns_none=True,
134 index_only=True):
135 """ Maps a screen space point to an index into the plot's index array.
136
137 Parameters
138 ----------
139
140 screen_pts: tuple of x-array, y-array
141 2 arrays (or values) screen space coordinates.
142 threshold : float
143 Optional screen-space distance allowed between *screen_pt* and the
144 plot; if non-zero, then a *screen_pt* within this distance is
145 mapped to the neared plot index. (This feature is useful for sparse
146 data.)
147 outside_returns_none : Boolean
148 If True, then if *screen_pt* is outside the range of the data, the
149 method returns None. If False, it returns the nearest end index in
150 such a case.
151 index_only : Boolean
152 This is included for compatibity with the base class, but is
153 ignored, as it is always true for 1D plots.
154
155 Returns
156 -------
157
158 index : int
159 An index into the index array. If the input point cannot be mapped
160 to an index, then None is returned.
161
162 If *screen_pt* corresponds to multiple indices, then only the first
163 index is returned.
164
165 """
166 data_pt = self.map_data(screen_pt)
167
168 if ((data_pt < self.index_mapper.range.low) or \
169 (data_pt > self.index_mapper.range.high)) and \
170 outside_returns_none:
171 return None
172
173 if self._cached_data_pts_sorted is None:
174 self._cached_data_argsort = argsort(self._cached_data)
175 self._cached_data_pts_sorted = self._cached_data[self._cached_data_argsort]
176
177 # XXX better to just use argmin(abs(data - data_pt))?
178
179 data = self._cached_data_pts_sorted
180 try:
181 ndx = reverse_map_1d(data, data_pt, "ascending")
182 except IndexError:
183 if outside_returns_none:
184 return None
185 else:
186 if data_pt < data[0]:
187 return 0
188 else:
189 return len(data) - 1
190
191 orig_ndx = self._cached_data_argsort[ndx]
192
193 if threshold == 0.0:
194 return orig_ndx
195
196 screen_points = self._cached_screen_pts
197 x = screen_points[orig_ndx]
198 if self.orientation == 'v':
199 x0 = screen_pt[1]
200 else:
201 x0 = screen_pt[0]
202
203 if abs(x - x0) <= threshold:
204 return orig_ndx
205 else:
206 return None
207
208 #------------------------------------------------------------------------
209 # Private methods
210 #------------------------------------------------------------------------
211
212 def _compute_screen_coord(self):
213 """ Compute the screen coordinates of the index values """
214 if not self._screen_cache_valid:
215 self._gather_points()
216 pts = self.map_screen(self._cached_data)
217 self._cached_screen_pts = pts
218 self._screen_cache_valid = True
219 self._cached_data_pts_sorted = None
220 self._cached_data_argsort = None
221 return self._cached_screen_pts
222
223 def _gather_points(self):
224 """ Ensure that data cache is valid """
225 if self._cache_valid:
226 return
227 if not self.index:
228 return
229
230 index, index_mask = self.index.get_data_mask()
231 if len(index) == 0:
232 self._cached_data = []
233 self._cache_valid = True
234 return
235
236 self._cached_data = index
237 self._cache_valid = True
238 self._cached_screen_points = None
239 self._screen_cached_valid = False
240
241 def _update_mappers(self):
242 """ Update the mapper when the bounds, orientation or direction change
243 """
244 mapper = self.index_mapper
245 if mapper is None:
246 return
247
248 x = self.x
249 x2 = self.x2
250 y = self.y
251 y2 = self.y2
252
253 if self.orientation == 'h':
254 if self.direction == 'normal':
255 mapper.screen_bounds = (x, x2)
256 elif self.direction == 'flipped':
257 mapper.screen_bounds = (x2, x)
258 elif self.orientation == 'v':
259 if self.direction == 'normal':
260 mapper.screen_bounds = (y, y2)
261 elif self.direction == 'flipped':
262 mapper.screen_bounds = (y2, y)
263
264 self.invalidate_draw()
265 self._cache_valid = False
266 self._screen_cache_valid = False
267
268 #------------------------------------------------------------------------
269 # Property setters and getters
270 #------------------------------------------------------------------------
271
272 @cached_property
273 def _get_x_mapper(self):
274 if self.orientation == "h":
275 return self.index_mapper
276 else:
277 return None
278
279 @cached_property
280 def _get_y_mapper(self):
281 if self.orientation == "h":
282 return None
283 else:
284 return self.index_mapper
285
286 @cached_property
287 def _get_origin(self):
288 if self.orientation == 'h':
289 if self.direction == 'normal':
290 return 'bottom left'
291 else:
292 return 'bottom right'
293 else:
294 if self.direction == 'normal':
295 return 'bottom left'
296 else:
297 return 'top left'
298
299 #------------------------------------------------------------------------
300 # Event handlers
301 #------------------------------------------------------------------------
302
303 @on_trait_change("index.data_changed")
304 def _invalidate(self):
305 self._cache_valid = False
306 self._screen_cache_valid = False
307
308 @on_trait_change("index_mapper.updated")
309 def _invalidate_screen(self):
310 self._screen_cache_valid = False
311
312 def _bounds_changed(self, old, new):
313 super(Base1DPlot, self)._bounds_changed(old, new)
314 self._update_mappers()
315
316 def _bounds_items_changed(self, event):
317 super(Base1DPlot, self)._bounds_items_changed(event)
318 self._update_mappers()
319
320 def _position_changed(self, old, new):
321 super(Base1DPlot, self)._position_changed(old, new)
322 self._update_mappers()
323
324 def _position_items_changed(self, event):
325 super(Base1DPlot, self)._position_items_changed(event)
326 self._update_mappers()
327
328 def _updated_changed_for_index_mapper(self):
329 self._update_mappers()
330
331 def _orientation_changed(self):
332 self._update_mappers()
333
334 def _direction_changed(self):
335 self._update_mappers()
336
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chaco/base_1d_plot.py b/chaco/base_1d_plot.py
--- a/chaco/base_1d_plot.py
+++ b/chaco/base_1d_plot.py
@@ -32,6 +32,9 @@
#: Screen mapper for index data.
index_mapper = Instance(AbstractMapper)
+ #: Convenience property for accessing the data range of the mapper.
+ index_range = Property(depends_on="index_mapper.range")
+
#: Corresponds to either **index_mapper** or None, depending on
#: the orientation of the plot.
x_mapper = Property(depends_on=['orientation', 'index_mapper'])
@@ -269,6 +272,12 @@
# Property setters and getters
#------------------------------------------------------------------------
+ def _get_index_range(self):
+ return self.index_mapper.range
+
+ def _set_index_range(self, val):
+ self.index_mapper.range = val
+
@cached_property
def _get_x_mapper(self):
if self.orientation == "h":
|
{"golden_diff": "diff --git a/chaco/base_1d_plot.py b/chaco/base_1d_plot.py\n--- a/chaco/base_1d_plot.py\n+++ b/chaco/base_1d_plot.py\n@@ -32,6 +32,9 @@\n #: Screen mapper for index data.\n index_mapper = Instance(AbstractMapper)\n \n+ #: Convenience property for accessing the data range of the mapper.\n+ index_range = Property(depends_on=\"index_mapper.range\")\n+\n #: Corresponds to either **index_mapper** or None, depending on\n #: the orientation of the plot.\n x_mapper = Property(depends_on=['orientation', 'index_mapper'])\n@@ -269,6 +272,12 @@\n # Property setters and getters\n #------------------------------------------------------------------------\n \n+ def _get_index_range(self):\n+ return self.index_mapper.range\n+\n+ def _set_index_range(self, val):\n+ self.index_mapper.range = val\n+\n @cached_property\n def _get_x_mapper(self):\n if self.orientation == \"h\":\n", "issue": "Base1DPlot doesn't have a index_range trait, doesn't listen to changes upstream\n[Base1DPlot](https://github.com/enthought/chaco/blob/021a921a48f317705a86b179a30395b6ccba7b42/chaco/base_1d_plot.py#L21) doesn't have a `index_range`. As a result, if the `index_range` trait is set on a `Plot` that contains a `Plot.plot_1d()`, the notification doesn't reach the 1D plot. In such case, one needs to explicitly set `Base1DPlot.index_mapper.range` to match.\r\n\r\nExample:\r\n```python\r\nfrom numpy import linspace, sin\r\nfrom traits.api import Button, HasTraits, Instance\r\nfrom traitsui.api import View, Item, VGroup\r\nfrom chaco.api import Plot, ArrayPlotData, DataRange1D\r\nfrom enable.api import ComponentEditor\r\n\r\nclass MWE(HasTraits):\r\n plot = Instance(Plot)\r\n button1 = Button(\"zoom X (bug)\")\r\n button2 = Button(\"zoom X (workaround)\")\r\n traits_view = View(VGroup(\r\n Item('plot', editor=ComponentEditor()),\r\n Item('button1'),\r\n Item('button2'),\r\n ))\r\n\r\n def _plot_default(self):\r\n x = linspace(-14, 14, 100)\r\n y = sin(x) * x**3\r\n u = linspace(-14, 14, 10)\r\n plotdata = ArrayPlotData(x=x, y=y, u=u)\r\n plot = Plot(plotdata)\r\n plot.plot((\"x\", \"y\"), type=\"line\", color=\"blue\")\r\n plot.plot_1d((\"u\"), type=\"scatter_1d\", name=\"dots\")\r\n return plot\r\n \r\n def _button1_changed(self):\r\n value_range = DataRange1D()\r\n value_range.set_bounds(-5, 5)\r\n self.plot.index_range = value_range # DOESN'T reach the 1D plot!\r\n\r\n def _button2_changed(self):\r\n self._button1_changed()\r\n # manually zoom the 1D plot \r\n self.plot.plots[\"dots\"][0].index_mapper.range = self.plot.index_range\r\n\r\nif __name__ == \"__main__\":\r\n MWE().configure_traits()\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nAbstract base class for 1-D plots which only use one axis\n\"\"\"\n\nfrom __future__ import absolute_import\n\n# Standard library imports\nfrom numpy import argsort, asarray\n\n# Enthought library imports\nfrom traits.api import (Any, Bool, Enum, Instance, Property, cached_property,\n on_trait_change)\n\n# local imports\nfrom .abstract_plot_renderer import AbstractPlotRenderer\nfrom .abstract_mapper import AbstractMapper\nfrom .array_data_source import ArrayDataSource\nfrom .base import reverse_map_1d\n\n\nclass Base1DPlot(AbstractPlotRenderer):\n \"\"\" Base class for one-dimensional plots\n\n This class provides a base for plots such as jitter plots, color bars,\n single-axis scatter plots, and geophysical horizon and tops plots.\n\n \"\"\"\n\n #: The data source of values\n index = Instance(ArrayDataSource)\n\n #: Screen mapper for index data.\n index_mapper = 
Instance(AbstractMapper)\n\n #: Corresponds to either **index_mapper** or None, depending on\n #: the orientation of the plot.\n x_mapper = Property(depends_on=['orientation', 'index_mapper'])\n\n #: Corresponds to either **index_mapper** or None, depending on\n #: the orientation of the plot.\n y_mapper = Property(depends_on=['orientation', 'index_mapper'])\n\n #: The orientation of the index axis.\n orientation = Enum('v', 'h')\n\n #: Should the plot go left-to-right or bottom-to-top (normal) or the reverse?\n direction = Enum('normal', 'flipped')\n\n #: Faux origin for the axes and other objects to look at\n origin = Property(\n Enum('bottom left', 'top left', 'bottom right', 'top right'),\n depends_on=['orientation', 'direction']\n )\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n #: flag whether the data cache is valid\n _cache_valid = Bool(False)\n\n #: cache of the index values in data space\n _cached_data = Any()\n\n #: cache of the sorted index values in data space\n _cached_data_pts_sorted = Any()\n\n #: cache of the sorted indices of the index values\n _cached_data_argsort = Any()\n\n #: flag whether the screen coordinates are valid\n _screen_cache_valid = Bool(False)\n\n #: cache holding the screen coordinates of the index values\n _cached_screen_pts = Any()\n\n #------------------------------------------------------------------------\n # AbstractPlotRenderer interface\n #------------------------------------------------------------------------\n\n def map_screen(self, data_array):\n \"\"\" Maps a 1D array of data points into screen space and returns it as\n a 1D array.\n\n Parameters\n ----------\n\n data_array : 1D array\n An array of data-space values to be mapped to screen coordinates.\n\n Returns\n -------\n\n screen_array : 1D array\n An array of points in screen space, either x-values (if\n orientation is 'h') or y-values (if orientation is 'v').\n\n Notes\n -----\n\n Returning a 1D array is experimental, and may break some tools and\n overlays. If needs be we can refactor so that it returns a 2D array.\n\n \"\"\"\n # data_array is 1D array of length N\n if len(data_array) == 0:\n return []\n return asarray(self.index_mapper.map_screen(data_array))\n\n def map_data(self, screen_pts):\n \"\"\" Maps 2D screen space points into the 1D index space of the plot.\n\n Parameters\n ----------\n\n screen_pts : tuple of x-array, y-array\n 2 arrays (or values) screen space coordinates.\n\n Returns\n -------\n\n data_array : 1D array\n An array of points in data space corresponding to the screen-space\n points.\n\n \"\"\"\n x, y = screen_pts\n if self.orientation == \"v\":\n return asarray(self.index_mapper.map_data(y))\n else:\n return asarray(self.index_mapper.map_data(x))\n\n def map_index(self, screen_pt, threshold=2.0, outside_returns_none=True,\n index_only=True):\n \"\"\" Maps a screen space point to an index into the plot's index array.\n\n Parameters\n ----------\n\n screen_pts: tuple of x-array, y-array\n 2 arrays (or values) screen space coordinates.\n threshold : float\n Optional screen-space distance allowed between *screen_pt* and the\n plot; if non-zero, then a *screen_pt* within this distance is\n mapped to the neared plot index. (This feature is useful for sparse\n data.)\n outside_returns_none : Boolean\n If True, then if *screen_pt* is outside the range of the data, the\n method returns None. 
If False, it returns the nearest end index in\n such a case.\n index_only : Boolean\n This is included for compatibity with the base class, but is\n ignored, as it is always true for 1D plots.\n\n Returns\n -------\n\n index : int\n An index into the index array. If the input point cannot be mapped\n to an index, then None is returned.\n\n If *screen_pt* corresponds to multiple indices, then only the first\n index is returned.\n\n \"\"\"\n data_pt = self.map_data(screen_pt)\n\n if ((data_pt < self.index_mapper.range.low) or \\\n (data_pt > self.index_mapper.range.high)) and \\\n outside_returns_none:\n return None\n\n if self._cached_data_pts_sorted is None:\n self._cached_data_argsort = argsort(self._cached_data)\n self._cached_data_pts_sorted = self._cached_data[self._cached_data_argsort]\n\n # XXX better to just use argmin(abs(data - data_pt))?\n\n data = self._cached_data_pts_sorted\n try:\n ndx = reverse_map_1d(data, data_pt, \"ascending\")\n except IndexError:\n if outside_returns_none:\n return None\n else:\n if data_pt < data[0]:\n return 0\n else:\n return len(data) - 1\n\n orig_ndx = self._cached_data_argsort[ndx]\n\n if threshold == 0.0:\n return orig_ndx\n\n screen_points = self._cached_screen_pts\n x = screen_points[orig_ndx]\n if self.orientation == 'v':\n x0 = screen_pt[1]\n else:\n x0 = screen_pt[0]\n\n if abs(x - x0) <= threshold:\n return orig_ndx\n else:\n return None\n\n #------------------------------------------------------------------------\n # Private methods\n #------------------------------------------------------------------------\n\n def _compute_screen_coord(self):\n \"\"\" Compute the screen coordinates of the index values \"\"\"\n if not self._screen_cache_valid:\n self._gather_points()\n pts = self.map_screen(self._cached_data)\n self._cached_screen_pts = pts\n self._screen_cache_valid = True\n self._cached_data_pts_sorted = None\n self._cached_data_argsort = None\n return self._cached_screen_pts\n\n def _gather_points(self):\n \"\"\" Ensure that data cache is valid \"\"\"\n if self._cache_valid:\n return\n if not self.index:\n return\n\n index, index_mask = self.index.get_data_mask()\n if len(index) == 0:\n self._cached_data = []\n self._cache_valid = True\n return\n\n self._cached_data = index\n self._cache_valid = True\n self._cached_screen_points = None\n self._screen_cached_valid = False\n\n def _update_mappers(self):\n \"\"\" Update the mapper when the bounds, orientation or direction change\n \"\"\"\n mapper = self.index_mapper\n if mapper is None:\n return\n\n x = self.x\n x2 = self.x2\n y = self.y\n y2 = self.y2\n\n if self.orientation == 'h':\n if self.direction == 'normal':\n mapper.screen_bounds = (x, x2)\n elif self.direction == 'flipped':\n mapper.screen_bounds = (x2, x)\n elif self.orientation == 'v':\n if self.direction == 'normal':\n mapper.screen_bounds = (y, y2)\n elif self.direction == 'flipped':\n mapper.screen_bounds = (y2, y)\n\n self.invalidate_draw()\n self._cache_valid = False\n self._screen_cache_valid = False\n\n #------------------------------------------------------------------------\n # Property setters and getters\n #------------------------------------------------------------------------\n\n @cached_property\n def _get_x_mapper(self):\n if self.orientation == \"h\":\n return self.index_mapper\n else:\n return None\n\n @cached_property\n def _get_y_mapper(self):\n if self.orientation == \"h\":\n return None\n else:\n return self.index_mapper\n\n @cached_property\n def _get_origin(self):\n if self.orientation == 'h':\n if 
self.direction == 'normal':\n return 'bottom left'\n else:\n return 'bottom right'\n else:\n if self.direction == 'normal':\n return 'bottom left'\n else:\n return 'top left'\n\n #------------------------------------------------------------------------\n # Event handlers\n #------------------------------------------------------------------------\n\n @on_trait_change(\"index.data_changed\")\n def _invalidate(self):\n self._cache_valid = False\n self._screen_cache_valid = False\n\n @on_trait_change(\"index_mapper.updated\")\n def _invalidate_screen(self):\n self._screen_cache_valid = False\n\n def _bounds_changed(self, old, new):\n super(Base1DPlot, self)._bounds_changed(old, new)\n self._update_mappers()\n\n def _bounds_items_changed(self, event):\n super(Base1DPlot, self)._bounds_items_changed(event)\n self._update_mappers()\n\n def _position_changed(self, old, new):\n super(Base1DPlot, self)._position_changed(old, new)\n self._update_mappers()\n\n def _position_items_changed(self, event):\n super(Base1DPlot, self)._position_items_changed(event)\n self._update_mappers()\n\n def _updated_changed_for_index_mapper(self):\n self._update_mappers()\n\n def _orientation_changed(self):\n self._update_mappers()\n\n def _direction_changed(self):\n self._update_mappers()\n", "path": "chaco/base_1d_plot.py"}], "after_files": [{"content": "\"\"\"\nAbstract base class for 1-D plots which only use one axis\n\"\"\"\n\nfrom __future__ import absolute_import\n\n# Standard library imports\nfrom numpy import argsort, asarray\n\n# Enthought library imports\nfrom traits.api import (Any, Bool, Enum, Instance, Property, cached_property,\n on_trait_change)\n\n# local imports\nfrom .abstract_plot_renderer import AbstractPlotRenderer\nfrom .abstract_mapper import AbstractMapper\nfrom .array_data_source import ArrayDataSource\nfrom .base import reverse_map_1d\n\n\nclass Base1DPlot(AbstractPlotRenderer):\n \"\"\" Base class for one-dimensional plots\n\n This class provides a base for plots such as jitter plots, color bars,\n single-axis scatter plots, and geophysical horizon and tops plots.\n\n \"\"\"\n\n #: The data source of values\n index = Instance(ArrayDataSource)\n\n #: Screen mapper for index data.\n index_mapper = Instance(AbstractMapper)\n\n #: Convenience property for accessing the data range of the mapper.\n index_range = Property(depends_on=\"index_mapper.range\")\n\n #: Corresponds to either **index_mapper** or None, depending on\n #: the orientation of the plot.\n x_mapper = Property(depends_on=['orientation', 'index_mapper'])\n\n #: Corresponds to either **index_mapper** or None, depending on\n #: the orientation of the plot.\n y_mapper = Property(depends_on=['orientation', 'index_mapper'])\n\n #: The orientation of the index axis.\n orientation = Enum('v', 'h')\n\n #: Should the plot go left-to-right or bottom-to-top (normal) or the reverse?\n direction = Enum('normal', 'flipped')\n\n #: Faux origin for the axes and other objects to look at\n origin = Property(\n Enum('bottom left', 'top left', 'bottom right', 'top right'),\n depends_on=['orientation', 'direction']\n )\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n #: flag whether the data cache is valid\n _cache_valid = Bool(False)\n\n #: cache of the index values in data space\n _cached_data = Any()\n\n #: cache of the sorted index values in data space\n _cached_data_pts_sorted = Any()\n\n #: cache of the sorted indices of 
the index values\n _cached_data_argsort = Any()\n\n #: flag whether the screen coordinates are valid\n _screen_cache_valid = Bool(False)\n\n #: cache holding the screen coordinates of the index values\n _cached_screen_pts = Any()\n\n #------------------------------------------------------------------------\n # AbstractPlotRenderer interface\n #------------------------------------------------------------------------\n\n def map_screen(self, data_array):\n \"\"\" Maps a 1D array of data points into screen space and returns it as\n a 1D array.\n\n Parameters\n ----------\n\n data_array : 1D array\n An array of data-space values to be mapped to screen coordinates.\n\n Returns\n -------\n\n screen_array : 1D array\n An array of points in screen space, either x-values (if\n orientation is 'h') or y-values (if orientation is 'v').\n\n Notes\n -----\n\n Returning a 1D array is experimental, and may break some tools and\n overlays. If needs be we can refactor so that it returns a 2D array.\n\n \"\"\"\n # data_array is 1D array of length N\n if len(data_array) == 0:\n return []\n return asarray(self.index_mapper.map_screen(data_array))\n\n def map_data(self, screen_pts):\n \"\"\" Maps 2D screen space points into the 1D index space of the plot.\n\n Parameters\n ----------\n\n screen_pts : tuple of x-array, y-array\n 2 arrays (or values) screen space coordinates.\n\n Returns\n -------\n\n data_array : 1D array\n An array of points in data space corresponding to the screen-space\n points.\n\n \"\"\"\n x, y = screen_pts\n if self.orientation == \"v\":\n return asarray(self.index_mapper.map_data(y))\n else:\n return asarray(self.index_mapper.map_data(x))\n\n def map_index(self, screen_pt, threshold=2.0, outside_returns_none=True,\n index_only=True):\n \"\"\" Maps a screen space point to an index into the plot's index array.\n\n Parameters\n ----------\n\n screen_pts: tuple of x-array, y-array\n 2 arrays (or values) screen space coordinates.\n threshold : float\n Optional screen-space distance allowed between *screen_pt* and the\n plot; if non-zero, then a *screen_pt* within this distance is\n mapped to the neared plot index. (This feature is useful for sparse\n data.)\n outside_returns_none : Boolean\n If True, then if *screen_pt* is outside the range of the data, the\n method returns None. If False, it returns the nearest end index in\n such a case.\n index_only : Boolean\n This is included for compatibity with the base class, but is\n ignored, as it is always true for 1D plots.\n\n Returns\n -------\n\n index : int\n An index into the index array. 
If the input point cannot be mapped\n to an index, then None is returned.\n\n If *screen_pt* corresponds to multiple indices, then only the first\n index is returned.\n\n \"\"\"\n data_pt = self.map_data(screen_pt)\n\n if ((data_pt < self.index_mapper.range.low) or \\\n (data_pt > self.index_mapper.range.high)) and \\\n outside_returns_none:\n return None\n\n if self._cached_data_pts_sorted is None:\n self._cached_data_argsort = argsort(self._cached_data)\n self._cached_data_pts_sorted = self._cached_data[self._cached_data_argsort]\n\n # XXX better to just use argmin(abs(data - data_pt))?\n\n data = self._cached_data_pts_sorted\n try:\n ndx = reverse_map_1d(data, data_pt, \"ascending\")\n except IndexError:\n if outside_returns_none:\n return None\n else:\n if data_pt < data[0]:\n return 0\n else:\n return len(data) - 1\n\n orig_ndx = self._cached_data_argsort[ndx]\n\n if threshold == 0.0:\n return orig_ndx\n\n screen_points = self._cached_screen_pts\n x = screen_points[orig_ndx]\n if self.orientation == 'v':\n x0 = screen_pt[1]\n else:\n x0 = screen_pt[0]\n\n if abs(x - x0) <= threshold:\n return orig_ndx\n else:\n return None\n\n #------------------------------------------------------------------------\n # Private methods\n #------------------------------------------------------------------------\n\n def _compute_screen_coord(self):\n \"\"\" Compute the screen coordinates of the index values \"\"\"\n if not self._screen_cache_valid:\n self._gather_points()\n pts = self.map_screen(self._cached_data)\n self._cached_screen_pts = pts\n self._screen_cache_valid = True\n self._cached_data_pts_sorted = None\n self._cached_data_argsort = None\n return self._cached_screen_pts\n\n def _gather_points(self):\n \"\"\" Ensure that data cache is valid \"\"\"\n if self._cache_valid:\n return\n if not self.index:\n return\n\n index, index_mask = self.index.get_data_mask()\n if len(index) == 0:\n self._cached_data = []\n self._cache_valid = True\n return\n\n self._cached_data = index\n self._cache_valid = True\n self._cached_screen_points = None\n self._screen_cached_valid = False\n\n def _update_mappers(self):\n \"\"\" Update the mapper when the bounds, orientation or direction change\n \"\"\"\n mapper = self.index_mapper\n if mapper is None:\n return\n\n x = self.x\n x2 = self.x2\n y = self.y\n y2 = self.y2\n\n if self.orientation == 'h':\n if self.direction == 'normal':\n mapper.screen_bounds = (x, x2)\n elif self.direction == 'flipped':\n mapper.screen_bounds = (x2, x)\n elif self.orientation == 'v':\n if self.direction == 'normal':\n mapper.screen_bounds = (y, y2)\n elif self.direction == 'flipped':\n mapper.screen_bounds = (y2, y)\n\n self.invalidate_draw()\n self._cache_valid = False\n self._screen_cache_valid = False\n\n #------------------------------------------------------------------------\n # Property setters and getters\n #------------------------------------------------------------------------\n\n def _get_index_range(self):\n return self.index_mapper.range\n\n def _set_index_range(self, val):\n self.index_mapper.range = val\n\n @cached_property\n def _get_x_mapper(self):\n if self.orientation == \"h\":\n return self.index_mapper\n else:\n return None\n\n @cached_property\n def _get_y_mapper(self):\n if self.orientation == \"h\":\n return None\n else:\n return self.index_mapper\n\n @cached_property\n def _get_origin(self):\n if self.orientation == 'h':\n if self.direction == 'normal':\n return 'bottom left'\n else:\n return 'bottom right'\n else:\n if self.direction == 'normal':\n return 
'bottom left'\n else:\n return 'top left'\n\n #------------------------------------------------------------------------\n # Event handlers\n #------------------------------------------------------------------------\n\n @on_trait_change(\"index.data_changed\")\n def _invalidate(self):\n self._cache_valid = False\n self._screen_cache_valid = False\n\n @on_trait_change(\"index_mapper.updated\")\n def _invalidate_screen(self):\n self._screen_cache_valid = False\n\n def _bounds_changed(self, old, new):\n super(Base1DPlot, self)._bounds_changed(old, new)\n self._update_mappers()\n\n def _bounds_items_changed(self, event):\n super(Base1DPlot, self)._bounds_items_changed(event)\n self._update_mappers()\n\n def _position_changed(self, old, new):\n super(Base1DPlot, self)._position_changed(old, new)\n self._update_mappers()\n\n def _position_items_changed(self, event):\n super(Base1DPlot, self)._position_items_changed(event)\n self._update_mappers()\n\n def _updated_changed_for_index_mapper(self):\n self._update_mappers()\n\n def _orientation_changed(self):\n self._update_mappers()\n\n def _direction_changed(self):\n self._update_mappers()\n", "path": "chaco/base_1d_plot.py"}]}
| 4,009 | 228 |
gh_patches_debug_3860
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-1336
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update_version.sh should update new documentation
Currently the "set_up_admin_tails" documentation looks like this, I suspect that between every release FPF just increments the number (from .3.6 to .3.7). Luckily we have computers to automate small tasks for us :-) Is there a programmatic way to do this?
```
cd securedrop/
git checkout 0.3.6
git tag -v 0.3.6
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # SecureDrop documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import sys
16 import os
17 import shlex
18
19 # Detect if we're being built by Read the Docs
20 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs
21 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
22
23 # If extensions (or modules to document with autodoc) are in another directory,
24 # add these directories to sys.path here. If the directory is relative to the
25 # documentation root, use os.path.abspath to make it absolute, like shown here.
26 #sys.path.insert(0, os.path.abspath('.'))
27
28 # -- General configuration ------------------------------------------------
29
30 # If your documentation needs a minimal Sphinx version, state it here.
31 #needs_sphinx = '1.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = ['sphinx.ext.todo', ]
37
38 # Add any paths that contain templates here, relative to this directory.
39 templates_path = ['_templates']
40
41 # The suffix(es) of source filenames.
42 # You can specify multiple suffix as a list of string:
43 # source_suffix = ['.rst', '.md']
44 source_suffix = '.rst'
45
46 # The encoding of source files.
47 #source_encoding = 'utf-8-sig'
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = u'SecureDrop'
54 copyright = u'2015, Freedom of the Press Foundation'
55 author = u'SecureDrop Team and Contributors'
56
57 # The version info for the project you're documenting, acts as replacement for
58 # |version| and |release|, also used in various other places throughout the
59 # built documents.
60 #
61 # The short X.Y version.
62 version = '0.3.5'
63 # The full version, including alpha/beta/rc tags.
64 release = '0.3.5'
65
66 # The language for content autogenerated by Sphinx. Refer to documentation
67 # for a list of supported languages.
68 #
69 # This is also used if you do content translation via gettext catalogs.
70 # Usually you set "language" from the command line for these cases.
71 language = None
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 exclude_patterns = ['_build']
82
83 # The reST default role (used for this markup: `text`) to use for all
84 # documents.
85 #default_role = None
86
87 # If true, '()' will be appended to :func: etc. cross-reference text.
88 #add_function_parentheses = True
89
90 # If true, the current module name will be prepended to all description
91 # unit titles (such as .. function::).
92 #add_module_names = True
93
94 # If true, sectionauthor and moduleauthor directives will be shown in the
95 # output. They are ignored by default.
96 #show_authors = False
97
98 # The name of the Pygments (syntax highlighting) style to use.
99 pygments_style = 'sphinx'
100
101 # A list of ignored prefixes for module index sorting.
102 #modindex_common_prefix = []
103
104 # If true, keep warnings as "system message" paragraphs in the built documents.
105 #keep_warnings = False
106
107 # If true, `todo` and `todoList` produce output, else they produce nothing.
108 todo_include_todos = False
109
110
111 # -- Options for HTML output ----------------------------------------------
112
113 # The theme to use for HTML and HTML Help pages. See the documentation for
114 # a list of builtin themes.
115 if on_rtd:
116 html_theme = 'default'
117 else:
118 try:
119 # If you want to build the docs locally using the RTD theme,
120 # you may need to install it: ``pip install sphinx_rtd_theme``.
121 # https://github.com/snide/sphinx_rtd_theme#via-package
122 import sphinx_rtd_theme
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125 except ImportError:
126 # This theme is included with Sphinx and is quite nice (based
127 # on the Pocoo themes), but since we're using the RTD theme
128 # for the production docs, it's best to use that to avoid
129 # issues due to discrepancies between the themes.
130 html_theme = 'alabaster'
131
132 # Theme options are theme-specific and customize the look and feel of a theme
133 # further. For a list of options available for each theme, see the
134 # documentation.
135 #html_theme_options = {}
136
137 # Add any paths that contain custom themes here, relative to this directory.
138 #html_theme_path = []
139
140 # The name for this set of Sphinx documents. If None, it defaults to
141 # "<project> v<release> documentation".
142 #html_title = None
143
144 # A shorter title for the navigation bar. Default is the same as html_title.
145 #html_short_title = None
146
147 # The name of an image file (relative to this directory) to place at the top
148 # of the sidebar.
149 #html_logo = None
150
151 # The name of an image file (within the static path) to use as favicon of the
152 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
153 # pixels large.
154 #html_favicon = None
155
156 # Add any paths that contain custom static files (such as style sheets) here,
157 # relative to this directory. They are copied after the builtin static files,
158 # so a file named "default.css" will overwrite the builtin "default.css".
159 html_static_path = ['_static']
160
161 # Add any extra paths that contain custom files (such as robots.txt or
162 # .htaccess) here, relative to this directory. These files are copied
163 # directly to the root of the documentation.
164 #html_extra_path = []
165
166 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
167 # using the given strftime format.
168 #html_last_updated_fmt = '%b %d, %Y'
169
170 # If true, SmartyPants will be used to convert quotes and dashes to
171 # typographically correct entities.
172 #html_use_smartypants = True
173
174 # Custom sidebar templates, maps document names to template names.
175 #html_sidebars = {}
176
177 # Additional templates that should be rendered to pages, maps page names to
178 # template names.
179 #html_additional_pages = {}
180
181 # If false, no module index is generated.
182 #html_domain_indices = True
183
184 # If false, no index is generated.
185 #html_use_index = True
186
187 # If true, the index is split into individual pages for each letter.
188 #html_split_index = False
189
190 # If true, links to the reST sources are added to the pages.
191 #html_show_sourcelink = True
192
193 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
194 #html_show_sphinx = True
195
196 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
197 #html_show_copyright = True
198
199 # If true, an OpenSearch description file will be output, and all pages will
200 # contain a <link> tag referring to it. The value of this option must be the
201 # base URL from which the finished HTML is served.
202 #html_use_opensearch = ''
203
204 # This is the file name suffix for HTML files (e.g. ".xhtml").
205 #html_file_suffix = None
206
207 # Language to be used for generating the HTML full-text search index.
208 # Sphinx supports the following languages:
209 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
210 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
211 #html_search_language = 'en'
212
213 # A dictionary with options for the search language support, empty by default.
214 # Now only 'ja' uses this config value
215 #html_search_options = {'type': 'default'}
216
217 # The name of a javascript file (relative to the configuration directory) that
218 # implements a search results scorer. If empty, the default will be used.
219 #html_search_scorer = 'scorer.js'
220
221 # Output file base name for HTML help builder.
222 htmlhelp_basename = 'SecureDropdoc'
223
224 # -- Options for LaTeX output ---------------------------------------------
225
226 latex_elements = {
227 # The paper size ('letterpaper' or 'a4paper').
228 #'papersize': 'letterpaper',
229
230 # The font size ('10pt', '11pt' or '12pt').
231 #'pointsize': '10pt',
232
233 # Additional stuff for the LaTeX preamble.
234 #'preamble': '',
235
236 # Latex figure (float) alignment
237 #'figure_align': 'htbp',
238 }
239
240 # Grouping the document tree into LaTeX files. List of tuples
241 # (source start file, target name, title,
242 # author, documentclass [howto, manual, or own class]).
243 latex_documents = [
244 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',
245 author, 'manual'),
246 ]
247
248 # The name of an image file (relative to this directory) to place at the top of
249 # the title page.
250 #latex_logo = None
251
252 # For "manual" documents, if this is true, then toplevel headings are parts,
253 # not chapters.
254 #latex_use_parts = False
255
256 # If true, show page references after internal links.
257 #latex_show_pagerefs = False
258
259 # If true, show URL addresses after external links.
260 #latex_show_urls = False
261
262 # Documents to append as an appendix to all manuals.
263 #latex_appendices = []
264
265 # If false, no module index is generated.
266 #latex_domain_indices = True
267
268
269 # -- Options for manual page output ---------------------------------------
270
271 # One entry per manual page. List of tuples
272 # (source start file, name, description, authors, manual section).
273 man_pages = [
274 (master_doc, 'securedrop', u'SecureDrop Documentation',
275 [author], 1)
276 ]
277
278 # If true, show URL addresses after external links.
279 #man_show_urls = False
280
281
282 # -- Options for Texinfo output -------------------------------------------
283
284 # Grouping the document tree into Texinfo files. List of tuples
285 # (source start file, target name, title, author,
286 # dir menu entry, description, category)
287 texinfo_documents = [
288 (master_doc, 'SecureDrop', u'SecureDrop Documentation',
289 author, 'SecureDrop', 'One line description of project.',
290 'Miscellaneous'),
291 ]
292
293 # Documents to append as an appendix to all manuals.
294 #texinfo_appendices = []
295
296 # If false, no module index is generated.
297 #texinfo_domain_indices = True
298
299 # How to display URL addresses: 'footnote', 'no', or 'inline'.
300 #texinfo_show_urls = 'footnote'
301
302 # If true, do not generate a @detailmenu in the "Top" node's menu.
303 #texinfo_no_detailmenu = False
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,9 +59,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.3.5'
+version = '0.3.7'
# The full version, including alpha/beta/rc tags.
-release = '0.3.5'
+release = '0.3.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -59,9 +59,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.3.5'\n+version = '0.3.7'\n # The full version, including alpha/beta/rc tags.\n-release = '0.3.5'\n+release = '0.3.7'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n", "issue": "update_version.sh should update new documentation\nCurrently the \"set_up_admin_tails\" documentation looks like this, I suspect that between every release FPF just increments the number (from .3.6 to .3.7). Luckily we have computers to automate small tasks for us :-) Is there a programmatic way to do this? \n\n```\ncd securedrop/\ngit checkout 0.3.6\ngit tag -v 0.3.6\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2015, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.3.5'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.3.5'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\nimport shlex\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2015, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.3.7'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.3.7'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]}
| 3,711 | 120 |
gh_patches_debug_37522
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-915
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError: 'typing' when creating new Datasource from MySQL
Using [airplane_satisfaction dataset](https://www.kaggle.com/teejmahal20/airline-passenger-satisfaction?select=train.csv) to create a data source from Scout through MySQL throws the error below:
```
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 113, in put
ca.default_store.save_datasource(name, source_type, request.json)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py", line 185, in save_datasource
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 54, in cast_df_columns_types
print(f'Error: cant convert type of DS column {name} to {new_type}')
UnboundLocalError: local variable 'new_type' referenced before assignment
ERROR:mindsdb.api.http.initialize:Exception on /api/datasources/AirplaneData [PUT]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 44, in cast_df_columns_types
col_type = stats[name]['typing']['data_type']
KeyError: 'typing'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 113, in put
ca.default_store.save_datasource(name, source_type, request.json)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py", line 185, in save_datasource
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 54, in cast_df_columns_types
print(f'Error: cant convert type of DS column {name} to {new_type}')
UnboundLocalError: local variable 'new_type' referenced before assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/interfaces/datastore/sqlite_helpers.py`
Content:
```
1 import sqlite3
2 from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
3 import re
4
5 def create_sqlite_db(path, data_frame):
6 con = sqlite3.connect(path)
7 data_frame.to_sql(name='data', con=con, index=False)
8 con.close()
9
10 def cast_df_columns_types(df, stats):
11 types_map = {
12 DATA_TYPES.NUMERIC: {
13 DATA_SUBTYPES.INT: 'int64',
14 DATA_SUBTYPES.FLOAT: 'float64',
15 DATA_SUBTYPES.BINARY: 'bool'
16 },
17 DATA_TYPES.DATE: {
18 DATA_SUBTYPES.DATE: 'datetime64', # YYYY-MM-DD
19 DATA_SUBTYPES.TIMESTAMP: 'datetime64' # YYYY-MM-DD hh:mm:ss or 1852362464
20 },
21 DATA_TYPES.CATEGORICAL: {
22 DATA_SUBTYPES.SINGLE: 'category',
23 DATA_SUBTYPES.MULTIPLE: 'category'
24 },
25 DATA_TYPES.FILE_PATH: {
26 DATA_SUBTYPES.IMAGE: 'object',
27 DATA_SUBTYPES.VIDEO: 'object',
28 DATA_SUBTYPES.AUDIO: 'object'
29 },
30 DATA_TYPES.SEQUENTIAL: {
31 DATA_SUBTYPES.ARRAY: 'object'
32 },
33 DATA_TYPES.TEXT: {
34 DATA_SUBTYPES.SHORT: 'object',
35 DATA_SUBTYPES.RICH: 'object'
36 }
37 }
38
39 columns = [dict(name=x) for x in list(df.keys())]
40
41 for column in columns:
42 try:
43 name = column['name']
44 col_type = stats[name]['typing']['data_type']
45 col_subtype = stats[name]['typing']['data_subtype']
46 new_type = types_map[col_type][col_subtype]
47 if new_type == 'int64' or new_type == 'float64':
48 df[name] = df[name].apply(lambda x: x.replace(',','.') if isinstance(x, str) else x)
49 if new_type == 'int64':
50 df = df.astype({name: 'float64'})
51 df = df.astype({name: new_type})
52 except Exception as e:
53 print(e)
54 print(f'Error: cant convert type of DS column {name} to {new_type}')
55
56 return df
57
58 def parse_filter(key, value):
59 result = re.search(r'filter(_*.*)\[(.*)\]', key)
60 operator = result.groups()[0].strip('_') or 'like'
61 field = result.groups()[1]
62 operators_map = {
63 'like': 'like',
64 'in': 'in',
65 'nin': 'not in',
66 'gt': '>',
67 'lt': '<',
68 'gte': '>=',
69 'lte': '<=',
70 'eq': '=',
71 'neq': '!='
72 }
73 if operator not in operators_map:
74 return None
75 operator = operators_map[operator]
76 return {'field': field, 'value': value, 'operator': operator}
77
78
79 def prepare_sql_where(where):
80 marks = {}
81 if len(where) > 0:
82 for i in range(len(where)):
83 field = where[i]['field'].replace('"', '""')
84 operator = where[i]['operator']
85 value = where[i]['value']
86 var_name = f'var{i}'
87 if ' ' in field:
88 field = f'"{field}"'
89 if operator == 'like':
90 marks[var_name] = '%' + value + '%'
91 else:
92 marks[var_name] = value
93 where[i] = f'{field} {operator} :var{i}'
94 where = 'where ' + ' and '.join(where)
95 else:
96 where = ''
97 return where, marks
98
99 def get_sqlite_columns_names(cursor):
100 cursor.execute('pragma table_info(data);')
101 column_name_index = [x[0] for x in cursor.description].index('name')
102 columns = cursor.fetchall()
103 return [x[column_name_index] for x in columns]
104
105 def get_sqlite_data(db_path, where, limit, offset):
106 where = [] if where is None else where
107
108 con = sqlite3.connect(db_path)
109 cur = con.cursor()
110
111 offset = '' if limit is None or offset is None else f'offset {offset}'
112 limit = '' if limit is None else f'limit {limit}'
113
114 columns_names = get_sqlite_columns_names(cur)
115 where = [x for x in where if x['field'] in columns_names]
116 where, marks = prepare_sql_where(where)
117
118 count_query = ' '.join(['select count(1) from data', where])
119 cur.execute(count_query, marks)
120 rowcount = cur.fetchone()[0]
121
122 query = ' '.join(['select * from data', where, limit, offset])
123 cur.execute(query, marks)
124 data = cur.fetchall()
125 data = [dict(zip(columns_names, x)) for x in data]
126
127 cur.close()
128 con.close()
129
130 return {
131 'data': data,
132 'rowcount': rowcount,
133 'columns_names': columns_names
134 }
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mindsdb/interfaces/datastore/sqlite_helpers.py b/mindsdb/interfaces/datastore/sqlite_helpers.py
--- a/mindsdb/interfaces/datastore/sqlite_helpers.py
+++ b/mindsdb/interfaces/datastore/sqlite_helpers.py
@@ -2,11 +2,13 @@
from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
import re
+
def create_sqlite_db(path, data_frame):
con = sqlite3.connect(path)
data_frame.to_sql(name='data', con=con, index=False)
con.close()
+
def cast_df_columns_types(df, stats):
types_map = {
DATA_TYPES.NUMERIC: {
@@ -41,11 +43,14 @@
for column in columns:
try:
name = column['name']
- col_type = stats[name]['typing']['data_type']
- col_subtype = stats[name]['typing']['data_subtype']
- new_type = types_map[col_type][col_subtype]
+ if stats[name].get('empty', {}).get('is_empty', False):
+ new_type = types_map[DATA_TYPES.NUMERIC][DATA_SUBTYPES.INT]
+ else:
+ col_type = stats[name]['typing']['data_type']
+ col_subtype = stats[name]['typing']['data_subtype']
+ new_type = types_map[col_type][col_subtype]
if new_type == 'int64' or new_type == 'float64':
- df[name] = df[name].apply(lambda x: x.replace(',','.') if isinstance(x, str) else x)
+ df[name] = df[name].apply(lambda x: x.replace(',', '.') if isinstance(x, str) else x)
if new_type == 'int64':
df = df.astype({name: 'float64'})
df = df.astype({name: new_type})
@@ -55,6 +60,7 @@
return df
+
def parse_filter(key, value):
result = re.search(r'filter(_*.*)\[(.*)\]', key)
operator = result.groups()[0].strip('_') or 'like'
@@ -96,12 +102,14 @@
where = ''
return where, marks
+
def get_sqlite_columns_names(cursor):
cursor.execute('pragma table_info(data);')
column_name_index = [x[0] for x in cursor.description].index('name')
columns = cursor.fetchall()
return [x[column_name_index] for x in columns]
+
def get_sqlite_data(db_path, where, limit, offset):
where = [] if where is None else where
|
{"golden_diff": "diff --git a/mindsdb/interfaces/datastore/sqlite_helpers.py b/mindsdb/interfaces/datastore/sqlite_helpers.py\n--- a/mindsdb/interfaces/datastore/sqlite_helpers.py\n+++ b/mindsdb/interfaces/datastore/sqlite_helpers.py\n@@ -2,11 +2,13 @@\n from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES\n import re\n \n+\n def create_sqlite_db(path, data_frame):\n con = sqlite3.connect(path)\n data_frame.to_sql(name='data', con=con, index=False)\n con.close()\n \n+\n def cast_df_columns_types(df, stats):\n types_map = {\n DATA_TYPES.NUMERIC: {\n@@ -41,11 +43,14 @@\n for column in columns:\n try:\n name = column['name']\n- col_type = stats[name]['typing']['data_type']\n- col_subtype = stats[name]['typing']['data_subtype']\n- new_type = types_map[col_type][col_subtype]\n+ if stats[name].get('empty', {}).get('is_empty', False):\n+ new_type = types_map[DATA_TYPES.NUMERIC][DATA_SUBTYPES.INT]\n+ else:\n+ col_type = stats[name]['typing']['data_type']\n+ col_subtype = stats[name]['typing']['data_subtype']\n+ new_type = types_map[col_type][col_subtype]\n if new_type == 'int64' or new_type == 'float64':\n- df[name] = df[name].apply(lambda x: x.replace(',','.') if isinstance(x, str) else x)\n+ df[name] = df[name].apply(lambda x: x.replace(',', '.') if isinstance(x, str) else x)\n if new_type == 'int64':\n df = df.astype({name: 'float64'})\n df = df.astype({name: new_type})\n@@ -55,6 +60,7 @@\n \n return df\n \n+\n def parse_filter(key, value):\n result = re.search(r'filter(_*.*)\\[(.*)\\]', key)\n operator = result.groups()[0].strip('_') or 'like'\n@@ -96,12 +102,14 @@\n where = ''\n return where, marks\n \n+\n def get_sqlite_columns_names(cursor):\n cursor.execute('pragma table_info(data);')\n column_name_index = [x[0] for x in cursor.description].index('name')\n columns = cursor.fetchall()\n return [x[column_name_index] for x in columns]\n \n+\n def get_sqlite_data(db_path, where, limit, offset):\n where = [] if where is None else where\n", "issue": "KeyError: 'typing' when creating new Datasource from MySQL\nUsing [airplane_satisfaction dataset](https://www.kaggle.com/teejmahal20/airline-passenger-satisfaction?select=train.csv) to create a data source from Scout through MySQL throws bellow error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py\", line 375, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py\", line 89, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py\", line 44, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py\", line 248, in wrapper\r\n resp = f(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py\", line 113, in put\r\n ca.default_store.save_datasource(name, source_type, request.json)\r\n File 
\"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py\", line 185, in save_datasource\r\n df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py\", line 54, in cast_df_columns_types\r\n print(f'Error: cant convert type of DS column {name} to {new_type}')\r\nUnboundLocalError: local variable 'new_type' referenced before assignment\r\nERROR:mindsdb.api.http.initialize:Exception on /api/datasources/AirplaneData [PUT]\r\nTraceback (most recent call last):\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py\", line 44, in cast_df_columns_types\r\n col_type = stats[name]['typing']['data_type']\r\nKeyError: 'typing'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py\", line 375, in wrapper\r\n resp = resource(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py\", line 89, in view\r\n return self.dispatch_request(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py\", line 44, in dispatch_request\r\n resp = meth(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py\", line 248, in wrapper\r\n resp = f(*args, **kwargs)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py\", line 113, in put\r\n ca.default_store.save_datasource(name, source_type, request.json)\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py\", line 185, in save_datasource\r\n df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])\r\n File \"/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py\", line 54, in cast_df_columns_types\r\n print(f'Error: cant convert type of DS column {name} to {new_type}')\r\nUnboundLocalError: local variable 'new_type' referenced before assignment\r\n\r\n```\n", "before_files": [{"content": "import sqlite3\nfrom mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES\nimport re\n\ndef create_sqlite_db(path, data_frame):\n con = sqlite3.connect(path)\n data_frame.to_sql(name='data', con=con, index=False)\n con.close()\n\ndef cast_df_columns_types(df, stats):\n types_map = {\n DATA_TYPES.NUMERIC: {\n DATA_SUBTYPES.INT: 'int64',\n DATA_SUBTYPES.FLOAT: 'float64',\n DATA_SUBTYPES.BINARY: 'bool'\n },\n DATA_TYPES.DATE: {\n DATA_SUBTYPES.DATE: 'datetime64', # YYYY-MM-DD\n DATA_SUBTYPES.TIMESTAMP: 'datetime64' # YYYY-MM-DD hh:mm:ss or 1852362464\n },\n DATA_TYPES.CATEGORICAL: {\n DATA_SUBTYPES.SINGLE: 'category',\n DATA_SUBTYPES.MULTIPLE: 
'category'\n },\n DATA_TYPES.FILE_PATH: {\n DATA_SUBTYPES.IMAGE: 'object',\n DATA_SUBTYPES.VIDEO: 'object',\n DATA_SUBTYPES.AUDIO: 'object'\n },\n DATA_TYPES.SEQUENTIAL: {\n DATA_SUBTYPES.ARRAY: 'object'\n },\n DATA_TYPES.TEXT: {\n DATA_SUBTYPES.SHORT: 'object',\n DATA_SUBTYPES.RICH: 'object'\n }\n }\n\n columns = [dict(name=x) for x in list(df.keys())]\n\n for column in columns:\n try:\n name = column['name']\n col_type = stats[name]['typing']['data_type']\n col_subtype = stats[name]['typing']['data_subtype']\n new_type = types_map[col_type][col_subtype]\n if new_type == 'int64' or new_type == 'float64':\n df[name] = df[name].apply(lambda x: x.replace(',','.') if isinstance(x, str) else x)\n if new_type == 'int64':\n df = df.astype({name: 'float64'})\n df = df.astype({name: new_type})\n except Exception as e:\n print(e)\n print(f'Error: cant convert type of DS column {name} to {new_type}')\n\n return df\n\ndef parse_filter(key, value):\n result = re.search(r'filter(_*.*)\\[(.*)\\]', key)\n operator = result.groups()[0].strip('_') or 'like'\n field = result.groups()[1]\n operators_map = {\n 'like': 'like',\n 'in': 'in',\n 'nin': 'not in',\n 'gt': '>',\n 'lt': '<',\n 'gte': '>=',\n 'lte': '<=',\n 'eq': '=',\n 'neq': '!='\n }\n if operator not in operators_map:\n return None\n operator = operators_map[operator]\n return {'field': field, 'value': value, 'operator': operator}\n\n\ndef prepare_sql_where(where):\n marks = {}\n if len(where) > 0:\n for i in range(len(where)):\n field = where[i]['field'].replace('\"', '\"\"')\n operator = where[i]['operator']\n value = where[i]['value']\n var_name = f'var{i}'\n if ' ' in field:\n field = f'\"{field}\"'\n if operator == 'like':\n marks[var_name] = '%' + value + '%'\n else:\n marks[var_name] = value\n where[i] = f'{field} {operator} :var{i}'\n where = 'where ' + ' and '.join(where)\n else:\n where = ''\n return where, marks\n\ndef get_sqlite_columns_names(cursor):\n cursor.execute('pragma table_info(data);')\n column_name_index = [x[0] for x in cursor.description].index('name')\n columns = cursor.fetchall()\n return [x[column_name_index] for x in columns]\n\ndef get_sqlite_data(db_path, where, limit, offset):\n where = [] if where is None else where\n\n con = sqlite3.connect(db_path)\n cur = con.cursor()\n\n offset = '' if limit is None or offset is None else f'offset {offset}'\n limit = '' if limit is None else f'limit {limit}'\n\n columns_names = get_sqlite_columns_names(cur)\n where = [x for x in where if x['field'] in columns_names]\n where, marks = prepare_sql_where(where)\n\n count_query = ' '.join(['select count(1) from data', where])\n cur.execute(count_query, marks)\n rowcount = cur.fetchone()[0]\n\n query = ' '.join(['select * from data', where, limit, offset])\n cur.execute(query, marks)\n data = cur.fetchall()\n data = [dict(zip(columns_names, x)) for x in data]\n\n cur.close()\n con.close()\n\n return {\n 'data': data,\n 'rowcount': rowcount,\n 'columns_names': columns_names\n }\n", "path": "mindsdb/interfaces/datastore/sqlite_helpers.py"}], "after_files": [{"content": "import sqlite3\nfrom mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES\nimport re\n\n\ndef create_sqlite_db(path, data_frame):\n con = sqlite3.connect(path)\n data_frame.to_sql(name='data', con=con, index=False)\n con.close()\n\n\ndef cast_df_columns_types(df, stats):\n types_map = {\n DATA_TYPES.NUMERIC: {\n DATA_SUBTYPES.INT: 'int64',\n DATA_SUBTYPES.FLOAT: 'float64',\n DATA_SUBTYPES.BINARY: 'bool'\n },\n DATA_TYPES.DATE: {\n DATA_SUBTYPES.DATE: 
'datetime64', # YYYY-MM-DD\n DATA_SUBTYPES.TIMESTAMP: 'datetime64' # YYYY-MM-DD hh:mm:ss or 1852362464\n },\n DATA_TYPES.CATEGORICAL: {\n DATA_SUBTYPES.SINGLE: 'category',\n DATA_SUBTYPES.MULTIPLE: 'category'\n },\n DATA_TYPES.FILE_PATH: {\n DATA_SUBTYPES.IMAGE: 'object',\n DATA_SUBTYPES.VIDEO: 'object',\n DATA_SUBTYPES.AUDIO: 'object'\n },\n DATA_TYPES.SEQUENTIAL: {\n DATA_SUBTYPES.ARRAY: 'object'\n },\n DATA_TYPES.TEXT: {\n DATA_SUBTYPES.SHORT: 'object',\n DATA_SUBTYPES.RICH: 'object'\n }\n }\n\n columns = [dict(name=x) for x in list(df.keys())]\n\n for column in columns:\n try:\n name = column['name']\n if stats[name].get('empty', {}).get('is_empty', False):\n new_type = types_map[DATA_TYPES.NUMERIC][DATA_SUBTYPES.INT]\n else:\n col_type = stats[name]['typing']['data_type']\n col_subtype = stats[name]['typing']['data_subtype']\n new_type = types_map[col_type][col_subtype]\n if new_type == 'int64' or new_type == 'float64':\n df[name] = df[name].apply(lambda x: x.replace(',', '.') if isinstance(x, str) else x)\n if new_type == 'int64':\n df = df.astype({name: 'float64'})\n df = df.astype({name: new_type})\n except Exception as e:\n print(e)\n print(f'Error: cant convert type of DS column {name} to {new_type}')\n\n return df\n\n\ndef parse_filter(key, value):\n result = re.search(r'filter(_*.*)\\[(.*)\\]', key)\n operator = result.groups()[0].strip('_') or 'like'\n field = result.groups()[1]\n operators_map = {\n 'like': 'like',\n 'in': 'in',\n 'nin': 'not in',\n 'gt': '>',\n 'lt': '<',\n 'gte': '>=',\n 'lte': '<=',\n 'eq': '=',\n 'neq': '!='\n }\n if operator not in operators_map:\n return None\n operator = operators_map[operator]\n return {'field': field, 'value': value, 'operator': operator}\n\n\ndef prepare_sql_where(where):\n marks = {}\n if len(where) > 0:\n for i in range(len(where)):\n field = where[i]['field'].replace('\"', '\"\"')\n operator = where[i]['operator']\n value = where[i]['value']\n var_name = f'var{i}'\n if ' ' in field:\n field = f'\"{field}\"'\n if operator == 'like':\n marks[var_name] = '%' + value + '%'\n else:\n marks[var_name] = value\n where[i] = f'{field} {operator} :var{i}'\n where = 'where ' + ' and '.join(where)\n else:\n where = ''\n return where, marks\n\n\ndef get_sqlite_columns_names(cursor):\n cursor.execute('pragma table_info(data);')\n column_name_index = [x[0] for x in cursor.description].index('name')\n columns = cursor.fetchall()\n return [x[column_name_index] for x in columns]\n\n\ndef get_sqlite_data(db_path, where, limit, offset):\n where = [] if where is None else where\n\n con = sqlite3.connect(db_path)\n cur = con.cursor()\n\n offset = '' if limit is None or offset is None else f'offset {offset}'\n limit = '' if limit is None else f'limit {limit}'\n\n columns_names = get_sqlite_columns_names(cur)\n where = [x for x in where if x['field'] in columns_names]\n where, marks = prepare_sql_where(where)\n\n count_query = ' '.join(['select count(1) from data', where])\n cur.execute(count_query, marks)\n rowcount = cur.fetchone()[0]\n\n query = ' '.join(['select * from data', where, limit, offset])\n cur.execute(query, marks)\n data = cur.fetchall()\n data = [dict(zip(columns_names, x)) for x in data]\n\n cur.close()\n con.close()\n\n return {\n 'data': data,\n 'rowcount': rowcount,\n 'columns_names': columns_names\n }\n", "path": "mindsdb/interfaces/datastore/sqlite_helpers.py"}]}
| 2,841 | 568 |
gh_patches_debug_27707
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-8345
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
puregym_gb spider broken
It looks like the PureGym spider (puregym_gb.py) is broken. For the last few weeks it was consistently returning 303 results, but then on 2024-05-04 the number dropped to 1. Looking at the Spider stats, I think it's found 401 branches, but then dropped all but one of them for having duplicate refs.
If I've understood what's going on correctly, the spider code is obtaining a ref from a "gymId" parameter on each individual branch page. However, I don't think this parameter exists any more on those pages. Hence each branch gets the same null ref value, and then all the duplicates are dropped.
Hopefully removing the custom ref code will fix the spider. I think there should be around 400 branches in total. (There's currently 399 unique https://www.puregym.com/gyms/*/ URLs in the sitemap.) So probably even the 303 results that were consistently returned before were the result of incorrectly dropping ~100 gyms.
I've just checked a sample of 15 of the URLs from the sitemap. I found one "temporarily closed" https://www.puregym.com/gyms/tonbridge/ and one "coming soon" https://www.puregym.com/gyms/washington/ . I don't know if it's worth trying to catch these cases or not.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/puregym_gb.py`
Content:
```
1 from scrapy.spiders import SitemapSpider
2
3 from locations.google_url import extract_google_position
4 from locations.structured_data_spider import StructuredDataSpider
5
6
7 class PureGymGBSpider(SitemapSpider, StructuredDataSpider):
8 name = "puregym_gb"
9 item_attributes = {
10 "brand": "PureGym",
11 "brand_wikidata": "Q18345898",
12 "country": "GB",
13 }
14 allowed_domains = ["www.puregym.com"]
15 sitemap_urls = ["https://www.puregym.com/sitemap.xml"]
16 sitemap_rules = [
17 (
18 r"https:\/\/www\.puregym\.com\/gyms\/([\w-]+)\/$",
19 "parse_sd",
20 ),
21 ]
22 wanted_types = ["HealthClub"]
23
24 def inspect_item(self, item, response):
25 item["ref"] = response.xpath('//meta[@itemprop="gymId"]/@content').get()
26 extract_google_position(item, response)
27
28 yield item
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/puregym_gb.py b/locations/spiders/puregym_gb.py
--- a/locations/spiders/puregym_gb.py
+++ b/locations/spiders/puregym_gb.py
@@ -1,28 +1,28 @@
+from urllib.parse import parse_qs, urlparse
+
from scrapy.spiders import SitemapSpider
-from locations.google_url import extract_google_position
from locations.structured_data_spider import StructuredDataSpider
class PureGymGBSpider(SitemapSpider, StructuredDataSpider):
name = "puregym_gb"
- item_attributes = {
- "brand": "PureGym",
- "brand_wikidata": "Q18345898",
- "country": "GB",
- }
+ item_attributes = {"brand": "PureGym", "brand_wikidata": "Q18345898", "country": "GB"}
allowed_domains = ["www.puregym.com"]
sitemap_urls = ["https://www.puregym.com/sitemap.xml"]
- sitemap_rules = [
- (
- r"https:\/\/www\.puregym\.com\/gyms\/([\w-]+)\/$",
- "parse_sd",
- ),
- ]
+ sitemap_rules = [(r"/gyms/([^/]+)/$", "parse_sd")]
wanted_types = ["HealthClub"]
- def inspect_item(self, item, response):
- item["ref"] = response.xpath('//meta[@itemprop="gymId"]/@content').get()
- extract_google_position(item, response)
+ def pre_process_data(self, ld_data, **kwargs):
+ ld_data["address"] = ld_data.get("location", {}).get("address")
+
+ def post_process_item(self, item, response, ld_data, **kwargs):
+ item["branch"] = item.pop("name")
+ item["image"] = None
+
+ if img := response.xpath('//img[contains(@src, "tiles.stadiamaps.com")]/@src').get():
+ q = parse_qs(urlparse(img)[4])
+ if "center" in q:
+ item["lat"], item["lon"] = q["center"][0].split(",", 1)
yield item
|
{"golden_diff": "diff --git a/locations/spiders/puregym_gb.py b/locations/spiders/puregym_gb.py\n--- a/locations/spiders/puregym_gb.py\n+++ b/locations/spiders/puregym_gb.py\n@@ -1,28 +1,28 @@\n+from urllib.parse import parse_qs, urlparse\n+\n from scrapy.spiders import SitemapSpider\n \n-from locations.google_url import extract_google_position\n from locations.structured_data_spider import StructuredDataSpider\n \n \n class PureGymGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"puregym_gb\"\n- item_attributes = {\n- \"brand\": \"PureGym\",\n- \"brand_wikidata\": \"Q18345898\",\n- \"country\": \"GB\",\n- }\n+ item_attributes = {\"brand\": \"PureGym\", \"brand_wikidata\": \"Q18345898\", \"country\": \"GB\"}\n allowed_domains = [\"www.puregym.com\"]\n sitemap_urls = [\"https://www.puregym.com/sitemap.xml\"]\n- sitemap_rules = [\n- (\n- r\"https:\\/\\/www\\.puregym\\.com\\/gyms\\/([\\w-]+)\\/$\",\n- \"parse_sd\",\n- ),\n- ]\n+ sitemap_rules = [(r\"/gyms/([^/]+)/$\", \"parse_sd\")]\n wanted_types = [\"HealthClub\"]\n \n- def inspect_item(self, item, response):\n- item[\"ref\"] = response.xpath('//meta[@itemprop=\"gymId\"]/@content').get()\n- extract_google_position(item, response)\n+ def pre_process_data(self, ld_data, **kwargs):\n+ ld_data[\"address\"] = ld_data.get(\"location\", {}).get(\"address\")\n+\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n+ item[\"branch\"] = item.pop(\"name\")\n+ item[\"image\"] = None\n+\n+ if img := response.xpath('//img[contains(@src, \"tiles.stadiamaps.com\")]/@src').get():\n+ q = parse_qs(urlparse(img)[4])\n+ if \"center\" in q:\n+ item[\"lat\"], item[\"lon\"] = q[\"center\"][0].split(\",\", 1)\n \n yield item\n", "issue": "puregym_gb spider broken\nIt looks like the PureGym spider (puregym_gb.py) is broken. For the last few weeks it was consistently returning 303 results, but then on 2024-05-04 the number dropped to 1. Looking at the Spider stats, I think it's found 401 branches, but then dropped all but one of them for having duplicate refs.\r\n\r\nIf I've understood what's going on correctly, the spider code is obtaining a ref from a \"gymId\" parameter on each individual branch page. However, I don't think this parameter exists any more on those pages. Hence each branch gets the same null ref value, and then all the duplicates are dropped.\r\n\r\nHopefully removing the custom ref code will fix the spider. I think there should be around 400 branches in total. (There's currently 399 unique https://www.puregym.com/gyms/*/ URLs in the sitemap.) So probably even the 303 results being returned consistently before was as a result of incorrectly dropping ~100 gyms.\r\n\r\nI've just checked a sample of 15 of the URLs from the sitemap. I found one \"temporarily closed\" https://www.puregym.com/gyms/tonbridge/ and one \"coming soon\" https://www.puregym.com/gyms/washington/ . 
I don't know if it's worth trying to catch these cases or not.\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.google_url import extract_google_position\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass PureGymGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"puregym_gb\"\n item_attributes = {\n \"brand\": \"PureGym\",\n \"brand_wikidata\": \"Q18345898\",\n \"country\": \"GB\",\n }\n allowed_domains = [\"www.puregym.com\"]\n sitemap_urls = [\"https://www.puregym.com/sitemap.xml\"]\n sitemap_rules = [\n (\n r\"https:\\/\\/www\\.puregym\\.com\\/gyms\\/([\\w-]+)\\/$\",\n \"parse_sd\",\n ),\n ]\n wanted_types = [\"HealthClub\"]\n\n def inspect_item(self, item, response):\n item[\"ref\"] = response.xpath('//meta[@itemprop=\"gymId\"]/@content').get()\n extract_google_position(item, response)\n\n yield item\n", "path": "locations/spiders/puregym_gb.py"}], "after_files": [{"content": "from urllib.parse import parse_qs, urlparse\n\nfrom scrapy.spiders import SitemapSpider\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass PureGymGBSpider(SitemapSpider, StructuredDataSpider):\n name = \"puregym_gb\"\n item_attributes = {\"brand\": \"PureGym\", \"brand_wikidata\": \"Q18345898\", \"country\": \"GB\"}\n allowed_domains = [\"www.puregym.com\"]\n sitemap_urls = [\"https://www.puregym.com/sitemap.xml\"]\n sitemap_rules = [(r\"/gyms/([^/]+)/$\", \"parse_sd\")]\n wanted_types = [\"HealthClub\"]\n\n def pre_process_data(self, ld_data, **kwargs):\n ld_data[\"address\"] = ld_data.get(\"location\", {}).get(\"address\")\n\n def post_process_item(self, item, response, ld_data, **kwargs):\n item[\"branch\"] = item.pop(\"name\")\n item[\"image\"] = None\n\n if img := response.xpath('//img[contains(@src, \"tiles.stadiamaps.com\")]/@src').get():\n q = parse_qs(urlparse(img)[4])\n if \"center\" in q:\n item[\"lat\"], item[\"lon\"] = q[\"center\"][0].split(\",\", 1)\n\n yield item\n", "path": "locations/spiders/puregym_gb.py"}]}
| 848 | 511 |
gh_patches_debug_8849
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-1118
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ls broken after merge of #1089
After the merge of #1089, ls in the qtconsole prints blank lines instead of the contents of the directory.
On Ubuntu 11.10 using Python 2.7.
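My guess (not verified in detail) is that this comes from the carriage-return handling that arrived with #1089: `ls` output uses `\r\n` line endings, and the current pattern turns every `\r`, including the one in `\r\n`, into a carriage-return action, so each line gets overwritten. A quick reproduction sketch against the current module, assuming a working PyQt environment:
```python
# Sketch: show that every "\r\n" in a stream currently produces a
# carriage-return action (which the console then uses to overwrite the line).
from IPython.frontend.qt.console.ansi_code_processor import AnsiCodeProcessor

proc = AnsiCodeProcessor()
for substring in proc.split_string("file_a\r\nfile_b\r\n"):
    print("%r -> %s" % (substring, [a.action for a in proc.actions]))
```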
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/frontend/qt/console/ansi_code_processor.py`
Content:
```
1 """ Utilities for processing ANSI escape codes and special ASCII characters.
2 """
3 #-----------------------------------------------------------------------------
4 # Imports
5 #-----------------------------------------------------------------------------
6
7 # Standard library imports
8 from collections import namedtuple
9 import re
10
11 # System library imports
12 from IPython.external.qt import QtCore, QtGui
13
14 #-----------------------------------------------------------------------------
15 # Constants and datatypes
16 #-----------------------------------------------------------------------------
17
18 # An action for erase requests (ED and EL commands).
19 EraseAction = namedtuple('EraseAction', ['action', 'area', 'erase_to'])
20
21 # An action for cursor move requests (CUU, CUD, CUF, CUB, CNL, CPL, CHA, CUP,
22 # and HVP commands).
23 # FIXME: Not implemented in AnsiCodeProcessor.
24 MoveAction = namedtuple('MoveAction', ['action', 'dir', 'unit', 'count'])
25
26 # An action for scroll requests (SU and ST) and form feeds.
27 ScrollAction = namedtuple('ScrollAction', ['action', 'dir', 'unit', 'count'])
28
29 # An action for the carriage return character
30 CarriageReturnAction = namedtuple('CarriageReturnAction', ['action'])
31
32 # An action for the beep character
33 BeepAction = namedtuple('BeepAction', ['action'])
34
35 # Regular expressions.
36 CSI_COMMANDS = 'ABCDEFGHJKSTfmnsu'
37 CSI_SUBPATTERN = '\[(.*?)([%s])' % CSI_COMMANDS
38 OSC_SUBPATTERN = '\](.*?)[\x07\x1b]'
39 ANSI_PATTERN = ('\x01?\x1b(%s|%s)\x02?' % \
40 (CSI_SUBPATTERN, OSC_SUBPATTERN))
41 ANSI_OR_SPECIAL_PATTERN = re.compile('(\b|\r)|(?:%s)' % ANSI_PATTERN)
42 SPECIAL_PATTERN = re.compile('([\f])')
43
44 #-----------------------------------------------------------------------------
45 # Classes
46 #-----------------------------------------------------------------------------
47
48 class AnsiCodeProcessor(object):
49 """ Translates special ASCII characters and ANSI escape codes into readable
50 attributes. It also supports a few non-standard, xterm-specific codes.
51 """
52
53 # Whether to increase intensity or set boldness for SGR code 1.
54 # (Different terminals handle this in different ways.)
55 bold_text_enabled = False
56
57 # We provide an empty default color map because subclasses will likely want
58 # to use a custom color format.
59 default_color_map = {}
60
61 #---------------------------------------------------------------------------
62 # AnsiCodeProcessor interface
63 #---------------------------------------------------------------------------
64
65 def __init__(self):
66 self.actions = []
67 self.color_map = self.default_color_map.copy()
68 self.reset_sgr()
69
70 def reset_sgr(self):
71 """ Reset graphics attributs to their default values.
72 """
73 self.intensity = 0
74 self.italic = False
75 self.bold = False
76 self.underline = False
77 self.foreground_color = None
78 self.background_color = None
79
80 def split_string(self, string):
81 """ Yields substrings for which the same escape code applies.
82 """
83 self.actions = []
84 start = 0
85
86 for match in ANSI_OR_SPECIAL_PATTERN.finditer(string):
87 raw = string[start:match.start()]
88 substring = SPECIAL_PATTERN.sub(self._replace_special, raw)
89 if substring or self.actions:
90 yield substring
91 start = match.end()
92
93 self.actions = []
94 groups = filter(lambda x: x is not None, match.groups())
95 if groups[0] == '\r':
96 self.actions.append(CarriageReturnAction('carriage-return'))
97 yield ''
98 elif groups[0] == '\b':
99 self.actions.append(BeepAction('beep'))
100 yield ''
101 else:
102 params = [ param for param in groups[1].split(';') if param ]
103 if groups[0].startswith('['):
104 # Case 1: CSI code.
105 try:
106 params = map(int, params)
107 except ValueError:
108 # Silently discard badly formed codes.
109 pass
110 else:
111 self.set_csi_code(groups[2], params)
112
113 elif groups[0].startswith(']'):
114 # Case 2: OSC code.
115 self.set_osc_code(params)
116
117 raw = string[start:]
118 substring = SPECIAL_PATTERN.sub(self._replace_special, raw)
119 if substring or self.actions:
120 yield substring
121
122 def set_csi_code(self, command, params=[]):
123 """ Set attributes based on CSI (Control Sequence Introducer) code.
124
125 Parameters
126 ----------
127 command : str
128 The code identifier, i.e. the final character in the sequence.
129
130 params : sequence of integers, optional
131 The parameter codes for the command.
132 """
133 if command == 'm': # SGR - Select Graphic Rendition
134 if params:
135 self.set_sgr_code(params)
136 else:
137 self.set_sgr_code([0])
138
139 elif (command == 'J' or # ED - Erase Data
140 command == 'K'): # EL - Erase in Line
141 code = params[0] if params else 0
142 if 0 <= code <= 2:
143 area = 'screen' if command == 'J' else 'line'
144 if code == 0:
145 erase_to = 'end'
146 elif code == 1:
147 erase_to = 'start'
148 elif code == 2:
149 erase_to = 'all'
150 self.actions.append(EraseAction('erase', area, erase_to))
151
152 elif (command == 'S' or # SU - Scroll Up
153 command == 'T'): # SD - Scroll Down
154 dir = 'up' if command == 'S' else 'down'
155 count = params[0] if params else 1
156 self.actions.append(ScrollAction('scroll', dir, 'line', count))
157
158 def set_osc_code(self, params):
159 """ Set attributes based on OSC (Operating System Command) parameters.
160
161 Parameters
162 ----------
163 params : sequence of str
164 The parameters for the command.
165 """
166 try:
167 command = int(params.pop(0))
168 except (IndexError, ValueError):
169 return
170
171 if command == 4:
172 # xterm-specific: set color number to color spec.
173 try:
174 color = int(params.pop(0))
175 spec = params.pop(0)
176 self.color_map[color] = self._parse_xterm_color_spec(spec)
177 except (IndexError, ValueError):
178 pass
179
180 def set_sgr_code(self, params):
181 """ Set attributes based on SGR (Select Graphic Rendition) codes.
182
183 Parameters
184 ----------
185 params : sequence of ints
186 A list of SGR codes for one or more SGR commands. Usually this
187 sequence will have one element per command, although certain
188 xterm-specific commands requires multiple elements.
189 """
190 # Always consume the first parameter.
191 if not params:
192 return
193 code = params.pop(0)
194
195 if code == 0:
196 self.reset_sgr()
197 elif code == 1:
198 if self.bold_text_enabled:
199 self.bold = True
200 else:
201 self.intensity = 1
202 elif code == 2:
203 self.intensity = 0
204 elif code == 3:
205 self.italic = True
206 elif code == 4:
207 self.underline = True
208 elif code == 22:
209 self.intensity = 0
210 self.bold = False
211 elif code == 23:
212 self.italic = False
213 elif code == 24:
214 self.underline = False
215 elif code >= 30 and code <= 37:
216 self.foreground_color = code - 30
217 elif code == 38 and params and params.pop(0) == 5:
218 # xterm-specific: 256 color support.
219 if params:
220 self.foreground_color = params.pop(0)
221 elif code == 39:
222 self.foreground_color = None
223 elif code >= 40 and code <= 47:
224 self.background_color = code - 40
225 elif code == 48 and params and params.pop(0) == 5:
226 # xterm-specific: 256 color support.
227 if params:
228 self.background_color = params.pop(0)
229 elif code == 49:
230 self.background_color = None
231
232 # Recurse with unconsumed parameters.
233 self.set_sgr_code(params)
234
235 #---------------------------------------------------------------------------
236 # Protected interface
237 #---------------------------------------------------------------------------
238
239 def _parse_xterm_color_spec(self, spec):
240 if spec.startswith('rgb:'):
241 return tuple(map(lambda x: int(x, 16), spec[4:].split('/')))
242 elif spec.startswith('rgbi:'):
243 return tuple(map(lambda x: int(float(x) * 255),
244 spec[5:].split('/')))
245 elif spec == '?':
246 raise ValueError('Unsupported xterm color spec')
247 return spec
248
249 def _replace_special(self, match):
250 special = match.group(1)
251 if special == '\f':
252 self.actions.append(ScrollAction('scroll', 'down', 'page', 1))
253 return ''
254
255
256 class QtAnsiCodeProcessor(AnsiCodeProcessor):
257 """ Translates ANSI escape codes into QTextCharFormats.
258 """
259
260 # A map from ANSI color codes to SVG color names or RGB(A) tuples.
261 darkbg_color_map = {
262 0 : 'black', # black
263 1 : 'darkred', # red
264 2 : 'darkgreen', # green
265 3 : 'brown', # yellow
266 4 : 'darkblue', # blue
267 5 : 'darkviolet', # magenta
268 6 : 'steelblue', # cyan
269 7 : 'grey', # white
270 8 : 'grey', # black (bright)
271 9 : 'red', # red (bright)
272 10 : 'lime', # green (bright)
273 11 : 'yellow', # yellow (bright)
274 12 : 'deepskyblue', # blue (bright)
275 13 : 'magenta', # magenta (bright)
276 14 : 'cyan', # cyan (bright)
277 15 : 'white' } # white (bright)
278
279 # Set the default color map for super class.
280 default_color_map = darkbg_color_map.copy()
281
282 def get_color(self, color, intensity=0):
283 """ Returns a QColor for a given color code, or None if one cannot be
284 constructed.
285 """
286 if color is None:
287 return None
288
289 # Adjust for intensity, if possible.
290 if color < 8 and intensity > 0:
291 color += 8
292
293 constructor = self.color_map.get(color, None)
294 if isinstance(constructor, basestring):
295 # If this is an X11 color name, we just hope there is a close SVG
296 # color name. We could use QColor's static method
297 # 'setAllowX11ColorNames()', but this is global and only available
298 # on X11. It seems cleaner to aim for uniformity of behavior.
299 return QtGui.QColor(constructor)
300
301 elif isinstance(constructor, (tuple, list)):
302 return QtGui.QColor(*constructor)
303
304 return None
305
306 def get_format(self):
307 """ Returns a QTextCharFormat that encodes the current style attributes.
308 """
309 format = QtGui.QTextCharFormat()
310
311 # Set foreground color
312 qcolor = self.get_color(self.foreground_color, self.intensity)
313 if qcolor is not None:
314 format.setForeground(qcolor)
315
316 # Set background color
317 qcolor = self.get_color(self.background_color, self.intensity)
318 if qcolor is not None:
319 format.setBackground(qcolor)
320
321 # Set font weight/style options
322 if self.bold:
323 format.setFontWeight(QtGui.QFont.Bold)
324 else:
325 format.setFontWeight(QtGui.QFont.Normal)
326 format.setFontItalic(self.italic)
327 format.setFontUnderline(self.underline)
328
329 return format
330
331 def set_background_color(self, color):
332 """ Given a background color (a QColor), attempt to set a color map
333 that will be aesthetically pleasing.
334 """
335 # Set a new default color map.
336 self.default_color_map = self.darkbg_color_map.copy()
337
338 if color.value() >= 127:
339 # Colors appropriate for a terminal with a light background. For
340 # now, only use non-bright colors...
341 for i in xrange(8):
342 self.default_color_map[i + 8] = self.default_color_map[i]
343
344 # ...and replace white with black.
345 self.default_color_map[7] = self.default_color_map[15] = 'black'
346
347 # Update the current color map with the new defaults.
348 self.color_map.update(self.default_color_map)
349
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/IPython/frontend/qt/console/ansi_code_processor.py b/IPython/frontend/qt/console/ansi_code_processor.py
--- a/IPython/frontend/qt/console/ansi_code_processor.py
+++ b/IPython/frontend/qt/console/ansi_code_processor.py
@@ -38,7 +38,7 @@
OSC_SUBPATTERN = '\](.*?)[\x07\x1b]'
ANSI_PATTERN = ('\x01?\x1b(%s|%s)\x02?' % \
(CSI_SUBPATTERN, OSC_SUBPATTERN))
-ANSI_OR_SPECIAL_PATTERN = re.compile('(\b|\r)|(?:%s)' % ANSI_PATTERN)
+ANSI_OR_SPECIAL_PATTERN = re.compile('(\b|\r(?!\n))|(?:%s)' % ANSI_PATTERN)
SPECIAL_PATTERN = re.compile('([\f])')
#-----------------------------------------------------------------------------
|
{"golden_diff": "diff --git a/IPython/frontend/qt/console/ansi_code_processor.py b/IPython/frontend/qt/console/ansi_code_processor.py\n--- a/IPython/frontend/qt/console/ansi_code_processor.py\n+++ b/IPython/frontend/qt/console/ansi_code_processor.py\n@@ -38,7 +38,7 @@\n OSC_SUBPATTERN = '\\](.*?)[\\x07\\x1b]'\n ANSI_PATTERN = ('\\x01?\\x1b(%s|%s)\\x02?' % \\\n (CSI_SUBPATTERN, OSC_SUBPATTERN))\n-ANSI_OR_SPECIAL_PATTERN = re.compile('(\\b|\\r)|(?:%s)' % ANSI_PATTERN)\n+ANSI_OR_SPECIAL_PATTERN = re.compile('(\\b|\\r(?!\\n))|(?:%s)' % ANSI_PATTERN)\n SPECIAL_PATTERN = re.compile('([\\f])')\n \n #-----------------------------------------------------------------------------\n", "issue": "ls broken after merge of #1089\nAfter the merge of #1089 ls in the qtconsole prints blank lines and not the contents of the dir.\nOn ubuntu 11.10 using python2.7\n\n", "before_files": [{"content": "\"\"\" Utilities for processing ANSI escape codes and special ASCII characters.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nfrom collections import namedtuple\nimport re\n\n# System library imports\nfrom IPython.external.qt import QtCore, QtGui\n\n#-----------------------------------------------------------------------------\n# Constants and datatypes\n#-----------------------------------------------------------------------------\n\n# An action for erase requests (ED and EL commands).\nEraseAction = namedtuple('EraseAction', ['action', 'area', 'erase_to'])\n\n# An action for cursor move requests (CUU, CUD, CUF, CUB, CNL, CPL, CHA, CUP,\n# and HVP commands).\n# FIXME: Not implemented in AnsiCodeProcessor.\nMoveAction = namedtuple('MoveAction', ['action', 'dir', 'unit', 'count'])\n\n# An action for scroll requests (SU and ST) and form feeds.\nScrollAction = namedtuple('ScrollAction', ['action', 'dir', 'unit', 'count'])\n\n# An action for the carriage return character\nCarriageReturnAction = namedtuple('CarriageReturnAction', ['action'])\n\n# An action for the beep character\nBeepAction = namedtuple('BeepAction', ['action'])\n\n# Regular expressions.\nCSI_COMMANDS = 'ABCDEFGHJKSTfmnsu'\nCSI_SUBPATTERN = '\\[(.*?)([%s])' % CSI_COMMANDS\nOSC_SUBPATTERN = '\\](.*?)[\\x07\\x1b]'\nANSI_PATTERN = ('\\x01?\\x1b(%s|%s)\\x02?' % \\\n (CSI_SUBPATTERN, OSC_SUBPATTERN))\nANSI_OR_SPECIAL_PATTERN = re.compile('(\\b|\\r)|(?:%s)' % ANSI_PATTERN)\nSPECIAL_PATTERN = re.compile('([\\f])')\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass AnsiCodeProcessor(object):\n \"\"\" Translates special ASCII characters and ANSI escape codes into readable\n attributes. 
It also supports a few non-standard, xterm-specific codes.\n \"\"\"\n\n # Whether to increase intensity or set boldness for SGR code 1.\n # (Different terminals handle this in different ways.)\n bold_text_enabled = False\n\n # We provide an empty default color map because subclasses will likely want\n # to use a custom color format.\n default_color_map = {}\n\n #---------------------------------------------------------------------------\n # AnsiCodeProcessor interface\n #---------------------------------------------------------------------------\n\n def __init__(self):\n self.actions = []\n self.color_map = self.default_color_map.copy()\n self.reset_sgr()\n\n def reset_sgr(self):\n \"\"\" Reset graphics attributs to their default values.\n \"\"\"\n self.intensity = 0\n self.italic = False\n self.bold = False\n self.underline = False\n self.foreground_color = None\n self.background_color = None\n\n def split_string(self, string):\n \"\"\" Yields substrings for which the same escape code applies.\n \"\"\"\n self.actions = []\n start = 0\n\n for match in ANSI_OR_SPECIAL_PATTERN.finditer(string):\n raw = string[start:match.start()]\n substring = SPECIAL_PATTERN.sub(self._replace_special, raw)\n if substring or self.actions:\n yield substring\n start = match.end()\n\n self.actions = []\n groups = filter(lambda x: x is not None, match.groups())\n if groups[0] == '\\r':\n self.actions.append(CarriageReturnAction('carriage-return'))\n yield ''\n elif groups[0] == '\\b':\n self.actions.append(BeepAction('beep'))\n yield ''\n else:\n params = [ param for param in groups[1].split(';') if param ]\n if groups[0].startswith('['):\n # Case 1: CSI code.\n try:\n params = map(int, params)\n except ValueError:\n # Silently discard badly formed codes.\n pass\n else:\n self.set_csi_code(groups[2], params)\n\n elif groups[0].startswith(']'):\n # Case 2: OSC code.\n self.set_osc_code(params)\n\n raw = string[start:]\n substring = SPECIAL_PATTERN.sub(self._replace_special, raw)\n if substring or self.actions:\n yield substring\n\n def set_csi_code(self, command, params=[]):\n \"\"\" Set attributes based on CSI (Control Sequence Introducer) code.\n\n Parameters\n ----------\n command : str\n The code identifier, i.e. 
the final character in the sequence.\n\n params : sequence of integers, optional\n The parameter codes for the command.\n \"\"\"\n if command == 'm': # SGR - Select Graphic Rendition\n if params:\n self.set_sgr_code(params)\n else:\n self.set_sgr_code([0])\n\n elif (command == 'J' or # ED - Erase Data\n command == 'K'): # EL - Erase in Line\n code = params[0] if params else 0\n if 0 <= code <= 2:\n area = 'screen' if command == 'J' else 'line'\n if code == 0:\n erase_to = 'end'\n elif code == 1:\n erase_to = 'start'\n elif code == 2:\n erase_to = 'all'\n self.actions.append(EraseAction('erase', area, erase_to))\n\n elif (command == 'S' or # SU - Scroll Up\n command == 'T'): # SD - Scroll Down\n dir = 'up' if command == 'S' else 'down'\n count = params[0] if params else 1\n self.actions.append(ScrollAction('scroll', dir, 'line', count))\n\n def set_osc_code(self, params):\n \"\"\" Set attributes based on OSC (Operating System Command) parameters.\n\n Parameters\n ----------\n params : sequence of str\n The parameters for the command.\n \"\"\"\n try:\n command = int(params.pop(0))\n except (IndexError, ValueError):\n return\n\n if command == 4:\n # xterm-specific: set color number to color spec.\n try:\n color = int(params.pop(0))\n spec = params.pop(0)\n self.color_map[color] = self._parse_xterm_color_spec(spec)\n except (IndexError, ValueError):\n pass\n\n def set_sgr_code(self, params):\n \"\"\" Set attributes based on SGR (Select Graphic Rendition) codes.\n\n Parameters\n ----------\n params : sequence of ints\n A list of SGR codes for one or more SGR commands. Usually this\n sequence will have one element per command, although certain\n xterm-specific commands requires multiple elements.\n \"\"\"\n # Always consume the first parameter.\n if not params:\n return\n code = params.pop(0)\n\n if code == 0:\n self.reset_sgr()\n elif code == 1:\n if self.bold_text_enabled:\n self.bold = True\n else:\n self.intensity = 1\n elif code == 2:\n self.intensity = 0\n elif code == 3:\n self.italic = True\n elif code == 4:\n self.underline = True\n elif code == 22:\n self.intensity = 0\n self.bold = False\n elif code == 23:\n self.italic = False\n elif code == 24:\n self.underline = False\n elif code >= 30 and code <= 37:\n self.foreground_color = code - 30\n elif code == 38 and params and params.pop(0) == 5:\n # xterm-specific: 256 color support.\n if params:\n self.foreground_color = params.pop(0)\n elif code == 39:\n self.foreground_color = None\n elif code >= 40 and code <= 47:\n self.background_color = code - 40\n elif code == 48 and params and params.pop(0) == 5:\n # xterm-specific: 256 color support.\n if params:\n self.background_color = params.pop(0)\n elif code == 49:\n self.background_color = None\n\n # Recurse with unconsumed parameters.\n self.set_sgr_code(params)\n\n #---------------------------------------------------------------------------\n # Protected interface\n #---------------------------------------------------------------------------\n\n def _parse_xterm_color_spec(self, spec):\n if spec.startswith('rgb:'):\n return tuple(map(lambda x: int(x, 16), spec[4:].split('/')))\n elif spec.startswith('rgbi:'):\n return tuple(map(lambda x: int(float(x) * 255),\n spec[5:].split('/')))\n elif spec == '?':\n raise ValueError('Unsupported xterm color spec')\n return spec\n\n def _replace_special(self, match):\n special = match.group(1)\n if special == '\\f':\n self.actions.append(ScrollAction('scroll', 'down', 'page', 1))\n return ''\n\n\nclass QtAnsiCodeProcessor(AnsiCodeProcessor):\n \"\"\" 
Translates ANSI escape codes into QTextCharFormats.\n \"\"\"\n\n # A map from ANSI color codes to SVG color names or RGB(A) tuples.\n darkbg_color_map = {\n 0 : 'black', # black\n 1 : 'darkred', # red\n 2 : 'darkgreen', # green\n 3 : 'brown', # yellow\n 4 : 'darkblue', # blue\n 5 : 'darkviolet', # magenta\n 6 : 'steelblue', # cyan\n 7 : 'grey', # white\n 8 : 'grey', # black (bright)\n 9 : 'red', # red (bright)\n 10 : 'lime', # green (bright)\n 11 : 'yellow', # yellow (bright)\n 12 : 'deepskyblue', # blue (bright)\n 13 : 'magenta', # magenta (bright)\n 14 : 'cyan', # cyan (bright)\n 15 : 'white' } # white (bright)\n\n # Set the default color map for super class.\n default_color_map = darkbg_color_map.copy()\n\n def get_color(self, color, intensity=0):\n \"\"\" Returns a QColor for a given color code, or None if one cannot be\n constructed.\n \"\"\"\n if color is None:\n return None\n\n # Adjust for intensity, if possible.\n if color < 8 and intensity > 0:\n color += 8\n\n constructor = self.color_map.get(color, None)\n if isinstance(constructor, basestring):\n # If this is an X11 color name, we just hope there is a close SVG\n # color name. We could use QColor's static method\n # 'setAllowX11ColorNames()', but this is global and only available\n # on X11. It seems cleaner to aim for uniformity of behavior.\n return QtGui.QColor(constructor)\n\n elif isinstance(constructor, (tuple, list)):\n return QtGui.QColor(*constructor)\n\n return None\n\n def get_format(self):\n \"\"\" Returns a QTextCharFormat that encodes the current style attributes.\n \"\"\"\n format = QtGui.QTextCharFormat()\n\n # Set foreground color\n qcolor = self.get_color(self.foreground_color, self.intensity)\n if qcolor is not None:\n format.setForeground(qcolor)\n\n # Set background color\n qcolor = self.get_color(self.background_color, self.intensity)\n if qcolor is not None:\n format.setBackground(qcolor)\n\n # Set font weight/style options\n if self.bold:\n format.setFontWeight(QtGui.QFont.Bold)\n else:\n format.setFontWeight(QtGui.QFont.Normal)\n format.setFontItalic(self.italic)\n format.setFontUnderline(self.underline)\n\n return format\n\n def set_background_color(self, color):\n \"\"\" Given a background color (a QColor), attempt to set a color map\n that will be aesthetically pleasing.\n \"\"\"\n # Set a new default color map.\n self.default_color_map = self.darkbg_color_map.copy()\n\n if color.value() >= 127:\n # Colors appropriate for a terminal with a light background. 
For\n # now, only use non-bright colors...\n for i in xrange(8):\n self.default_color_map[i + 8] = self.default_color_map[i]\n\n # ...and replace white with black.\n self.default_color_map[7] = self.default_color_map[15] = 'black'\n\n # Update the current color map with the new defaults.\n self.color_map.update(self.default_color_map)\n", "path": "IPython/frontend/qt/console/ansi_code_processor.py"}], "after_files": [{"content": "\"\"\" Utilities for processing ANSI escape codes and special ASCII characters.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nfrom collections import namedtuple\nimport re\n\n# System library imports\nfrom IPython.external.qt import QtCore, QtGui\n\n#-----------------------------------------------------------------------------\n# Constants and datatypes\n#-----------------------------------------------------------------------------\n\n# An action for erase requests (ED and EL commands).\nEraseAction = namedtuple('EraseAction', ['action', 'area', 'erase_to'])\n\n# An action for cursor move requests (CUU, CUD, CUF, CUB, CNL, CPL, CHA, CUP,\n# and HVP commands).\n# FIXME: Not implemented in AnsiCodeProcessor.\nMoveAction = namedtuple('MoveAction', ['action', 'dir', 'unit', 'count'])\n\n# An action for scroll requests (SU and ST) and form feeds.\nScrollAction = namedtuple('ScrollAction', ['action', 'dir', 'unit', 'count'])\n\n# An action for the carriage return character\nCarriageReturnAction = namedtuple('CarriageReturnAction', ['action'])\n\n# An action for the beep character\nBeepAction = namedtuple('BeepAction', ['action'])\n\n# Regular expressions.\nCSI_COMMANDS = 'ABCDEFGHJKSTfmnsu'\nCSI_SUBPATTERN = '\\[(.*?)([%s])' % CSI_COMMANDS\nOSC_SUBPATTERN = '\\](.*?)[\\x07\\x1b]'\nANSI_PATTERN = ('\\x01?\\x1b(%s|%s)\\x02?' % \\\n (CSI_SUBPATTERN, OSC_SUBPATTERN))\nANSI_OR_SPECIAL_PATTERN = re.compile('(\\b|\\r(?!\\n))|(?:%s)' % ANSI_PATTERN)\nSPECIAL_PATTERN = re.compile('([\\f])')\n\n#-----------------------------------------------------------------------------\n# Classes\n#-----------------------------------------------------------------------------\n\nclass AnsiCodeProcessor(object):\n \"\"\" Translates special ASCII characters and ANSI escape codes into readable\n attributes. 
It also supports a few non-standard, xterm-specific codes.\n \"\"\"\n\n # Whether to increase intensity or set boldness for SGR code 1.\n # (Different terminals handle this in different ways.)\n bold_text_enabled = False\n\n # We provide an empty default color map because subclasses will likely want\n # to use a custom color format.\n default_color_map = {}\n\n #---------------------------------------------------------------------------\n # AnsiCodeProcessor interface\n #---------------------------------------------------------------------------\n\n def __init__(self):\n self.actions = []\n self.color_map = self.default_color_map.copy()\n self.reset_sgr()\n\n def reset_sgr(self):\n \"\"\" Reset graphics attributs to their default values.\n \"\"\"\n self.intensity = 0\n self.italic = False\n self.bold = False\n self.underline = False\n self.foreground_color = None\n self.background_color = None\n\n def split_string(self, string):\n \"\"\" Yields substrings for which the same escape code applies.\n \"\"\"\n self.actions = []\n start = 0\n\n for match in ANSI_OR_SPECIAL_PATTERN.finditer(string):\n raw = string[start:match.start()]\n substring = SPECIAL_PATTERN.sub(self._replace_special, raw)\n if substring or self.actions:\n yield substring\n start = match.end()\n\n self.actions = []\n groups = filter(lambda x: x is not None, match.groups())\n if groups[0] == '\\r':\n self.actions.append(CarriageReturnAction('carriage-return'))\n yield ''\n elif groups[0] == '\\b':\n self.actions.append(BeepAction('beep'))\n yield ''\n else:\n params = [ param for param in groups[1].split(';') if param ]\n if groups[0].startswith('['):\n # Case 1: CSI code.\n try:\n params = map(int, params)\n except ValueError:\n # Silently discard badly formed codes.\n pass\n else:\n self.set_csi_code(groups[2], params)\n\n elif groups[0].startswith(']'):\n # Case 2: OSC code.\n self.set_osc_code(params)\n\n raw = string[start:]\n substring = SPECIAL_PATTERN.sub(self._replace_special, raw)\n if substring or self.actions:\n yield substring\n\n def set_csi_code(self, command, params=[]):\n \"\"\" Set attributes based on CSI (Control Sequence Introducer) code.\n\n Parameters\n ----------\n command : str\n The code identifier, i.e. 
the final character in the sequence.\n\n params : sequence of integers, optional\n The parameter codes for the command.\n \"\"\"\n if command == 'm': # SGR - Select Graphic Rendition\n if params:\n self.set_sgr_code(params)\n else:\n self.set_sgr_code([0])\n\n elif (command == 'J' or # ED - Erase Data\n command == 'K'): # EL - Erase in Line\n code = params[0] if params else 0\n if 0 <= code <= 2:\n area = 'screen' if command == 'J' else 'line'\n if code == 0:\n erase_to = 'end'\n elif code == 1:\n erase_to = 'start'\n elif code == 2:\n erase_to = 'all'\n self.actions.append(EraseAction('erase', area, erase_to))\n\n elif (command == 'S' or # SU - Scroll Up\n command == 'T'): # SD - Scroll Down\n dir = 'up' if command == 'S' else 'down'\n count = params[0] if params else 1\n self.actions.append(ScrollAction('scroll', dir, 'line', count))\n\n def set_osc_code(self, params):\n \"\"\" Set attributes based on OSC (Operating System Command) parameters.\n\n Parameters\n ----------\n params : sequence of str\n The parameters for the command.\n \"\"\"\n try:\n command = int(params.pop(0))\n except (IndexError, ValueError):\n return\n\n if command == 4:\n # xterm-specific: set color number to color spec.\n try:\n color = int(params.pop(0))\n spec = params.pop(0)\n self.color_map[color] = self._parse_xterm_color_spec(spec)\n except (IndexError, ValueError):\n pass\n\n def set_sgr_code(self, params):\n \"\"\" Set attributes based on SGR (Select Graphic Rendition) codes.\n\n Parameters\n ----------\n params : sequence of ints\n A list of SGR codes for one or more SGR commands. Usually this\n sequence will have one element per command, although certain\n xterm-specific commands requires multiple elements.\n \"\"\"\n # Always consume the first parameter.\n if not params:\n return\n code = params.pop(0)\n\n if code == 0:\n self.reset_sgr()\n elif code == 1:\n if self.bold_text_enabled:\n self.bold = True\n else:\n self.intensity = 1\n elif code == 2:\n self.intensity = 0\n elif code == 3:\n self.italic = True\n elif code == 4:\n self.underline = True\n elif code == 22:\n self.intensity = 0\n self.bold = False\n elif code == 23:\n self.italic = False\n elif code == 24:\n self.underline = False\n elif code >= 30 and code <= 37:\n self.foreground_color = code - 30\n elif code == 38 and params and params.pop(0) == 5:\n # xterm-specific: 256 color support.\n if params:\n self.foreground_color = params.pop(0)\n elif code == 39:\n self.foreground_color = None\n elif code >= 40 and code <= 47:\n self.background_color = code - 40\n elif code == 48 and params and params.pop(0) == 5:\n # xterm-specific: 256 color support.\n if params:\n self.background_color = params.pop(0)\n elif code == 49:\n self.background_color = None\n\n # Recurse with unconsumed parameters.\n self.set_sgr_code(params)\n\n #---------------------------------------------------------------------------\n # Protected interface\n #---------------------------------------------------------------------------\n\n def _parse_xterm_color_spec(self, spec):\n if spec.startswith('rgb:'):\n return tuple(map(lambda x: int(x, 16), spec[4:].split('/')))\n elif spec.startswith('rgbi:'):\n return tuple(map(lambda x: int(float(x) * 255),\n spec[5:].split('/')))\n elif spec == '?':\n raise ValueError('Unsupported xterm color spec')\n return spec\n\n def _replace_special(self, match):\n special = match.group(1)\n if special == '\\f':\n self.actions.append(ScrollAction('scroll', 'down', 'page', 1))\n return ''\n\n\nclass QtAnsiCodeProcessor(AnsiCodeProcessor):\n \"\"\" 
Translates ANSI escape codes into QTextCharFormats.\n \"\"\"\n\n # A map from ANSI color codes to SVG color names or RGB(A) tuples.\n darkbg_color_map = {\n 0 : 'black', # black\n 1 : 'darkred', # red\n 2 : 'darkgreen', # green\n 3 : 'brown', # yellow\n 4 : 'darkblue', # blue\n 5 : 'darkviolet', # magenta\n 6 : 'steelblue', # cyan\n 7 : 'grey', # white\n 8 : 'grey', # black (bright)\n 9 : 'red', # red (bright)\n 10 : 'lime', # green (bright)\n 11 : 'yellow', # yellow (bright)\n 12 : 'deepskyblue', # blue (bright)\n 13 : 'magenta', # magenta (bright)\n 14 : 'cyan', # cyan (bright)\n 15 : 'white' } # white (bright)\n\n # Set the default color map for super class.\n default_color_map = darkbg_color_map.copy()\n\n def get_color(self, color, intensity=0):\n \"\"\" Returns a QColor for a given color code, or None if one cannot be\n constructed.\n \"\"\"\n if color is None:\n return None\n\n # Adjust for intensity, if possible.\n if color < 8 and intensity > 0:\n color += 8\n\n constructor = self.color_map.get(color, None)\n if isinstance(constructor, basestring):\n # If this is an X11 color name, we just hope there is a close SVG\n # color name. We could use QColor's static method\n # 'setAllowX11ColorNames()', but this is global and only available\n # on X11. It seems cleaner to aim for uniformity of behavior.\n return QtGui.QColor(constructor)\n\n elif isinstance(constructor, (tuple, list)):\n return QtGui.QColor(*constructor)\n\n return None\n\n def get_format(self):\n \"\"\" Returns a QTextCharFormat that encodes the current style attributes.\n \"\"\"\n format = QtGui.QTextCharFormat()\n\n # Set foreground color\n qcolor = self.get_color(self.foreground_color, self.intensity)\n if qcolor is not None:\n format.setForeground(qcolor)\n\n # Set background color\n qcolor = self.get_color(self.background_color, self.intensity)\n if qcolor is not None:\n format.setBackground(qcolor)\n\n # Set font weight/style options\n if self.bold:\n format.setFontWeight(QtGui.QFont.Bold)\n else:\n format.setFontWeight(QtGui.QFont.Normal)\n format.setFontItalic(self.italic)\n format.setFontUnderline(self.underline)\n\n return format\n\n def set_background_color(self, color):\n \"\"\" Given a background color (a QColor), attempt to set a color map\n that will be aesthetically pleasing.\n \"\"\"\n # Set a new default color map.\n self.default_color_map = self.darkbg_color_map.copy()\n\n if color.value() >= 127:\n # Colors appropriate for a terminal with a light background. For\n # now, only use non-bright colors...\n for i in xrange(8):\n self.default_color_map[i + 8] = self.default_color_map[i]\n\n # ...and replace white with black.\n self.default_color_map[7] = self.default_color_map[15] = 'black'\n\n # Update the current color map with the new defaults.\n self.color_map.update(self.default_color_map)\n", "path": "IPython/frontend/qt/console/ansi_code_processor.py"}]}
| 4,039 | 180 |
gh_patches_debug_15301
|
rasdani/github-patches
|
git_diff
|
conda__conda-5404
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cli.python_api` captures `stdout` from `cli.common.confirm_yn`
`cli.python_api.run_command` captures any output to `stdout`/`stderr` via `common.io.captured`. This causes the user confirmation messages from `cli.common.confirm_yn`, i.e.,
```bash
Proceed ([y]/n)?
```
to also be captured, i.e., not being output/accessible until the command finishes (which might be never if the user does not interact). The examples in the doc string are
```python
Examples:
>> run_command(Commands.CREATE, "-n newenv python=3 flask", use_exception_handler=True)
>> run_command(Commands.CREATE, "-n newenv", "python=3", "flask")
>> run_command(Commands.CREATE, ["-n newenv", "python=3", "flask"], search_path=())
```
and show exactly such use cases.
Due to this, `run_command` is only generally usable if any of `--json`, `--yes` or `--dry-run` is supplied.
There could be two solutions to this:
1. Force the user to include at least one of those arguments.
2. Instead of only capturing the output, it could be recorded but also forwarded to `stdout`/`stderr`. This could/should be made optional via some `kwarg` for `run_command`.
Possibility 1. is of course much simpler and could at least serve as a temporary solution.
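For reference, possibility 2 could be a fairly small change: a tee-style capture that records the streams but still forwards everything to the real `stdout`/`stderr`, so prompts like `Proceed ([y]/n)?` stay visible. A rough sketch follows (this is not conda's actual `common.io.captured` implementation, and the names are made up):
```python
# Tee-style capture sketch: record output while still forwarding it, so
# interactive prompts remain visible. Not the real common.io.captured code.
import sys
from contextlib import contextmanager
from io import StringIO


class _Tee(object):
    def __init__(self, stream):
        self.stream = stream
        self.recorded = StringIO()

    def write(self, data):
        self.stream.write(data)
        self.recorded.write(data)

    def flush(self):
        self.stream.flush()


@contextmanager
def captured_tee():
    out, err = _Tee(sys.stdout), _Tee(sys.stderr)
    sys.stdout, sys.stderr = out, err
    try:
        yield out, err
    finally:
        sys.stdout, sys.stderr = out.stream, err.stream
```
Whether `run_command` would expose this via a kwarg, or only switch behaviour when none of `--json`/`--yes`/`--dry-run` is present, is a separate design question.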
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/cli/python_api.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from importlib import import_module
5 from logging import getLogger
6 from shlex import split
7
8 from ..base.constants import APP_NAME, SEARCH_PATH
9 from ..base.context import context
10 from ..cli.main import generate_parser
11 from ..common.io import captured, replace_log_streams
12 from ..common.path import win_path_double_escape
13 from ..exceptions import conda_exception_handler
14 from ..gateways import initialize_logging
15
16 initialize_logging()
17 log = getLogger(__name__)
18
19
20 class Commands:
21 CONFIG = "config"
22 CLEAN = "clean"
23 CREATE = "create"
24 INFO = "info"
25 INSTALL = "install"
26 LIST = "list"
27 REMOVE = "remove"
28 SEARCH = "search"
29 UPDATE = "update"
30
31
32 def get_configure_parser_function(command):
33 module = 'conda.cli.main_' + command
34 return import_module(module).configure_parser
35
36
37 def run_command(command, *arguments, **kwargs):
38 """
39
40 Args:
41 command: one of the Commands.X
42 *arguments: instructions you would normally pass to the conda comamnd on the command line
43 see below for examples
44 **kwargs: special instructions for programmatic overrides
45 use_exception_handler: defaults to False. False will let the code calling
46 `run_command` handle all exceptions. True won't raise when an exception
47 has occured, and instead give a non-zero return code
48 search_path: an optional non-standard search path for configuration information
49 that overrides the default SEARCH_PATH
50
51 Returns: a tuple of stdout, stderr, and return_code
52
53 Examples:
54 >> run_command(Commands.CREATE, "-n newenv python=3 flask", use_exception_handler=True)
55 >> run_command(Commands.CREATE, "-n newenv", "python=3", "flask")
56 >> run_command(Commands.CREATE, ["-n newenv", "python=3", "flask"], search_path=())
57
58
59 """
60 use_exception_handler = kwargs.get('use_exception_handler', False)
61 configuration_search_path = kwargs.get('search_path', SEARCH_PATH)
62 p, sub_parsers = generate_parser()
63 get_configure_parser_function(command)(sub_parsers)
64
65 arguments = map(win_path_double_escape, arguments)
66 command_line = "%s %s" % (command, " ".join(arguments))
67 split_command_line = split(command_line)
68
69 args = p.parse_args(split_command_line)
70 context.__init__(
71 search_path=configuration_search_path,
72 app_name=APP_NAME,
73 argparse_args=args,
74 )
75 log.debug("executing command >>> conda %s", command_line)
76 try:
77 with captured() as c, replace_log_streams():
78 if use_exception_handler:
79 return_code = conda_exception_handler(args.func, args, p)
80 else:
81 return_code = args.func(args, p)
82 except Exception as e:
83 log.debug("\n stdout: %s\n stderr: %s", c.stdout, c.stderr)
84 e.stdout, e.stderr = c.stdout, c.stderr
85 raise e
86 log.debug("\n stdout: %s\n stderr: %s\n return_code: %s", c.stdout, c.stderr, return_code)
87 return c.stdout, c.stderr, return_code
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/cli/python_api.py b/conda/cli/python_api.py
--- a/conda/cli/python_api.py
+++ b/conda/cli/python_api.py
@@ -35,7 +35,10 @@
def run_command(command, *arguments, **kwargs):
- """
+ """Runs a conda command in-process with a given set of command-line interface arguments.
+
+ Differences from the command-line interface:
+ Always uses --yes flag, thus does not ask for confirmation.
Args:
command: one of the Commands.X
@@ -67,6 +70,7 @@
split_command_line = split(command_line)
args = p.parse_args(split_command_line)
+ args.yes = True # always skip user confirmation, force setting context.always_yes
context.__init__(
search_path=configuration_search_path,
app_name=APP_NAME,
|
{"golden_diff": "diff --git a/conda/cli/python_api.py b/conda/cli/python_api.py\n--- a/conda/cli/python_api.py\n+++ b/conda/cli/python_api.py\n@@ -35,7 +35,10 @@\n \n \n def run_command(command, *arguments, **kwargs):\n- \"\"\"\n+ \"\"\"Runs a conda command in-process with a given set of command-line interface arguments.\n+\n+ Differences from the command-line interface:\n+ Always uses --yes flag, thus does not ask for confirmation.\n \n Args:\n command: one of the Commands.X\n@@ -67,6 +70,7 @@\n split_command_line = split(command_line)\n \n args = p.parse_args(split_command_line)\n+ args.yes = True # always skip user confirmation, force setting context.always_yes\n context.__init__(\n search_path=configuration_search_path,\n app_name=APP_NAME,\n", "issue": "`cli.python_api` captures `stdout` from `cli.common.confirm_yn`\n`cli.python_api.run_command` captures any output to `stdout`/`stderr` via `common.io.captured`. This causes the user confirmation messages from `cli.common.confirm_yn`, i.e.,\r\n```bash\r\nProceed ([y]/n)? \r\n```\r\nto also be captured, i.e., not being output/accessible until the command finishes (which might be never if the user does not interact). The examples in the doc string are\r\n```python\r\n Examples:\r\n >> run_command(Commands.CREATE, \"-n newenv python=3 flask\", use_exception_handler=True)\r\n >> run_command(Commands.CREATE, \"-n newenv\", \"python=3\", \"flask\")\r\n >> run_command(Commands.CREATE, [\"-n newenv\", \"python=3\", \"flask\"], search_path=())\r\n```\r\nand show exactly such use cases.\r\n\r\nDue to this, `run_command` is only generally usable if any of `--json`, `--yes` or `--dry-run` is supplied.\r\nThere could be two solutions to this:\r\n 1. Force the user to include at least one of those arguments.\r\n 2. Instead of only capturing the output, it could be recorded but also forwarded to `stdout`/`stderr`. This could/should be made optional via some `kwarg` for `run_command`.\r\n\r\nPossibility 1. is of course much simpler and could at least serve as a temporary solution.\n`cli.python_api` captures `stdout` from `cli.common.confirm_yn`\n`cli.python_api.run_command` captures any output to `stdout`/`stderr` via `common.io.captured`. This causes the user confirmation messages from `cli.common.confirm_yn`, i.e.,\r\n```bash\r\nProceed ([y]/n)? \r\n```\r\nto also be captured, i.e., not being output/accessible until the command finishes (which might be never if the user does not interact). The examples in the doc string are\r\n```python\r\n Examples:\r\n >> run_command(Commands.CREATE, \"-n newenv python=3 flask\", use_exception_handler=True)\r\n >> run_command(Commands.CREATE, \"-n newenv\", \"python=3\", \"flask\")\r\n >> run_command(Commands.CREATE, [\"-n newenv\", \"python=3\", \"flask\"], search_path=())\r\n```\r\nand show exactly such use cases.\r\n\r\nDue to this, `run_command` is only generally usable if any of `--json`, `--yes` or `--dry-run` is supplied.\r\nThere could be two solutions to this:\r\n 1. Force the user to include at least one of those arguments.\r\n 2. Instead of only capturing the output, it could be recorded but also forwarded to `stdout`/`stderr`. This could/should be made optional via some `kwarg` for `run_command`.\r\n\r\nPossibility 1. 
is of course much simpler and could at least serve as a temporary solution.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom importlib import import_module\nfrom logging import getLogger\nfrom shlex import split\n\nfrom ..base.constants import APP_NAME, SEARCH_PATH\nfrom ..base.context import context\nfrom ..cli.main import generate_parser\nfrom ..common.io import captured, replace_log_streams\nfrom ..common.path import win_path_double_escape\nfrom ..exceptions import conda_exception_handler\nfrom ..gateways import initialize_logging\n\ninitialize_logging()\nlog = getLogger(__name__)\n\n\nclass Commands:\n CONFIG = \"config\"\n CLEAN = \"clean\"\n CREATE = \"create\"\n INFO = \"info\"\n INSTALL = \"install\"\n LIST = \"list\"\n REMOVE = \"remove\"\n SEARCH = \"search\"\n UPDATE = \"update\"\n\n\ndef get_configure_parser_function(command):\n module = 'conda.cli.main_' + command\n return import_module(module).configure_parser\n\n\ndef run_command(command, *arguments, **kwargs):\n \"\"\"\n\n Args:\n command: one of the Commands.X\n *arguments: instructions you would normally pass to the conda comamnd on the command line\n see below for examples\n **kwargs: special instructions for programmatic overrides\n use_exception_handler: defaults to False. False will let the code calling\n `run_command` handle all exceptions. True won't raise when an exception\n has occured, and instead give a non-zero return code\n search_path: an optional non-standard search path for configuration information\n that overrides the default SEARCH_PATH\n\n Returns: a tuple of stdout, stderr, and return_code\n\n Examples:\n >> run_command(Commands.CREATE, \"-n newenv python=3 flask\", use_exception_handler=True)\n >> run_command(Commands.CREATE, \"-n newenv\", \"python=3\", \"flask\")\n >> run_command(Commands.CREATE, [\"-n newenv\", \"python=3\", \"flask\"], search_path=())\n\n\n \"\"\"\n use_exception_handler = kwargs.get('use_exception_handler', False)\n configuration_search_path = kwargs.get('search_path', SEARCH_PATH)\n p, sub_parsers = generate_parser()\n get_configure_parser_function(command)(sub_parsers)\n\n arguments = map(win_path_double_escape, arguments)\n command_line = \"%s %s\" % (command, \" \".join(arguments))\n split_command_line = split(command_line)\n\n args = p.parse_args(split_command_line)\n context.__init__(\n search_path=configuration_search_path,\n app_name=APP_NAME,\n argparse_args=args,\n )\n log.debug(\"executing command >>> conda %s\", command_line)\n try:\n with captured() as c, replace_log_streams():\n if use_exception_handler:\n return_code = conda_exception_handler(args.func, args, p)\n else:\n return_code = args.func(args, p)\n except Exception as e:\n log.debug(\"\\n stdout: %s\\n stderr: %s\", c.stdout, c.stderr)\n e.stdout, e.stderr = c.stdout, c.stderr\n raise e\n log.debug(\"\\n stdout: %s\\n stderr: %s\\n return_code: %s\", c.stdout, c.stderr, return_code)\n return c.stdout, c.stderr, return_code\n", "path": "conda/cli/python_api.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom importlib import import_module\nfrom logging import getLogger\nfrom shlex import split\n\nfrom ..base.constants import APP_NAME, SEARCH_PATH\nfrom ..base.context import context\nfrom ..cli.main import generate_parser\nfrom ..common.io import captured, replace_log_streams\nfrom ..common.path import 
win_path_double_escape\nfrom ..exceptions import conda_exception_handler\nfrom ..gateways import initialize_logging\n\ninitialize_logging()\nlog = getLogger(__name__)\n\n\nclass Commands:\n CONFIG = \"config\"\n CLEAN = \"clean\"\n CREATE = \"create\"\n INFO = \"info\"\n INSTALL = \"install\"\n LIST = \"list\"\n REMOVE = \"remove\"\n SEARCH = \"search\"\n UPDATE = \"update\"\n\n\ndef get_configure_parser_function(command):\n module = 'conda.cli.main_' + command\n return import_module(module).configure_parser\n\n\ndef run_command(command, *arguments, **kwargs):\n \"\"\"Runs a conda command in-process with a given set of command-line interface arguments.\n\n Differences from the command-line interface:\n Always uses --yes flag, thus does not ask for confirmation.\n\n Args:\n command: one of the Commands.X\n *arguments: instructions you would normally pass to the conda comamnd on the command line\n see below for examples\n **kwargs: special instructions for programmatic overrides\n use_exception_handler: defaults to False. False will let the code calling\n `run_command` handle all exceptions. True won't raise when an exception\n has occured, and instead give a non-zero return code\n search_path: an optional non-standard search path for configuration information\n that overrides the default SEARCH_PATH\n\n Returns: a tuple of stdout, stderr, and return_code\n\n Examples:\n >> run_command(Commands.CREATE, \"-n newenv python=3 flask\", use_exception_handler=True)\n >> run_command(Commands.CREATE, \"-n newenv\", \"python=3\", \"flask\")\n >> run_command(Commands.CREATE, [\"-n newenv\", \"python=3\", \"flask\"], search_path=())\n\n\n \"\"\"\n use_exception_handler = kwargs.get('use_exception_handler', False)\n configuration_search_path = kwargs.get('search_path', SEARCH_PATH)\n p, sub_parsers = generate_parser()\n get_configure_parser_function(command)(sub_parsers)\n\n arguments = map(win_path_double_escape, arguments)\n command_line = \"%s %s\" % (command, \" \".join(arguments))\n split_command_line = split(command_line)\n\n args = p.parse_args(split_command_line)\n args.yes = True # always skip user confirmation, force setting context.always_yes\n context.__init__(\n search_path=configuration_search_path,\n app_name=APP_NAME,\n argparse_args=args,\n )\n log.debug(\"executing command >>> conda %s\", command_line)\n try:\n with captured() as c, replace_log_streams():\n if use_exception_handler:\n return_code = conda_exception_handler(args.func, args, p)\n else:\n return_code = args.func(args, p)\n except Exception as e:\n log.debug(\"\\n stdout: %s\\n stderr: %s\", c.stdout, c.stderr)\n e.stdout, e.stderr = c.stdout, c.stderr\n raise e\n log.debug(\"\\n stdout: %s\\n stderr: %s\\n return_code: %s\", c.stdout, c.stderr, return_code)\n return c.stdout, c.stderr, return_code\n", "path": "conda/cli/python_api.py"}]}
| 1,784 | 195 |
gh_patches_debug_3476
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-980
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
platform.dist() deprecated
```
WARNING py.warnings platform:dist:380 /usr/lib/python3.5/site-packages/qutebrowser/utils/version.py:165: PendingDeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 and will be removed in Python 3.7
osver = ', '.join([e for e in platform.dist() if e])
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/utils/version.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Utilities to show various version informations."""
21
22 import sys
23 import glob
24 import os.path
25 import platform
26 import subprocess
27 import importlib
28 import collections
29
30 from PyQt5.QtCore import QT_VERSION_STR, PYQT_VERSION_STR, qVersion
31 from PyQt5.QtWebKit import qWebKitVersion
32 from PyQt5.QtNetwork import QSslSocket
33 from PyQt5.QtWidgets import QApplication
34
35 import qutebrowser
36 from qutebrowser.utils import log, utils
37
38
39 GPL_BOILERPLATE = """
40 This program is free software: you can redistribute it and/or modify
41 it under the terms of the GNU General Public License as published by
42 the Free Software Foundation, either version 3 of the License, or
43 (at your option) any later version.
44
45 This program is distributed in the hope that it will be useful,
46 but WITHOUT ANY WARRANTY; without even the implied warranty of
47 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
48 GNU General Public License for more details.
49
50 You should have received a copy of the GNU General Public License
51 along with this program. If not, see <http://www.gnu.org/licenses/> or use
52 :open qute:gpl.
53 """
54
55
56 def _git_str():
57 """Try to find out git version.
58
59 Return:
60 string containing the git commit ID.
61 None if there was an error or we're not in a git repo.
62 """
63 # First try via subprocess if possible
64 commit = None
65 if not hasattr(sys, "frozen"):
66 try:
67 gitpath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
68 os.path.pardir, os.path.pardir)
69 except (NameError, OSError):
70 log.misc.exception("Error while getting git path")
71 else:
72 commit = _git_str_subprocess(gitpath)
73 if commit is not None:
74 return commit
75 # If that fails, check the git-commit-id file.
76 try:
77 return utils.read_file('git-commit-id')
78 except (OSError, ImportError):
79 return None
80
81
82 def _git_str_subprocess(gitpath):
83 """Try to get the git commit ID and timestamp by calling git.
84
85 Args:
86 gitpath: The path where the .git folder is.
87
88 Return:
89 The ID/timestamp on success, None on failure.
90 """
91 if not os.path.isdir(os.path.join(gitpath, ".git")):
92 return None
93 try:
94 cid = subprocess.check_output(
95 ['git', 'describe', '--tags', '--dirty', '--always'],
96 cwd=gitpath).decode('UTF-8').strip()
97 date = subprocess.check_output(
98 ['git', 'show', '-s', '--format=%ci', 'HEAD'],
99 cwd=gitpath).decode('UTF-8').strip()
100 return '{} ({})'.format(cid, date)
101 except (subprocess.CalledProcessError, OSError):
102 return None
103
104
105 def _release_info():
106 """Try to gather distribution release informations.
107
108 Return:
109 list of (filename, content) tuples.
110 """
111 data = []
112 for fn in glob.glob("/etc/*-release"):
113 try:
114 with open(fn, 'r', encoding='utf-8') as f:
115 data.append((fn, ''.join(f.readlines()))) # pragma: no branch
116 except OSError:
117 log.misc.exception("Error while reading {}.".format(fn))
118 return data
119
120
121 def _module_versions():
122 """Get versions of optional modules.
123
124 Return:
125 A list of lines with version info.
126 """
127 lines = []
128 modules = collections.OrderedDict([
129 ('sip', ['SIP_VERSION_STR']),
130 ('colorlog', []),
131 ('colorama', ['VERSION', '__version__']),
132 ('pypeg2', ['__version__']),
133 ('jinja2', ['__version__']),
134 ('pygments', ['__version__']),
135 ('yaml', ['__version__']),
136 ])
137 for name, attributes in modules.items():
138 try:
139 module = importlib.import_module(name)
140 except ImportError:
141 text = '{}: no'.format(name)
142 else:
143 for attr in attributes:
144 try:
145 text = '{}: {}'.format(name, getattr(module, attr))
146 except AttributeError:
147 pass
148 else:
149 break
150 else:
151 text = '{}: yes'.format(name)
152 lines.append(text)
153 return lines
154
155
156 def _os_info():
157 """Get operating system info.
158
159 Return:
160 A list of lines with version info.
161 """
162 lines = []
163 releaseinfo = None
164 if sys.platform == 'linux':
165 osver = ', '.join([e for e in platform.dist() if e])
166 releaseinfo = _release_info()
167 elif sys.platform == 'win32':
168 osver = ', '.join(platform.win32_ver())
169 elif sys.platform == 'darwin':
170 # pylint: disable=unpacking-non-sequence
171 # See https://bitbucket.org/logilab/pylint/issue/165/
172 release, versioninfo, machine = platform.mac_ver()
173 if all(not e for e in versioninfo):
174 versioninfo = ''
175 else:
176 versioninfo = '.'.join(versioninfo)
177 osver = ', '.join([e for e in (release, versioninfo, machine) if e])
178 else:
179 osver = '?'
180 lines.append('OS Version: {}'.format(osver))
181 if releaseinfo is not None:
182 for (fn, data) in releaseinfo:
183 lines += ['', '--- {} ---'.format(fn), data]
184 return lines
185
186
187 def version(short=False):
188 """Return a string with various version informations.
189
190 Args:
191 short: Return a shortened output.
192 """
193 lines = ["qutebrowser v{}".format(qutebrowser.__version__)]
194 gitver = _git_str()
195 if gitver is not None:
196 lines.append("Git commit: {}".format(gitver))
197 lines += [
198 '',
199 '{}: {}'.format(platform.python_implementation(),
200 platform.python_version()),
201 'Qt: {}, runtime: {}'.format(QT_VERSION_STR, qVersion()),
202 'PyQt: {}'.format(PYQT_VERSION_STR),
203 ]
204
205 if not short:
206 style = QApplication.instance().style()
207 lines += [
208 'Style: {}'.format(style.metaObject().className()),
209 'Desktop: {}'.format(os.environ.get('DESKTOP_SESSION')),
210 ]
211
212 lines += _module_versions()
213
214 lines += [
215 'Webkit: {}'.format(qWebKitVersion()),
216 'Harfbuzz: {}'.format(os.environ.get('QT_HARFBUZZ', 'system')),
217 'SSL: {}'.format(QSslSocket.sslLibraryVersionString()),
218 '',
219 'Frozen: {}'.format(hasattr(sys, 'frozen')),
220 'Platform: {}, {}'.format(platform.platform(),
221 platform.architecture()[0]),
222 ]
223 lines += _os_info()
224 return '\n'.join(lines)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qutebrowser/utils/version.py b/qutebrowser/utils/version.py
--- a/qutebrowser/utils/version.py
+++ b/qutebrowser/utils/version.py
@@ -162,7 +162,7 @@
lines = []
releaseinfo = None
if sys.platform == 'linux':
- osver = ', '.join([e for e in platform.dist() if e])
+ osver = ''
releaseinfo = _release_info()
elif sys.platform == 'win32':
osver = ', '.join(platform.win32_ver())
|
{"golden_diff": "diff --git a/qutebrowser/utils/version.py b/qutebrowser/utils/version.py\n--- a/qutebrowser/utils/version.py\n+++ b/qutebrowser/utils/version.py\n@@ -162,7 +162,7 @@\n lines = []\n releaseinfo = None\n if sys.platform == 'linux':\n- osver = ', '.join([e for e in platform.dist() if e])\n+ osver = ''\n releaseinfo = _release_info()\n elif sys.platform == 'win32':\n osver = ', '.join(platform.win32_ver())\n", "issue": "platform.dist() deprecated\n```\nWARNING py.warnings platform:dist:380 /usr/lib/python3.5/site-packages/qutebrowser/utils/version.py:165: PendingDeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 and will be removed in Python 3.7\n osver = ', '.join([e for e in platform.dist() if e])\n```\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Utilities to show various version informations.\"\"\"\n\nimport sys\nimport glob\nimport os.path\nimport platform\nimport subprocess\nimport importlib\nimport collections\n\nfrom PyQt5.QtCore import QT_VERSION_STR, PYQT_VERSION_STR, qVersion\nfrom PyQt5.QtWebKit import qWebKitVersion\nfrom PyQt5.QtNetwork import QSslSocket\nfrom PyQt5.QtWidgets import QApplication\n\nimport qutebrowser\nfrom qutebrowser.utils import log, utils\n\n\nGPL_BOILERPLATE = \"\"\"\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/> or use\n:open qute:gpl.\n\"\"\"\n\n\ndef _git_str():\n \"\"\"Try to find out git version.\n\n Return:\n string containing the git commit ID.\n None if there was an error or we're not in a git repo.\n \"\"\"\n # First try via subprocess if possible\n commit = None\n if not hasattr(sys, \"frozen\"):\n try:\n gitpath = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n os.path.pardir, os.path.pardir)\n except (NameError, OSError):\n log.misc.exception(\"Error while getting git path\")\n else:\n commit = _git_str_subprocess(gitpath)\n if commit is not None:\n return commit\n # If that fails, check the git-commit-id file.\n try:\n return utils.read_file('git-commit-id')\n except (OSError, ImportError):\n return None\n\n\ndef _git_str_subprocess(gitpath):\n \"\"\"Try to get the git commit ID and timestamp by calling git.\n\n Args:\n gitpath: The path where the .git folder is.\n\n Return:\n The ID/timestamp on success, None on failure.\n \"\"\"\n if not os.path.isdir(os.path.join(gitpath, \".git\")):\n return None\n try:\n cid = subprocess.check_output(\n ['git', 'describe', '--tags', '--dirty', '--always'],\n cwd=gitpath).decode('UTF-8').strip()\n date = subprocess.check_output(\n ['git', 'show', '-s', '--format=%ci', 'HEAD'],\n cwd=gitpath).decode('UTF-8').strip()\n return '{} ({})'.format(cid, date)\n except (subprocess.CalledProcessError, OSError):\n return None\n\n\ndef _release_info():\n \"\"\"Try to gather distribution release informations.\n\n Return:\n list of (filename, content) tuples.\n \"\"\"\n data = []\n for fn in glob.glob(\"/etc/*-release\"):\n try:\n with open(fn, 'r', encoding='utf-8') as f:\n data.append((fn, ''.join(f.readlines()))) # pragma: no branch\n except OSError:\n log.misc.exception(\"Error while reading {}.\".format(fn))\n return data\n\n\ndef _module_versions():\n \"\"\"Get versions of optional modules.\n\n Return:\n A list of lines with version info.\n \"\"\"\n lines = []\n modules = collections.OrderedDict([\n ('sip', ['SIP_VERSION_STR']),\n ('colorlog', []),\n ('colorama', ['VERSION', '__version__']),\n ('pypeg2', ['__version__']),\n ('jinja2', ['__version__']),\n ('pygments', ['__version__']),\n ('yaml', ['__version__']),\n ])\n for name, attributes in modules.items():\n try:\n module = importlib.import_module(name)\n except ImportError:\n text = '{}: no'.format(name)\n else:\n for attr in attributes:\n try:\n text = '{}: {}'.format(name, getattr(module, attr))\n except AttributeError:\n pass\n else:\n break\n else:\n text = '{}: yes'.format(name)\n lines.append(text)\n return lines\n\n\ndef _os_info():\n \"\"\"Get operating system info.\n\n Return:\n A list of lines with version info.\n \"\"\"\n lines = []\n releaseinfo = None\n if sys.platform == 'linux':\n osver = ', '.join([e for e in platform.dist() if e])\n releaseinfo = _release_info()\n elif sys.platform == 'win32':\n osver = ', '.join(platform.win32_ver())\n elif sys.platform == 'darwin':\n # pylint: disable=unpacking-non-sequence\n # See https://bitbucket.org/logilab/pylint/issue/165/\n release, versioninfo, machine = platform.mac_ver()\n if all(not e for e in versioninfo):\n versioninfo = ''\n else:\n versioninfo = '.'.join(versioninfo)\n osver = ', '.join([e for e in (release, versioninfo, machine) if e])\n else:\n osver = '?'\n lines.append('OS Version: {}'.format(osver))\n if releaseinfo is not None:\n for (fn, data) in releaseinfo:\n lines += ['', '--- {} ---'.format(fn), data]\n return lines\n\n\ndef version(short=False):\n \"\"\"Return a string 
with various version informations.\n\n Args:\n short: Return a shortened output.\n \"\"\"\n lines = [\"qutebrowser v{}\".format(qutebrowser.__version__)]\n gitver = _git_str()\n if gitver is not None:\n lines.append(\"Git commit: {}\".format(gitver))\n lines += [\n '',\n '{}: {}'.format(platform.python_implementation(),\n platform.python_version()),\n 'Qt: {}, runtime: {}'.format(QT_VERSION_STR, qVersion()),\n 'PyQt: {}'.format(PYQT_VERSION_STR),\n ]\n\n if not short:\n style = QApplication.instance().style()\n lines += [\n 'Style: {}'.format(style.metaObject().className()),\n 'Desktop: {}'.format(os.environ.get('DESKTOP_SESSION')),\n ]\n\n lines += _module_versions()\n\n lines += [\n 'Webkit: {}'.format(qWebKitVersion()),\n 'Harfbuzz: {}'.format(os.environ.get('QT_HARFBUZZ', 'system')),\n 'SSL: {}'.format(QSslSocket.sslLibraryVersionString()),\n '',\n 'Frozen: {}'.format(hasattr(sys, 'frozen')),\n 'Platform: {}, {}'.format(platform.platform(),\n platform.architecture()[0]),\n ]\n lines += _os_info()\n return '\\n'.join(lines)\n", "path": "qutebrowser/utils/version.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Utilities to show various version informations.\"\"\"\n\nimport sys\nimport glob\nimport os.path\nimport platform\nimport subprocess\nimport importlib\nimport collections\n\nfrom PyQt5.QtCore import QT_VERSION_STR, PYQT_VERSION_STR, qVersion\nfrom PyQt5.QtWebKit import qWebKitVersion\nfrom PyQt5.QtNetwork import QSslSocket\nfrom PyQt5.QtWidgets import QApplication\n\nimport qutebrowser\nfrom qutebrowser.utils import log, utils\n\n\nGPL_BOILERPLATE = \"\"\"\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/> or use\n:open qute:gpl.\n\"\"\"\n\n\ndef _git_str():\n \"\"\"Try to find out git version.\n\n Return:\n string containing the git commit ID.\n None if there was an error or we're not in a git repo.\n \"\"\"\n # First try via subprocess if possible\n commit = None\n if not hasattr(sys, \"frozen\"):\n try:\n gitpath = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n os.path.pardir, os.path.pardir)\n except (NameError, OSError):\n log.misc.exception(\"Error while getting git path\")\n else:\n commit = _git_str_subprocess(gitpath)\n if commit is not None:\n return commit\n # If that fails, check the git-commit-id file.\n try:\n return utils.read_file('git-commit-id')\n except (OSError, ImportError):\n return None\n\n\ndef _git_str_subprocess(gitpath):\n \"\"\"Try to get the git commit ID and timestamp by calling git.\n\n Args:\n gitpath: The path where the .git folder is.\n\n Return:\n The ID/timestamp on success, None on failure.\n \"\"\"\n if not os.path.isdir(os.path.join(gitpath, \".git\")):\n return None\n try:\n cid = subprocess.check_output(\n ['git', 'describe', '--tags', '--dirty', '--always'],\n cwd=gitpath).decode('UTF-8').strip()\n date = subprocess.check_output(\n ['git', 'show', '-s', '--format=%ci', 'HEAD'],\n cwd=gitpath).decode('UTF-8').strip()\n return '{} ({})'.format(cid, date)\n except (subprocess.CalledProcessError, OSError):\n return None\n\n\ndef _release_info():\n \"\"\"Try to gather distribution release informations.\n\n Return:\n list of (filename, content) tuples.\n \"\"\"\n data = []\n for fn in glob.glob(\"/etc/*-release\"):\n try:\n with open(fn, 'r', encoding='utf-8') as f:\n data.append((fn, ''.join(f.readlines()))) # pragma: no branch\n except OSError:\n log.misc.exception(\"Error while reading {}.\".format(fn))\n return data\n\n\ndef _module_versions():\n \"\"\"Get versions of optional modules.\n\n Return:\n A list of lines with version info.\n \"\"\"\n lines = []\n modules = collections.OrderedDict([\n ('sip', ['SIP_VERSION_STR']),\n ('colorlog', []),\n ('colorama', ['VERSION', '__version__']),\n ('pypeg2', ['__version__']),\n ('jinja2', ['__version__']),\n ('pygments', ['__version__']),\n ('yaml', ['__version__']),\n ])\n for name, attributes in modules.items():\n try:\n module = importlib.import_module(name)\n except ImportError:\n text = '{}: no'.format(name)\n else:\n for attr in attributes:\n try:\n text = '{}: {}'.format(name, getattr(module, attr))\n except AttributeError:\n pass\n else:\n break\n else:\n text = '{}: yes'.format(name)\n lines.append(text)\n return lines\n\n\ndef _os_info():\n \"\"\"Get operating system info.\n\n Return:\n A list of lines with version info.\n \"\"\"\n lines = []\n releaseinfo = None\n if sys.platform == 'linux':\n osver = ''\n releaseinfo = _release_info()\n elif sys.platform == 'win32':\n osver = ', '.join(platform.win32_ver())\n elif sys.platform == 'darwin':\n # pylint: disable=unpacking-non-sequence\n # See https://bitbucket.org/logilab/pylint/issue/165/\n release, versioninfo, machine = platform.mac_ver()\n if all(not e for e in versioninfo):\n versioninfo = ''\n else:\n versioninfo = '.'.join(versioninfo)\n osver = ', '.join([e for e in (release, versioninfo, machine) if e])\n else:\n osver = '?'\n lines.append('OS Version: {}'.format(osver))\n if releaseinfo is not None:\n for (fn, data) in releaseinfo:\n lines += ['', '--- {} ---'.format(fn), data]\n return lines\n\n\ndef version(short=False):\n \"\"\"Return a string with various version informations.\n\n 
Args:\n short: Return a shortened output.\n \"\"\"\n lines = [\"qutebrowser v{}\".format(qutebrowser.__version__)]\n gitver = _git_str()\n if gitver is not None:\n lines.append(\"Git commit: {}\".format(gitver))\n lines += [\n '',\n '{}: {}'.format(platform.python_implementation(),\n platform.python_version()),\n 'Qt: {}, runtime: {}'.format(QT_VERSION_STR, qVersion()),\n 'PyQt: {}'.format(PYQT_VERSION_STR),\n ]\n\n if not short:\n style = QApplication.instance().style()\n lines += [\n 'Style: {}'.format(style.metaObject().className()),\n 'Desktop: {}'.format(os.environ.get('DESKTOP_SESSION')),\n ]\n\n lines += _module_versions()\n\n lines += [\n 'Webkit: {}'.format(qWebKitVersion()),\n 'Harfbuzz: {}'.format(os.environ.get('QT_HARFBUZZ', 'system')),\n 'SSL: {}'.format(QSslSocket.sslLibraryVersionString()),\n '',\n 'Frozen: {}'.format(hasattr(sys, 'frozen')),\n 'Platform: {}, {}'.format(platform.platform(),\n platform.architecture()[0]),\n ]\n lines += _os_info()\n return '\\n'.join(lines)\n", "path": "qutebrowser/utils/version.py"}]}
| 2,603 | 124 |
gh_patches_debug_66311
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-2130
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the path of `elasticdl_ps` compatible with various Python versions and OS in the Dockerfile.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl_client/api.py`
Content:
```
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import os
15 import shutil
16
17 import docker
18 from jinja2 import Template
19
20 from elasticdl_client.common import k8s_client as k8s
21 from elasticdl_client.common.args import (
22 build_arguments_from_parsed_result,
23 parse_envs,
24 wrap_python_args_with_string,
25 )
26 from elasticdl_client.common.constants import BashCommandTemplate
27 from elasticdl_client.common.log_utils import default_logger as logger
28
29
30 def init_zoo(args):
31 logger.info("Create the Dockerfile for the model zoo.")
32
33 # Copy cluster spec file to the current directory if specified
34 cluster_spec_path = args.cluster_spec
35 cluster_spec_name = None
36 if cluster_spec_path:
37 if not os.path.exists(cluster_spec_path):
38 raise RuntimeError(
39 "The cluster spec {} doesn't exist".format(cluster_spec_path)
40 )
41 try:
42 shutil.copy2(cluster_spec_path, os.getcwd())
43 except shutil.SameFileError:
44 pass
45 cluster_spec_name = os.path.basename(cluster_spec_path)
46
47 # Create the docker file
48 # Build the content from the template and arguments
49 tmpl_str = """\
50 FROM {{ BASE_IMAGE }} as base
51
52 RUN pip install elasticdl_preprocessing\
53 --extra-index-url={{ EXTRA_PYPI_INDEX }}
54
55 RUN pip install elasticdl --extra-index-url={{ EXTRA_PYPI_INDEX }}
56 ENV PATH /usr/local/lib/python3.6/site-packages/elasticdl/go/bin:$PATH
57
58 COPY . /model_zoo
59 RUN pip install -r /model_zoo/requirements.txt\
60 --extra-index-url={{ EXTRA_PYPI_INDEX }}
61
62 {% if CLUSTER_SPEC_NAME %}\
63 COPY ./{{ CLUSTER_SPEC_NAME }} /cluster_spec/{{ CLUSTER_SPEC_NAME }}\
64 {% endif %}
65 """
66 template = Template(tmpl_str)
67 docker_file_content = template.render(
68 BASE_IMAGE=args.base_image,
69 EXTRA_PYPI_INDEX=args.extra_pypi_index,
70 CLUSTER_SPEC_NAME=cluster_spec_name,
71 )
72
73 with open("./Dockerfile", mode="w") as f:
74 f.write(docker_file_content)
75
76
77 def build_zoo(args):
78 logger.info("Build the image for the model zoo.")
79 # Call docker api to build the image
80 # Validate the image name schema
81 client = docker.DockerClient.from_env()
82 for line in client.api.build(
83 dockerfile="./Dockerfile",
84 path=args.path,
85 rm=True,
86 tag=args.image,
87 decode=True,
88 ):
89 _print_docker_progress(line)
90
91
92 def push_zoo(args):
93 logger.info("Push the image for the model zoo.")
94 # Call docker api to push the image to remote registry
95 client = docker.DockerClient.from_env()
96 for line in client.api.push(args.image, stream=True, decode=True):
97 _print_docker_progress(line)
98
99
100 def train(args):
101 container_args = [
102 "--worker_image",
103 args.image_name,
104 "--model_zoo",
105 args.model_zoo,
106 "--cluster_spec",
107 args.cluster_spec,
108 ]
109
110 container_args.extend(
111 build_arguments_from_parsed_result(
112 args,
113 filter_args=[
114 "model_zoo",
115 "cluster_spec",
116 "worker_image",
117 "force_use_kube_config_file",
118 "func",
119 ],
120 )
121 )
122
123 _submit_job(args.image_name, args, container_args)
124
125
126 def evaluate(args):
127 container_args = [
128 "--worker_image",
129 args.image_name,
130 "--model_zoo",
131 args.model_zoo,
132 "--cluster_spec",
133 args.cluster_spec,
134 ]
135 container_args.extend(
136 build_arguments_from_parsed_result(
137 args,
138 filter_args=[
139 "model_zoo",
140 "cluster_spec",
141 "worker_image",
142 "force_use_kube_config_file",
143 "func",
144 ],
145 )
146 )
147
148 _submit_job(args.image_name, args, container_args)
149
150
151 def predict(args):
152 container_args = [
153 "--worker_image",
154 args.image_name,
155 "--model_zoo",
156 args.model_zoo,
157 "--cluster_spec",
158 args.cluster_spec,
159 ]
160
161 container_args.extend(
162 build_arguments_from_parsed_result(
163 args,
164 filter_args=[
165 "model_zoo",
166 "cluster_spec",
167 "worker_image",
168 "force_use_kube_config_file",
169 "func",
170 ],
171 )
172 )
173
174 _submit_job(args.image_name, args, container_args)
175
176
177 def _submit_job(image_name, client_args, container_args):
178 client = k8s.Client(
179 image_name=image_name,
180 namespace=client_args.namespace,
181 job_name=client_args.job_name,
182 cluster_spec=client_args.cluster_spec,
183 force_use_kube_config_file=client_args.force_use_kube_config_file,
184 )
185
186 container_args = wrap_python_args_with_string(container_args)
187
188 master_client_command = (
189 BashCommandTemplate.SET_PIPEFAIL
190 + " python -m elasticdl.python.master.main"
191 )
192 container_args.insert(0, master_client_command)
193 if client_args.log_file_path:
194 container_args.append(
195 BashCommandTemplate.REDIRECTION.format(client_args.log_file_path)
196 )
197
198 python_command = " ".join(container_args)
199 container_args = ["-c", python_command]
200
201 if client_args.yaml:
202 client.dump_master_yaml(
203 resource_requests=client_args.master_resource_request,
204 resource_limits=client_args.master_resource_limit,
205 args=container_args,
206 pod_priority=client_args.master_pod_priority,
207 image_pull_policy=client_args.image_pull_policy,
208 restart_policy=client_args.restart_policy,
209 volume=client_args.volume,
210 envs=parse_envs(client_args.envs),
211 yaml=client_args.yaml,
212 )
213 logger.info(
214 "ElasticDL job %s YAML has been dumped into file %s."
215 % (client_args.job_name, client_args.yaml)
216 )
217 else:
218 client.create_master(
219 resource_requests=client_args.master_resource_request,
220 resource_limits=client_args.master_resource_limit,
221 args=container_args,
222 pod_priority=client_args.master_pod_priority,
223 image_pull_policy=client_args.image_pull_policy,
224 restart_policy=client_args.restart_policy,
225 volume=client_args.volume,
226 envs=parse_envs(client_args.envs),
227 )
228 logger.info(
229 "ElasticDL job %s was successfully submitted. "
230 "The master pod is: %s."
231 % (client_args.job_name, client.get_master_pod_name())
232 )
233
234
235 def _get_docker_client(docker_base_url, docker_tlscert, docker_tlskey):
236 if docker_tlscert and docker_tlskey:
237 tls_config = docker.tls.TLSConfig(
238 client_cert=(docker_tlscert, docker_tlskey)
239 )
240 return docker.APIClient(base_url=docker_base_url, tls=tls_config)
241 else:
242 return docker.APIClient(base_url=docker_base_url)
243
244
245 def _print_docker_progress(line):
246 error = line.get("error", None)
247 if error:
248 raise RuntimeError("Docker image build: " + error)
249 stream = line.get("stream", None)
250 if stream:
251 print(stream, end="")
252 else:
253 print(line)
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl_client/api.py b/elasticdl_client/api.py
--- a/elasticdl_client/api.py
+++ b/elasticdl_client/api.py
@@ -53,7 +53,9 @@
--extra-index-url={{ EXTRA_PYPI_INDEX }}
RUN pip install elasticdl --extra-index-url={{ EXTRA_PYPI_INDEX }}
-ENV PATH /usr/local/lib/python3.6/site-packages/elasticdl/go/bin:$PATH
+RUN /bin/bash -c\
+ 'PYTHON_PKG_PATH=$(pip3 show elasticdl | grep "Location:" | cut -d " " -f2);\
+ echo "PATH=${PYTHON_PKG_PATH}/elasticdl/go/bin:$PATH" >> /root/.bashrc'
COPY . /model_zoo
RUN pip install -r /model_zoo/requirements.txt\
|
{"golden_diff": "diff --git a/elasticdl_client/api.py b/elasticdl_client/api.py\n--- a/elasticdl_client/api.py\n+++ b/elasticdl_client/api.py\n@@ -53,7 +53,9 @@\n --extra-index-url={{ EXTRA_PYPI_INDEX }}\n \n RUN pip install elasticdl --extra-index-url={{ EXTRA_PYPI_INDEX }}\n-ENV PATH /usr/local/lib/python3.6/site-packages/elasticdl/go/bin:$PATH\n+RUN /bin/bash -c\\\n+ 'PYTHON_PKG_PATH=$(pip3 show elasticdl | grep \"Location:\" | cut -d \" \" -f2);\\\n+ echo \"PATH=${PYTHON_PKG_PATH}/elasticdl/go/bin:$PATH\" >> /root/.bashrc'\n \n COPY . /model_zoo\n RUN pip install -r /model_zoo/requirements.txt\\\n", "issue": "Make the path of `elasticdl_ps` compatible with various Python versions and OS in the Dockerfile. \n\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport docker\nfrom jinja2 import Template\n\nfrom elasticdl_client.common import k8s_client as k8s\nfrom elasticdl_client.common.args import (\n build_arguments_from_parsed_result,\n parse_envs,\n wrap_python_args_with_string,\n)\nfrom elasticdl_client.common.constants import BashCommandTemplate\nfrom elasticdl_client.common.log_utils import default_logger as logger\n\n\ndef init_zoo(args):\n logger.info(\"Create the Dockerfile for the model zoo.\")\n\n # Copy cluster spec file to the current directory if specified\n cluster_spec_path = args.cluster_spec\n cluster_spec_name = None\n if cluster_spec_path:\n if not os.path.exists(cluster_spec_path):\n raise RuntimeError(\n \"The cluster spec {} doesn't exist\".format(cluster_spec_path)\n )\n try:\n shutil.copy2(cluster_spec_path, os.getcwd())\n except shutil.SameFileError:\n pass\n cluster_spec_name = os.path.basename(cluster_spec_path)\n\n # Create the docker file\n # Build the content from the template and arguments\n tmpl_str = \"\"\"\\\nFROM {{ BASE_IMAGE }} as base\n\nRUN pip install elasticdl_preprocessing\\\n --extra-index-url={{ EXTRA_PYPI_INDEX }}\n\nRUN pip install elasticdl --extra-index-url={{ EXTRA_PYPI_INDEX }}\nENV PATH /usr/local/lib/python3.6/site-packages/elasticdl/go/bin:$PATH\n\nCOPY . 
/model_zoo\nRUN pip install -r /model_zoo/requirements.txt\\\n --extra-index-url={{ EXTRA_PYPI_INDEX }}\n\n{% if CLUSTER_SPEC_NAME %}\\\nCOPY ./{{ CLUSTER_SPEC_NAME }} /cluster_spec/{{ CLUSTER_SPEC_NAME }}\\\n{% endif %}\n\"\"\"\n template = Template(tmpl_str)\n docker_file_content = template.render(\n BASE_IMAGE=args.base_image,\n EXTRA_PYPI_INDEX=args.extra_pypi_index,\n CLUSTER_SPEC_NAME=cluster_spec_name,\n )\n\n with open(\"./Dockerfile\", mode=\"w\") as f:\n f.write(docker_file_content)\n\n\ndef build_zoo(args):\n logger.info(\"Build the image for the model zoo.\")\n # Call docker api to build the image\n # Validate the image name schema\n client = docker.DockerClient.from_env()\n for line in client.api.build(\n dockerfile=\"./Dockerfile\",\n path=args.path,\n rm=True,\n tag=args.image,\n decode=True,\n ):\n _print_docker_progress(line)\n\n\ndef push_zoo(args):\n logger.info(\"Push the image for the model zoo.\")\n # Call docker api to push the image to remote registry\n client = docker.DockerClient.from_env()\n for line in client.api.push(args.image, stream=True, decode=True):\n _print_docker_progress(line)\n\n\ndef train(args):\n container_args = [\n \"--worker_image\",\n args.image_name,\n \"--model_zoo\",\n args.model_zoo,\n \"--cluster_spec\",\n args.cluster_spec,\n ]\n\n container_args.extend(\n build_arguments_from_parsed_result(\n args,\n filter_args=[\n \"model_zoo\",\n \"cluster_spec\",\n \"worker_image\",\n \"force_use_kube_config_file\",\n \"func\",\n ],\n )\n )\n\n _submit_job(args.image_name, args, container_args)\n\n\ndef evaluate(args):\n container_args = [\n \"--worker_image\",\n args.image_name,\n \"--model_zoo\",\n args.model_zoo,\n \"--cluster_spec\",\n args.cluster_spec,\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args,\n filter_args=[\n \"model_zoo\",\n \"cluster_spec\",\n \"worker_image\",\n \"force_use_kube_config_file\",\n \"func\",\n ],\n )\n )\n\n _submit_job(args.image_name, args, container_args)\n\n\ndef predict(args):\n container_args = [\n \"--worker_image\",\n args.image_name,\n \"--model_zoo\",\n args.model_zoo,\n \"--cluster_spec\",\n args.cluster_spec,\n ]\n\n container_args.extend(\n build_arguments_from_parsed_result(\n args,\n filter_args=[\n \"model_zoo\",\n \"cluster_spec\",\n \"worker_image\",\n \"force_use_kube_config_file\",\n \"func\",\n ],\n )\n )\n\n _submit_job(args.image_name, args, container_args)\n\n\ndef _submit_job(image_name, client_args, container_args):\n client = k8s.Client(\n image_name=image_name,\n namespace=client_args.namespace,\n job_name=client_args.job_name,\n cluster_spec=client_args.cluster_spec,\n force_use_kube_config_file=client_args.force_use_kube_config_file,\n )\n\n container_args = wrap_python_args_with_string(container_args)\n\n master_client_command = (\n BashCommandTemplate.SET_PIPEFAIL\n + \" python -m elasticdl.python.master.main\"\n )\n container_args.insert(0, master_client_command)\n if client_args.log_file_path:\n container_args.append(\n BashCommandTemplate.REDIRECTION.format(client_args.log_file_path)\n )\n\n python_command = \" \".join(container_args)\n container_args = [\"-c\", python_command]\n\n if client_args.yaml:\n client.dump_master_yaml(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n 
envs=parse_envs(client_args.envs),\n yaml=client_args.yaml,\n )\n logger.info(\n \"ElasticDL job %s YAML has been dumped into file %s.\"\n % (client_args.job_name, client_args.yaml)\n )\n else:\n client.create_master(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n envs=parse_envs(client_args.envs),\n )\n logger.info(\n \"ElasticDL job %s was successfully submitted. \"\n \"The master pod is: %s.\"\n % (client_args.job_name, client.get_master_pod_name())\n )\n\n\ndef _get_docker_client(docker_base_url, docker_tlscert, docker_tlskey):\n if docker_tlscert and docker_tlskey:\n tls_config = docker.tls.TLSConfig(\n client_cert=(docker_tlscert, docker_tlskey)\n )\n return docker.APIClient(base_url=docker_base_url, tls=tls_config)\n else:\n return docker.APIClient(base_url=docker_base_url)\n\n\ndef _print_docker_progress(line):\n error = line.get(\"error\", None)\n if error:\n raise RuntimeError(\"Docker image build: \" + error)\n stream = line.get(\"stream\", None)\n if stream:\n print(stream, end=\"\")\n else:\n print(line)\n", "path": "elasticdl_client/api.py"}], "after_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\n\nimport docker\nfrom jinja2 import Template\n\nfrom elasticdl_client.common import k8s_client as k8s\nfrom elasticdl_client.common.args import (\n build_arguments_from_parsed_result,\n parse_envs,\n wrap_python_args_with_string,\n)\nfrom elasticdl_client.common.constants import BashCommandTemplate\nfrom elasticdl_client.common.log_utils import default_logger as logger\n\n\ndef init_zoo(args):\n logger.info(\"Create the Dockerfile for the model zoo.\")\n\n # Copy cluster spec file to the current directory if specified\n cluster_spec_path = args.cluster_spec\n cluster_spec_name = None\n if cluster_spec_path:\n if not os.path.exists(cluster_spec_path):\n raise RuntimeError(\n \"The cluster spec {} doesn't exist\".format(cluster_spec_path)\n )\n try:\n shutil.copy2(cluster_spec_path, os.getcwd())\n except shutil.SameFileError:\n pass\n cluster_spec_name = os.path.basename(cluster_spec_path)\n\n # Create the docker file\n # Build the content from the template and arguments\n tmpl_str = \"\"\"\\\nFROM {{ BASE_IMAGE }} as base\n\nRUN pip install elasticdl_preprocessing\\\n --extra-index-url={{ EXTRA_PYPI_INDEX }}\n\nRUN pip install elasticdl --extra-index-url={{ EXTRA_PYPI_INDEX }}\nRUN /bin/bash -c\\\n 'PYTHON_PKG_PATH=$(pip3 show elasticdl | grep \"Location:\" | cut -d \" \" -f2);\\\n echo \"PATH=${PYTHON_PKG_PATH}/elasticdl/go/bin:$PATH\" >> /root/.bashrc'\n\nCOPY . 
/model_zoo\nRUN pip install -r /model_zoo/requirements.txt\\\n --extra-index-url={{ EXTRA_PYPI_INDEX }}\n\n{% if CLUSTER_SPEC_NAME %}\\\nCOPY ./{{ CLUSTER_SPEC_NAME }} /cluster_spec/{{ CLUSTER_SPEC_NAME }}\\\n{% endif %}\n\"\"\"\n template = Template(tmpl_str)\n docker_file_content = template.render(\n BASE_IMAGE=args.base_image,\n EXTRA_PYPI_INDEX=args.extra_pypi_index,\n CLUSTER_SPEC_NAME=cluster_spec_name,\n )\n\n with open(\"./Dockerfile\", mode=\"w\") as f:\n f.write(docker_file_content)\n\n\ndef build_zoo(args):\n logger.info(\"Build the image for the model zoo.\")\n # Call docker api to build the image\n # Validate the image name schema\n client = docker.DockerClient.from_env()\n for line in client.api.build(\n dockerfile=\"./Dockerfile\",\n path=args.path,\n rm=True,\n tag=args.image,\n decode=True,\n ):\n _print_docker_progress(line)\n\n\ndef push_zoo(args):\n logger.info(\"Push the image for the model zoo.\")\n # Call docker api to push the image to remote registry\n client = docker.DockerClient.from_env()\n for line in client.api.push(args.image, stream=True, decode=True):\n _print_docker_progress(line)\n\n\ndef train(args):\n container_args = [\n \"--worker_image\",\n args.image_name,\n \"--model_zoo\",\n args.model_zoo,\n \"--cluster_spec\",\n args.cluster_spec,\n ]\n\n container_args.extend(\n build_arguments_from_parsed_result(\n args,\n filter_args=[\n \"model_zoo\",\n \"cluster_spec\",\n \"worker_image\",\n \"force_use_kube_config_file\",\n \"func\",\n ],\n )\n )\n\n _submit_job(args.image_name, args, container_args)\n\n\ndef evaluate(args):\n container_args = [\n \"--worker_image\",\n args.image_name,\n \"--model_zoo\",\n args.model_zoo,\n \"--cluster_spec\",\n args.cluster_spec,\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args,\n filter_args=[\n \"model_zoo\",\n \"cluster_spec\",\n \"worker_image\",\n \"force_use_kube_config_file\",\n \"func\",\n ],\n )\n )\n\n _submit_job(args.image_name, args, container_args)\n\n\ndef predict(args):\n container_args = [\n \"--worker_image\",\n args.image_name,\n \"--model_zoo\",\n args.model_zoo,\n \"--cluster_spec\",\n args.cluster_spec,\n ]\n\n container_args.extend(\n build_arguments_from_parsed_result(\n args,\n filter_args=[\n \"model_zoo\",\n \"cluster_spec\",\n \"worker_image\",\n \"force_use_kube_config_file\",\n \"func\",\n ],\n )\n )\n\n _submit_job(args.image_name, args, container_args)\n\n\ndef _submit_job(image_name, client_args, container_args):\n client = k8s.Client(\n image_name=image_name,\n namespace=client_args.namespace,\n job_name=client_args.job_name,\n cluster_spec=client_args.cluster_spec,\n force_use_kube_config_file=client_args.force_use_kube_config_file,\n )\n\n container_args = wrap_python_args_with_string(container_args)\n\n master_client_command = (\n BashCommandTemplate.SET_PIPEFAIL\n + \" python -m elasticdl.python.master.main\"\n )\n container_args.insert(0, master_client_command)\n if client_args.log_file_path:\n container_args.append(\n BashCommandTemplate.REDIRECTION.format(client_args.log_file_path)\n )\n\n python_command = \" \".join(container_args)\n container_args = [\"-c\", python_command]\n\n if client_args.yaml:\n client.dump_master_yaml(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n 
envs=parse_envs(client_args.envs),\n yaml=client_args.yaml,\n )\n logger.info(\n \"ElasticDL job %s YAML has been dumped into file %s.\"\n % (client_args.job_name, client_args.yaml)\n )\n else:\n client.create_master(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n envs=parse_envs(client_args.envs),\n )\n logger.info(\n \"ElasticDL job %s was successfully submitted. \"\n \"The master pod is: %s.\"\n % (client_args.job_name, client.get_master_pod_name())\n )\n\n\ndef _get_docker_client(docker_base_url, docker_tlscert, docker_tlskey):\n if docker_tlscert and docker_tlskey:\n tls_config = docker.tls.TLSConfig(\n client_cert=(docker_tlscert, docker_tlskey)\n )\n return docker.APIClient(base_url=docker_base_url, tls=tls_config)\n else:\n return docker.APIClient(base_url=docker_base_url)\n\n\ndef _print_docker_progress(line):\n error = line.get(\"error\", None)\n if error:\n raise RuntimeError(\"Docker image build: \" + error)\n stream = line.get(\"stream\", None)\n if stream:\n print(stream, end=\"\")\n else:\n print(line)\n", "path": "elasticdl_client/api.py"}]}
| 2,595 | 174 |
gh_patches_debug_23424
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-8496
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] Path expansion in virtualrunenv generator adds extra colon for empty path
### Environment Details
* Operating System+version: Debian GNU/Linux 10
* Conan version: 1.34.0-dev
* Python version: 3.7.0
* Shell: GNU bash, version 5.0.3(1)-release (x86_64-pc-linux-gnu)
### Steps to reproduce (Include if Applicable)
~~~bash
export LD_LIBRARY_PATH=
# install a recipe with "-g virtualrunenv"
source activate.sh
echo $LD_LIBRARY_PATH
~~~
The last echo prints path with extra ":" at the end.
### Explanation
According to [Bash expansion](https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html):
> **${parameter:+word}**
> If parameter is null or unset, nothing is substituted, otherwise the expansion of word is substituted.
Conan uses:
~~~bash
PATH="..."${PATH+:$PATH}
~~~
Using `${var+:$var}`:
~~~bash
export DEFINED_EMPTY_VAR=
echo "xxx"${DEFINED_EMPTY_VAR+:$DEFINED_EMPTY_VAR}
echo "xxx"${UNDEFINED_VAR+:$UNDEFINED_VAR}
~~~
Results
~~~
xxx:
xxx
~~~
Correct form `${var:+:$var}`:
~~~bash
export DEFINED_EMPTY_VAR=
echo "xxx"${DEFINED_EMPTY_VAR:+:$DEFINED_EMPTY_VAR}
echo "xxx"${UNDEFINED_VAR:+:$UNDEFINED_VAR}
~~~
Results
~~~
xxx
xxx
~~~
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/envvars/environment.py`
Content:
```
1 import os
2 import platform
3 import textwrap
4 from collections import OrderedDict
5
6 from jinja2 import Template
7
8 from conans.errors import ConanException
9 from conans.util.files import normalize
10
11 sh_activate = textwrap.dedent("""\
12 #!/usr/bin/env sh
13
14 {%- for it in modified_vars %}
15 export CONAN_OLD_{{it}}="${{it}}"
16 {%- endfor %}
17
18 while read -r line; do
19 LINE="$(eval echo $line)";
20 export "$LINE";
21 done < "{{ environment_file }}"
22
23 export CONAN_OLD_PS1="$PS1"
24 export PS1="({{venv_name}}) $PS1"
25 """)
26
27 sh_deactivate = textwrap.dedent("""\
28 #!/usr/bin/env sh
29 export PS1="$CONAN_OLD_PS1"
30 unset CONAN_OLD_PS1
31
32 {% for it in modified_vars %}
33 export {{it}}="$CONAN_OLD_{{it}}"
34 unset CONAN_OLD_{{it}}
35 {%- endfor %}
36 {%- for it in new_vars %}
37 unset {{it}}
38 {%- endfor %}
39 """)
40
41 bat_activate = textwrap.dedent("""\
42 @echo off
43
44 {%- for it in modified_vars %}
45 SET "CONAN_OLD_{{it}}=%{{it}}%"
46 {%- endfor %}
47
48 FOR /F "usebackq tokens=1,* delims==" %%i IN ("{{ environment_file }}") DO (
49 CALL SET "%%i=%%j"
50 )
51
52 SET "CONAN_OLD_PROMPT=%PROMPT%"
53 SET "PROMPT=({{venv_name}}) %PROMPT%"
54 """)
55
56 bat_deactivate = textwrap.dedent("""\
57 @echo off
58
59 SET "PROMPT=%CONAN_OLD_PROMPT%"
60 SET "CONAN_OLD_PROMPT="
61
62 {% for it in modified_vars %}
63 SET "{{it}}=%CONAN_OLD_{{it}}%"
64 SET "CONAN_OLD_{{it}}="
65 {%- endfor %}
66 {%- for it in new_vars %}
67 SET "{{it}}="
68 {%- endfor %}
69 """)
70
71 ps1_activate = textwrap.dedent("""\
72 {%- for it in modified_vars %}
73 $env:CONAN_OLD_{{venv_name}}_{{it}}=$env:{{it}}
74 {%- endfor %}
75
76 foreach ($line in Get-Content "{{ environment_file }}") {
77 $var,$value = $line -split '=',2
78 $value_expanded = $ExecutionContext.InvokeCommand.ExpandString($value)
79 Set-Item env:\\$var -Value "$value_expanded"
80 }
81
82 function global:_old_conan_{{venv_name}}_prompt {""}
83 $function:_old_conan_{{venv_name}}_prompt = $function:prompt
84 function global:prompt {
85 write-host "({{venv_name}}) " -nonewline; & $function:_old_conan_{{venv_name}}_prompt
86 }
87 """)
88
89 ps1_deactivate = textwrap.dedent("""\
90 $function:prompt = $function:_old_conan_{{venv_name}}_prompt
91 remove-item function:_old_conan_{{venv_name}}_prompt
92
93 {% for it in modified_vars %}
94 $env:{{it}}=$env:CONAN_OLD_{{venv_name}}_{{it}}
95 Remove-Item env:CONAN_OLD_{{venv_name}}_{{it}}
96 {%- endfor %}
97 {%- for it in new_vars %}
98 Remove-Item env:{{it}}
99 {%- endfor %}
100 """)
101
102
103 BAT_FLAVOR = "bat"
104 PS1_FLAVOR = "ps1"
105 SH_FLAVOR = "sh"
106
107
108 def _variable_placeholder(flavor, name, append_with_spaces):
109 """
110 :param flavor: flavor of the execution environment
111 :param name: variable name
112 :return: placeholder for the variable name formatted for a certain execution environment.
113 (e.g., cmd, ps1, sh).
114 """
115 if flavor == BAT_FLAVOR:
116 return "%{}%".format(name)
117 if flavor == PS1_FLAVOR:
118 return "$env:%s" % name
119 # flavor == sh
120 return "${%s+ $%s}" % (name, name) if append_with_spaces else "${%s+:$%s}" % (name, name)
121
122
123 def _format_values(flavor, variables, append_with_spaces):
124 """
125 Formats the values for the different supported script language flavors.
126 :param flavor: flavor of the execution environment
127 :param variables: variables to be formatted
128 :return:
129 """
130
131 if flavor in [BAT_FLAVOR, PS1_FLAVOR] and platform.system() == "Windows":
132 path_sep, quote_elements = ";", False
133 elif flavor == PS1_FLAVOR:
134 path_sep, quote_elements = ":", False
135 else:
136 path_sep, quote_elements = ":", True
137
138 for name, value in variables:
139 # activate values
140 if isinstance(value, list):
141 value = list(OrderedDict.fromkeys(value)) # Avoid repeated entries, while keeping order
142 append_space = name in append_with_spaces
143 placeholder = _variable_placeholder(flavor, name, append_space)
144 if append_space:
145 # Variables joined with spaces look like: CPPFLAGS="one two three"
146 value = " ".join(value+[placeholder])
147 value = "\"%s\"" % value if quote_elements else value
148 else:
149 # Quoted variables joined with pathset may look like:
150 # PATH="one path":"two paths"
151 # Unquoted variables joined with pathset may look like: PATH=one path;two paths
152 value = ["\"%s\"" % v for v in value] if quote_elements else value
153 if flavor == SH_FLAVOR:
154 value = path_sep.join(value) + placeholder
155 else:
156 value = path_sep.join(value + [placeholder])
157 else:
158 # single value
159 value = "\"%s\"" % value if quote_elements else value
160 if platform.system() != "Windows":
161 value = value.replace("\\", "\\\\")
162
163 # deactivate values
164 existing = name in os.environ
165 yield name, value, existing
166
167
168 def _files(env_vars, vars_with_spaces, flavor, activate_tpl, deactivate_tpl, venv_name,
169 env_filepath):
170 ret = list(_format_values(flavor, env_vars.items(), vars_with_spaces))
171 modified_vars = [name for name, _, existing in ret if existing]
172 new_vars = [name for name, _, existing in ret if not existing]
173
174 activate_content = activate_tpl.render(environment_file=env_filepath,
175 modified_vars=modified_vars, new_vars=new_vars,
176 venv_name=venv_name)
177 deactivate_content = deactivate_tpl.render(modified_vars=modified_vars, new_vars=new_vars,
178 venv_name=venv_name)
179
180 environment_lines = ["{}={}".format(name, value) for name, value, _ in ret]
181 # This blank line is important, otherwise the script doens't process last line
182 environment_lines.append('')
183
184 if flavor == SH_FLAVOR:
185 # replace CRLF->LF guarantee it is always LF, irrespective of current .py file
186 activate_content = activate_content.replace("\r\n", "\n")
187 deactivate_content = deactivate_content.replace("\r\n", "\n")
188 environment = "\n".join(environment_lines)
189 else:
190 activate_content = normalize(activate_content)
191 deactivate_content = normalize(deactivate_content)
192 environment = os.linesep.join(environment_lines)
193
194 return activate_content, deactivate_content, environment
195
196
197 def env_files(env_vars, vars_with_spaces, flavor, folder, name, venv_name):
198 env_filename = "environment{}.{}.env".format(name, flavor)
199 activate_filename = "activate{}.{}".format(name, flavor)
200 deactivate_filename = "deactivate{}.{}".format(name, flavor)
201
202 templates = {SH_FLAVOR: (sh_activate, sh_deactivate),
203 BAT_FLAVOR: (bat_activate, bat_deactivate),
204 PS1_FLAVOR: (ps1_activate, ps1_deactivate)}
205 try:
206 activate, deactivate = templates[flavor]
207 except KeyError:
208 raise ConanException("Unrecognized flavor: %s" % flavor)
209 activate_tpl, deactivate_tpl = Template(activate), Template(deactivate)
210
211 env_filepath = os.path.abspath(os.path.join(folder, env_filename))
212 activate, deactivate, envfile = _files(env_vars, vars_with_spaces, flavor, activate_tpl,
213 deactivate_tpl, venv_name, env_filepath)
214
215 result = {activate_filename: activate,
216 deactivate_filename: deactivate,
217 env_filename: envfile}
218 return result
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conans/client/envvars/environment.py b/conans/client/envvars/environment.py
--- a/conans/client/envvars/environment.py
+++ b/conans/client/envvars/environment.py
@@ -117,7 +117,7 @@
if flavor == PS1_FLAVOR:
return "$env:%s" % name
# flavor == sh
- return "${%s+ $%s}" % (name, name) if append_with_spaces else "${%s+:$%s}" % (name, name)
+ return "${%s:+ $%s}" % (name, name) if append_with_spaces else "${%s:+:$%s}" % (name, name)
def _format_values(flavor, variables, append_with_spaces):
@@ -143,7 +143,10 @@
placeholder = _variable_placeholder(flavor, name, append_space)
if append_space:
# Variables joined with spaces look like: CPPFLAGS="one two three"
- value = " ".join(value+[placeholder])
+ if flavor == SH_FLAVOR:
+ value = " ".join(value) + placeholder
+ else:
+ value = " ".join(value + [placeholder])
value = "\"%s\"" % value if quote_elements else value
else:
# Quoted variables joined with pathset may look like:
|
{"golden_diff": "diff --git a/conans/client/envvars/environment.py b/conans/client/envvars/environment.py\n--- a/conans/client/envvars/environment.py\n+++ b/conans/client/envvars/environment.py\n@@ -117,7 +117,7 @@\n if flavor == PS1_FLAVOR:\n return \"$env:%s\" % name\n # flavor == sh\n- return \"${%s+ $%s}\" % (name, name) if append_with_spaces else \"${%s+:$%s}\" % (name, name)\n+ return \"${%s:+ $%s}\" % (name, name) if append_with_spaces else \"${%s:+:$%s}\" % (name, name)\n \n \n def _format_values(flavor, variables, append_with_spaces):\n@@ -143,7 +143,10 @@\n placeholder = _variable_placeholder(flavor, name, append_space)\n if append_space:\n # Variables joined with spaces look like: CPPFLAGS=\"one two three\"\n- value = \" \".join(value+[placeholder])\n+ if flavor == SH_FLAVOR:\n+ value = \" \".join(value) + placeholder\n+ else:\n+ value = \" \".join(value + [placeholder])\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n else:\n # Quoted variables joined with pathset may look like:\n", "issue": "[bug] Path expansion in virtualrunenv generator adds extra colon for empty path\n### Environment Details\r\n * Operating System+version: Debian GNU/Linux 10\r\n * Conan version: 1.34.0-dev\r\n * Python version: 3.7.0\r\n * Shell: GNU bash, version 5.0.3(1)-release (x86_64-pc-linux-gnu)\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n~~~bash\r\nexport LD_LIBRARY_PATH=\r\n# install a recipe with \"-g virtualrunenv\"\r\nsource activate.sh\r\necho $LD_LIBRARY_PATH\r\n~~~\r\nThe last echo prints path with extra \":\" at the end.\r\n\r\n### Explanation\r\n\r\nAccording to [Bash expansion](https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html):\r\n> **${parameter:+word}**\r\n> If parameter is null or unset, nothing is substituted, otherwise the expansion of word is substituted.\r\n\r\nConan uses:\r\n~~~bash\r\nPATH=\"...\"${PATH+:$PATH}\r\n~~~\r\n\r\nUsing `${var+:$var}`:\r\n~~~bash\r\nexport DEFINED_EMPTY_VAR=\r\necho \"xxx\"${DEFINED_EMPTY_VAR+:$DEFINED_EMPTY_VAR}\r\necho \"xxx\"${UNDEFINED_VAR+:$UNDEFINED_VAR}\r\n~~~\r\nResults\r\n~~~\r\nxxx:\r\nxxx\r\n~~~\r\n\r\nCorrect form `${var:+:$var}`:\r\n~~~bash\r\nexport DEFINED_EMPTY_VAR=\r\necho \"xxx\"${DEFINED_EMPTY_VAR:+:$DEFINED_EMPTY_VAR}\r\necho \"xxx\"${UNDEFINED_VAR:+:$UNDEFINED_VAR}\r\n~~~\r\nResults\r\n~~~\r\nxxx\r\nxxx\r\n~~~\n", "before_files": [{"content": "import os\nimport platform\nimport textwrap\nfrom collections import OrderedDict\n\nfrom jinja2 import Template\n\nfrom conans.errors import ConanException\nfrom conans.util.files import normalize\n\nsh_activate = textwrap.dedent(\"\"\"\\\n #!/usr/bin/env sh\n\n {%- for it in modified_vars %}\n export CONAN_OLD_{{it}}=\"${{it}}\"\n {%- endfor %}\n\n while read -r line; do\n LINE=\"$(eval echo $line)\";\n export \"$LINE\";\n done < \"{{ environment_file }}\"\n\n export CONAN_OLD_PS1=\"$PS1\"\n export PS1=\"({{venv_name}}) $PS1\"\n\"\"\")\n\nsh_deactivate = textwrap.dedent(\"\"\"\\\n #!/usr/bin/env sh\n export PS1=\"$CONAN_OLD_PS1\"\n unset CONAN_OLD_PS1\n\n {% for it in modified_vars %}\n export {{it}}=\"$CONAN_OLD_{{it}}\"\n unset CONAN_OLD_{{it}}\n {%- endfor %}\n {%- for it in new_vars %}\n unset {{it}}\n {%- endfor %}\n\"\"\")\n\nbat_activate = textwrap.dedent(\"\"\"\\\n @echo off\n\n {%- for it in modified_vars %}\n SET \"CONAN_OLD_{{it}}=%{{it}}%\"\n {%- endfor %}\n\n FOR /F \"usebackq tokens=1,* delims==\" %%i IN (\"{{ environment_file }}\") DO (\n CALL SET \"%%i=%%j\"\n )\n\n SET \"CONAN_OLD_PROMPT=%PROMPT%\"\n SET 
\"PROMPT=({{venv_name}}) %PROMPT%\"\n\"\"\")\n\nbat_deactivate = textwrap.dedent(\"\"\"\\\n @echo off\n\n SET \"PROMPT=%CONAN_OLD_PROMPT%\"\n SET \"CONAN_OLD_PROMPT=\"\n\n {% for it in modified_vars %}\n SET \"{{it}}=%CONAN_OLD_{{it}}%\"\n SET \"CONAN_OLD_{{it}}=\"\n {%- endfor %}\n {%- for it in new_vars %}\n SET \"{{it}}=\"\n {%- endfor %}\n\"\"\")\n\nps1_activate = textwrap.dedent(\"\"\"\\\n {%- for it in modified_vars %}\n $env:CONAN_OLD_{{venv_name}}_{{it}}=$env:{{it}}\n {%- endfor %}\n\n foreach ($line in Get-Content \"{{ environment_file }}\") {\n $var,$value = $line -split '=',2\n $value_expanded = $ExecutionContext.InvokeCommand.ExpandString($value)\n Set-Item env:\\\\$var -Value \"$value_expanded\"\n }\n\n function global:_old_conan_{{venv_name}}_prompt {\"\"}\n $function:_old_conan_{{venv_name}}_prompt = $function:prompt\n function global:prompt {\n write-host \"({{venv_name}}) \" -nonewline; & $function:_old_conan_{{venv_name}}_prompt\n }\n\"\"\")\n\nps1_deactivate = textwrap.dedent(\"\"\"\\\n $function:prompt = $function:_old_conan_{{venv_name}}_prompt\n remove-item function:_old_conan_{{venv_name}}_prompt\n\n {% for it in modified_vars %}\n $env:{{it}}=$env:CONAN_OLD_{{venv_name}}_{{it}}\n Remove-Item env:CONAN_OLD_{{venv_name}}_{{it}}\n {%- endfor %}\n {%- for it in new_vars %}\n Remove-Item env:{{it}}\n {%- endfor %}\n\"\"\")\n\n\nBAT_FLAVOR = \"bat\"\nPS1_FLAVOR = \"ps1\"\nSH_FLAVOR = \"sh\"\n\n\ndef _variable_placeholder(flavor, name, append_with_spaces):\n \"\"\"\n :param flavor: flavor of the execution environment\n :param name: variable name\n :return: placeholder for the variable name formatted for a certain execution environment.\n (e.g., cmd, ps1, sh).\n \"\"\"\n if flavor == BAT_FLAVOR:\n return \"%{}%\".format(name)\n if flavor == PS1_FLAVOR:\n return \"$env:%s\" % name\n # flavor == sh\n return \"${%s+ $%s}\" % (name, name) if append_with_spaces else \"${%s+:$%s}\" % (name, name)\n\n\ndef _format_values(flavor, variables, append_with_spaces):\n \"\"\"\n Formats the values for the different supported script language flavors.\n :param flavor: flavor of the execution environment\n :param variables: variables to be formatted\n :return:\n \"\"\"\n\n if flavor in [BAT_FLAVOR, PS1_FLAVOR] and platform.system() == \"Windows\":\n path_sep, quote_elements = \";\", False\n elif flavor == PS1_FLAVOR:\n path_sep, quote_elements = \":\", False\n else:\n path_sep, quote_elements = \":\", True\n\n for name, value in variables:\n # activate values\n if isinstance(value, list):\n value = list(OrderedDict.fromkeys(value)) # Avoid repeated entries, while keeping order\n append_space = name in append_with_spaces\n placeholder = _variable_placeholder(flavor, name, append_space)\n if append_space:\n # Variables joined with spaces look like: CPPFLAGS=\"one two three\"\n value = \" \".join(value+[placeholder])\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n else:\n # Quoted variables joined with pathset may look like:\n # PATH=\"one path\":\"two paths\"\n # Unquoted variables joined with pathset may look like: PATH=one path;two paths\n value = [\"\\\"%s\\\"\" % v for v in value] if quote_elements else value\n if flavor == SH_FLAVOR:\n value = path_sep.join(value) + placeholder\n else:\n value = path_sep.join(value + [placeholder])\n else:\n # single value\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n if platform.system() != \"Windows\":\n value = value.replace(\"\\\\\", \"\\\\\\\\\")\n\n # deactivate values\n existing = name in os.environ\n yield name, 
value, existing\n\n\ndef _files(env_vars, vars_with_spaces, flavor, activate_tpl, deactivate_tpl, venv_name,\n env_filepath):\n ret = list(_format_values(flavor, env_vars.items(), vars_with_spaces))\n modified_vars = [name for name, _, existing in ret if existing]\n new_vars = [name for name, _, existing in ret if not existing]\n\n activate_content = activate_tpl.render(environment_file=env_filepath,\n modified_vars=modified_vars, new_vars=new_vars,\n venv_name=venv_name)\n deactivate_content = deactivate_tpl.render(modified_vars=modified_vars, new_vars=new_vars, \n venv_name=venv_name)\n\n environment_lines = [\"{}={}\".format(name, value) for name, value, _ in ret]\n # This blank line is important, otherwise the script doens't process last line\n environment_lines.append('')\n\n if flavor == SH_FLAVOR:\n # replace CRLF->LF guarantee it is always LF, irrespective of current .py file\n activate_content = activate_content.replace(\"\\r\\n\", \"\\n\")\n deactivate_content = deactivate_content.replace(\"\\r\\n\", \"\\n\")\n environment = \"\\n\".join(environment_lines)\n else:\n activate_content = normalize(activate_content)\n deactivate_content = normalize(deactivate_content)\n environment = os.linesep.join(environment_lines)\n\n return activate_content, deactivate_content, environment\n\n\ndef env_files(env_vars, vars_with_spaces, flavor, folder, name, venv_name):\n env_filename = \"environment{}.{}.env\".format(name, flavor)\n activate_filename = \"activate{}.{}\".format(name, flavor)\n deactivate_filename = \"deactivate{}.{}\".format(name, flavor)\n\n templates = {SH_FLAVOR: (sh_activate, sh_deactivate),\n BAT_FLAVOR: (bat_activate, bat_deactivate),\n PS1_FLAVOR: (ps1_activate, ps1_deactivate)}\n try:\n activate, deactivate = templates[flavor]\n except KeyError:\n raise ConanException(\"Unrecognized flavor: %s\" % flavor)\n activate_tpl, deactivate_tpl = Template(activate), Template(deactivate)\n\n env_filepath = os.path.abspath(os.path.join(folder, env_filename))\n activate, deactivate, envfile = _files(env_vars, vars_with_spaces, flavor, activate_tpl,\n deactivate_tpl, venv_name, env_filepath)\n\n result = {activate_filename: activate,\n deactivate_filename: deactivate,\n env_filename: envfile}\n return result\n", "path": "conans/client/envvars/environment.py"}], "after_files": [{"content": "import os\nimport platform\nimport textwrap\nfrom collections import OrderedDict\n\nfrom jinja2 import Template\n\nfrom conans.errors import ConanException\nfrom conans.util.files import normalize\n\nsh_activate = textwrap.dedent(\"\"\"\\\n #!/usr/bin/env sh\n\n {%- for it in modified_vars %}\n export CONAN_OLD_{{it}}=\"${{it}}\"\n {%- endfor %}\n\n while read -r line; do\n LINE=\"$(eval echo $line)\";\n export \"$LINE\";\n done < \"{{ environment_file }}\"\n\n export CONAN_OLD_PS1=\"$PS1\"\n export PS1=\"({{venv_name}}) $PS1\"\n\"\"\")\n\nsh_deactivate = textwrap.dedent(\"\"\"\\\n #!/usr/bin/env sh\n export PS1=\"$CONAN_OLD_PS1\"\n unset CONAN_OLD_PS1\n\n {% for it in modified_vars %}\n export {{it}}=\"$CONAN_OLD_{{it}}\"\n unset CONAN_OLD_{{it}}\n {%- endfor %}\n {%- for it in new_vars %}\n unset {{it}}\n {%- endfor %}\n\"\"\")\n\nbat_activate = textwrap.dedent(\"\"\"\\\n @echo off\n\n {%- for it in modified_vars %}\n SET \"CONAN_OLD_{{it}}=%{{it}}%\"\n {%- endfor %}\n\n FOR /F \"usebackq tokens=1,* delims==\" %%i IN (\"{{ environment_file }}\") DO (\n CALL SET \"%%i=%%j\"\n )\n\n SET \"CONAN_OLD_PROMPT=%PROMPT%\"\n SET \"PROMPT=({{venv_name}}) %PROMPT%\"\n\"\"\")\n\nbat_deactivate = 
textwrap.dedent(\"\"\"\\\n @echo off\n\n SET \"PROMPT=%CONAN_OLD_PROMPT%\"\n SET \"CONAN_OLD_PROMPT=\"\n\n {% for it in modified_vars %}\n SET \"{{it}}=%CONAN_OLD_{{it}}%\"\n SET \"CONAN_OLD_{{it}}=\"\n {%- endfor %}\n {%- for it in new_vars %}\n SET \"{{it}}=\"\n {%- endfor %}\n\"\"\")\n\nps1_activate = textwrap.dedent(\"\"\"\\\n {%- for it in modified_vars %}\n $env:CONAN_OLD_{{venv_name}}_{{it}}=$env:{{it}}\n {%- endfor %}\n\n foreach ($line in Get-Content \"{{ environment_file }}\") {\n $var,$value = $line -split '=',2\n $value_expanded = $ExecutionContext.InvokeCommand.ExpandString($value)\n Set-Item env:\\\\$var -Value \"$value_expanded\"\n }\n\n function global:_old_conan_{{venv_name}}_prompt {\"\"}\n $function:_old_conan_{{venv_name}}_prompt = $function:prompt\n function global:prompt {\n write-host \"({{venv_name}}) \" -nonewline; & $function:_old_conan_{{venv_name}}_prompt\n }\n\"\"\")\n\nps1_deactivate = textwrap.dedent(\"\"\"\\\n $function:prompt = $function:_old_conan_{{venv_name}}_prompt\n remove-item function:_old_conan_{{venv_name}}_prompt\n\n {% for it in modified_vars %}\n $env:{{it}}=$env:CONAN_OLD_{{venv_name}}_{{it}}\n Remove-Item env:CONAN_OLD_{{venv_name}}_{{it}}\n {%- endfor %}\n {%- for it in new_vars %}\n Remove-Item env:{{it}}\n {%- endfor %}\n\"\"\")\n\n\nBAT_FLAVOR = \"bat\"\nPS1_FLAVOR = \"ps1\"\nSH_FLAVOR = \"sh\"\n\n\ndef _variable_placeholder(flavor, name, append_with_spaces):\n \"\"\"\n :param flavor: flavor of the execution environment\n :param name: variable name\n :return: placeholder for the variable name formatted for a certain execution environment.\n (e.g., cmd, ps1, sh).\n \"\"\"\n if flavor == BAT_FLAVOR:\n return \"%{}%\".format(name)\n if flavor == PS1_FLAVOR:\n return \"$env:%s\" % name\n # flavor == sh\n return \"${%s:+ $%s}\" % (name, name) if append_with_spaces else \"${%s:+:$%s}\" % (name, name)\n\n\ndef _format_values(flavor, variables, append_with_spaces):\n \"\"\"\n Formats the values for the different supported script language flavors.\n :param flavor: flavor of the execution environment\n :param variables: variables to be formatted\n :return:\n \"\"\"\n\n if flavor in [BAT_FLAVOR, PS1_FLAVOR] and platform.system() == \"Windows\":\n path_sep, quote_elements = \";\", False\n elif flavor == PS1_FLAVOR:\n path_sep, quote_elements = \":\", False\n else:\n path_sep, quote_elements = \":\", True\n\n for name, value in variables:\n # activate values\n if isinstance(value, list):\n value = list(OrderedDict.fromkeys(value)) # Avoid repeated entries, while keeping order\n append_space = name in append_with_spaces\n placeholder = _variable_placeholder(flavor, name, append_space)\n if append_space:\n # Variables joined with spaces look like: CPPFLAGS=\"one two three\"\n if flavor == SH_FLAVOR:\n value = \" \".join(value) + placeholder\n else:\n value = \" \".join(value + [placeholder])\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n else:\n # Quoted variables joined with pathset may look like:\n # PATH=\"one path\":\"two paths\"\n # Unquoted variables joined with pathset may look like: PATH=one path;two paths\n value = [\"\\\"%s\\\"\" % v for v in value] if quote_elements else value\n if flavor == SH_FLAVOR:\n value = path_sep.join(value) + placeholder\n else:\n value = path_sep.join(value + [placeholder])\n else:\n # single value\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n if platform.system() != \"Windows\":\n value = value.replace(\"\\\\\", \"\\\\\\\\\")\n\n # deactivate values\n existing = name in 
os.environ\n yield name, value, existing\n\n\ndef _files(env_vars, vars_with_spaces, flavor, activate_tpl, deactivate_tpl, venv_name,\n env_filepath):\n ret = list(_format_values(flavor, env_vars.items(), vars_with_spaces))\n modified_vars = [name for name, _, existing in ret if existing]\n new_vars = [name for name, _, existing in ret if not existing]\n\n activate_content = activate_tpl.render(environment_file=env_filepath,\n modified_vars=modified_vars, new_vars=new_vars,\n venv_name=venv_name)\n deactivate_content = deactivate_tpl.render(modified_vars=modified_vars, new_vars=new_vars, \n venv_name=venv_name)\n\n environment_lines = [\"{}={}\".format(name, value) for name, value, _ in ret]\n # This blank line is important, otherwise the script doens't process last line\n environment_lines.append('')\n\n if flavor == SH_FLAVOR:\n # replace CRLF->LF guarantee it is always LF, irrespective of current .py file\n activate_content = activate_content.replace(\"\\r\\n\", \"\\n\")\n deactivate_content = deactivate_content.replace(\"\\r\\n\", \"\\n\")\n environment = \"\\n\".join(environment_lines)\n else:\n activate_content = normalize(activate_content)\n deactivate_content = normalize(deactivate_content)\n environment = os.linesep.join(environment_lines)\n\n return activate_content, deactivate_content, environment\n\n\ndef env_files(env_vars, vars_with_spaces, flavor, folder, name, venv_name):\n env_filename = \"environment{}.{}.env\".format(name, flavor)\n activate_filename = \"activate{}.{}\".format(name, flavor)\n deactivate_filename = \"deactivate{}.{}\".format(name, flavor)\n\n templates = {SH_FLAVOR: (sh_activate, sh_deactivate),\n BAT_FLAVOR: (bat_activate, bat_deactivate),\n PS1_FLAVOR: (ps1_activate, ps1_deactivate)}\n try:\n activate, deactivate = templates[flavor]\n except KeyError:\n raise ConanException(\"Unrecognized flavor: %s\" % flavor)\n activate_tpl, deactivate_tpl = Template(activate), Template(deactivate)\n\n env_filepath = os.path.abspath(os.path.join(folder, env_filename))\n activate, deactivate, envfile = _files(env_vars, vars_with_spaces, flavor, activate_tpl,\n deactivate_tpl, venv_name, env_filepath)\n\n result = {activate_filename: activate,\n deactivate_filename: deactivate,\n env_filename: envfile}\n return result\n", "path": "conans/client/envvars/environment.py"}]}
| 3,052 | 296 |
gh_patches_debug_34298
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1398
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add price to Turkey
I am not sure which of the ticker [prices](https://www.epias.com.tr/en) to use but section 6.7.3 BLOCK ORDER BOOK in [this guide](https://www.epias.com.tr/wp-content/uploads/2017/06/INTRADAY_MARKET_USER_GUIDE_ENG_18.09.2017.pdf) may help.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/TR.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import requests
4 import re
5 import json
6 import arrow
7
8 SEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')
9 TIMEZONE = 'Europe/Istanbul'
10 URL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'
11 EMPTY_DAY = -1
12
13 MAP_GENERATION = {
14 'akarsu': 'hydro',
15 'barajli': 'hydro',
16 'dogalgaz': 'gas',
17 'lng': 'gas',
18 'lpg': 'gas',
19 'jeotermal': 'geothermal',
20 'taskomur': 'coal',
21 'asfaltitkomur': 'coal',
22 'linyit': 'coal',
23 'ithalkomur': 'coal',
24 'ruzgar': 'wind',
25 'fueloil': 'oil',
26 'biyokutle': 'biomass',
27 'nafta': 'oil',
28 'gunes': 'solar',
29 'nukleer': 'nuclear',
30 'kojenerasyon': 'unknown',
31 'motorin': 'oil',
32 }
33
34
35 def as_float(prod):
36 """Convert json values to float and sum all production for a further use"""
37 prod['total'] = 0.0
38 if isinstance(prod, dict) and 'yuk' not in prod.keys():
39 for prod_type, prod_val in prod.items():
40 prod[prod_type] = float(prod_val)
41 prod['total'] += prod[prod_type]
42 return prod
43
44
45 def get_last_data_idx(productions):
46 """
47 Find index of the last production
48 :param productions: list of 24 production dict objects
49 :return: (int) index of the newest data or -1 if no data (empty day)
50 """
51 for i in range(len(productions)):
52 if productions[i]['total'] < 1000:
53 return i - 1
54 return len(productions) - 1 # full day
55
56
57 def fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):
58 """
59 Requests the last known production mix (in MW) of a given country
60 Arguments:
61 zone_key (optional) -- used in case a parser is able to fetch multiple countries
62 session (optional) -- request session passed in order to re-use an existing session
63 Return:
64 A list of dictionaries in the form:
65 {
66 'zoneKey': 'FR',
67 'datetime': '2017-01-01T00:00:00Z',
68 'production': {
69 'biomass': 0.0,
70 'coal': 0.0,
71 'gas': 0.0,
72 'hydro': 0.0,
73 'nuclear': null,
74 'oil': 0.0,
75 'solar': 0.0,
76 'wind': 0.0,
77 'geothermal': 0.0,
78 'unknown': 0.0
79 },
80 'storage': {
81 'hydro': -10.0,
82 },
83 'source': 'mysource.com'
84 }
85 """
86 if target_datetime:
87 raise NotImplementedError('This parser is not yet able to parse past dates')
88
89 session = None # Explicitely make a new session to avoid caching from their server...
90 r = session or requests.session()
91 tr_datetime = arrow.now().to('Europe/Istanbul').floor('day')
92 response = r.get(URL, verify=False)
93 str_data = re.search(SEARCH_DATA, response.text)
94
95 production_by_hour = []
96 if str_data:
97 productions = json.loads(str_data.group('data'), object_hook=as_float)
98 last_data_index = get_last_data_idx(productions)
99 valid_production = productions[:last_data_index + 1]
100 if last_data_index != EMPTY_DAY:
101 for datapoint in valid_production:
102 data = {
103 'zoneKey': zone_key,
104 'production': {},
105 'storage': {},
106 'source': 'ytbs.teias.gov.tr',
107 'datetime': None
108 }
109 data['production'] = dict(zip(MAP_GENERATION.values(), [0] * len(MAP_GENERATION)))
110 for prod_type, prod_val in datapoint.items():
111 if prod_type in MAP_GENERATION.keys():
112 data['production'][MAP_GENERATION[prod_type]] += prod_val
113 elif prod_type not in ['total', 'uluslarasi', 'saat']:
114 logger.warning('Warning: %s (%d) is missing in mapping!' % (prod_type, prod_val))
115
116 try:
117 data['datetime'] = tr_datetime.replace(hour=int(datapoint['saat'])).datetime
118 except ValueError:
119 # 24 is not a valid hour!
120 data['datetime'] = tr_datetime.datetime
121
122 production_by_hour.append(data)
123 else:
124 raise Exception('Extracted data was None')
125
126 return production_by_hour
127
128
129 if __name__ == '__main__':
130 """Main method, never used by the Electricity Map backend, but handy for testing."""
131
132 print('fetch_production() ->')
133 print(fetch_production())
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/TR.py b/parsers/TR.py
--- a/parsers/TR.py
+++ b/parsers/TR.py
@@ -4,12 +4,17 @@
import re
import json
import arrow
+from bs4 import BeautifulSoup
+import datetime as dt
SEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')
TIMEZONE = 'Europe/Istanbul'
URL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'
EMPTY_DAY = -1
+PRICE_URL = 'https://seffaflik.epias.com.tr/transparency/piyasalar/' \
+ 'gop/ptf.xhtml'
+
MAP_GENERATION = {
'akarsu': 'hydro',
'barajli': 'hydro',
@@ -54,6 +59,35 @@
return len(productions) - 1 # full day
+def fetch_price():
+ soup = BeautifulSoup(requests.get(PRICE_URL).text, 'html.parser')
+ cells = soup.select('.TexAlCenter')
+
+ # data is in td elements with class "TexAlCenter" and role "gridcell"
+ data = list()
+ for cell in cells:
+ if cell.attrs.get('role', '') != 'gridcell':
+ continue
+ data.append(cell.text)
+
+ dates = [dt.datetime.strptime(val, '%d/%m/%Y').date()
+ for i, val in enumerate(data) if i % 3 == 0]
+ times = [dt.datetime.strptime(val, '%H:%M').time()
+ for i, val in enumerate(data) if i % 3 == 1]
+ prices = [float(val.replace(',', '.'))
+ for i, val in enumerate(data) if i % 3 == 2]
+
+ datapoints = [{
+ 'zoneKey': 'TR',
+ 'currency': 'TRY',
+ 'datetime': arrow.get(
+ dt.datetime.combine(date, time)).to('Europe/Istanbul').datetime,
+ 'price': price,
+ 'source': 'epias.com.tr'
+ } for date, time, price in zip(dates, times, prices)]
+ return datapoints
+
+
def fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):
"""
Requests the last known production mix (in MW) of a given country
@@ -127,7 +161,10 @@
if __name__ == '__main__':
- """Main method, never used by the Electricity Map backend, but handy for testing."""
+ """Main method, never used by the Electricity Map backend, but handy for
+ testing."""
print('fetch_production() ->')
print(fetch_production())
+ print('fetch_price() ->')
+ print(fetch_price())
|
{"golden_diff": "diff --git a/parsers/TR.py b/parsers/TR.py\n--- a/parsers/TR.py\n+++ b/parsers/TR.py\n@@ -4,12 +4,17 @@\n import re\n import json\n import arrow\n+from bs4 import BeautifulSoup\n+import datetime as dt\n \n SEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')\n TIMEZONE = 'Europe/Istanbul'\n URL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'\n EMPTY_DAY = -1\n \n+PRICE_URL = 'https://seffaflik.epias.com.tr/transparency/piyasalar/' \\\n+ 'gop/ptf.xhtml'\n+\n MAP_GENERATION = {\n 'akarsu': 'hydro',\n 'barajli': 'hydro',\n@@ -54,6 +59,35 @@\n return len(productions) - 1 # full day\n \n \n+def fetch_price():\n+ soup = BeautifulSoup(requests.get(PRICE_URL).text, 'html.parser')\n+ cells = soup.select('.TexAlCenter')\n+\n+ # data is in td elements with class \"TexAlCenter\" and role \"gridcell\"\n+ data = list()\n+ for cell in cells:\n+ if cell.attrs.get('role', '') != 'gridcell':\n+ continue\n+ data.append(cell.text)\n+\n+ dates = [dt.datetime.strptime(val, '%d/%m/%Y').date()\n+ for i, val in enumerate(data) if i % 3 == 0]\n+ times = [dt.datetime.strptime(val, '%H:%M').time()\n+ for i, val in enumerate(data) if i % 3 == 1]\n+ prices = [float(val.replace(',', '.'))\n+ for i, val in enumerate(data) if i % 3 == 2]\n+\n+ datapoints = [{\n+ 'zoneKey': 'TR',\n+ 'currency': 'TRY',\n+ 'datetime': arrow.get(\n+ dt.datetime.combine(date, time)).to('Europe/Istanbul').datetime,\n+ 'price': price,\n+ 'source': 'epias.com.tr'\n+ } for date, time, price in zip(dates, times, prices)]\n+ return datapoints\n+\n+\n def fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n@@ -127,7 +161,10 @@\n \n \n if __name__ == '__main__':\n- \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n+ \"\"\"Main method, never used by the Electricity Map backend, but handy for\n+ testing.\"\"\"\n \n print('fetch_production() ->')\n print(fetch_production())\n+ print('fetch_price() ->')\n+ print(fetch_price())\n", "issue": "Add price to Turkey\nI am not sure which of the ticker [prices](https://www.epias.com.tr/en) to use but section 6.7.3 BLOCK ORDER BOOK in [this guide](https://www.epias.com.tr/wp-content/uploads/2017/06/INTRADAY_MARKET_USER_GUIDE_ENG_18.09.2017.pdf) may help.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport requests\nimport re\nimport json\nimport arrow\n\nSEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')\nTIMEZONE = 'Europe/Istanbul'\nURL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'\nEMPTY_DAY = -1\n\nMAP_GENERATION = {\n 'akarsu': 'hydro',\n 'barajli': 'hydro',\n 'dogalgaz': 'gas',\n 'lng': 'gas',\n 'lpg': 'gas',\n 'jeotermal': 'geothermal',\n 'taskomur': 'coal',\n 'asfaltitkomur': 'coal',\n 'linyit': 'coal',\n 'ithalkomur': 'coal',\n 'ruzgar': 'wind',\n 'fueloil': 'oil',\n 'biyokutle': 'biomass',\n 'nafta': 'oil',\n 'gunes': 'solar',\n 'nukleer': 'nuclear',\n 'kojenerasyon': 'unknown',\n 'motorin': 'oil',\n}\n\n\ndef as_float(prod):\n \"\"\"Convert json values to float and sum all production for a further use\"\"\"\n prod['total'] = 0.0\n if isinstance(prod, dict) and 'yuk' not in prod.keys():\n for prod_type, prod_val in prod.items():\n prod[prod_type] = float(prod_val)\n prod['total'] += prod[prod_type]\n return prod\n\n\ndef get_last_data_idx(productions):\n \"\"\"\n Find index of the last production\n :param productions: list of 24 production dict objects\n :return: (int) index of the 
newest data or -1 if no data (empty day)\n \"\"\"\n for i in range(len(productions)):\n if productions[i]['total'] < 1000:\n return i - 1\n return len(productions) - 1 # full day\n\n\ndef fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n session = None # Explicitely make a new session to avoid caching from their server...\n r = session or requests.session()\n tr_datetime = arrow.now().to('Europe/Istanbul').floor('day')\n response = r.get(URL, verify=False)\n str_data = re.search(SEARCH_DATA, response.text)\n\n production_by_hour = []\n if str_data:\n productions = json.loads(str_data.group('data'), object_hook=as_float)\n last_data_index = get_last_data_idx(productions)\n valid_production = productions[:last_data_index + 1]\n if last_data_index != EMPTY_DAY:\n for datapoint in valid_production:\n data = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ytbs.teias.gov.tr',\n 'datetime': None\n }\n data['production'] = dict(zip(MAP_GENERATION.values(), [0] * len(MAP_GENERATION)))\n for prod_type, prod_val in datapoint.items():\n if prod_type in MAP_GENERATION.keys():\n data['production'][MAP_GENERATION[prod_type]] += prod_val\n elif prod_type not in ['total', 'uluslarasi', 'saat']:\n logger.warning('Warning: %s (%d) is missing in mapping!' 
% (prod_type, prod_val))\n\n try:\n data['datetime'] = tr_datetime.replace(hour=int(datapoint['saat'])).datetime\n except ValueError:\n # 24 is not a valid hour!\n data['datetime'] = tr_datetime.datetime\n\n production_by_hour.append(data)\n else:\n raise Exception('Extracted data was None')\n\n return production_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/TR.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport requests\nimport re\nimport json\nimport arrow\nfrom bs4 import BeautifulSoup\nimport datetime as dt\n\nSEARCH_DATA = re.compile(r'var gunlukUretimEgrisiData = (?P<data>.*);')\nTIMEZONE = 'Europe/Istanbul'\nURL = 'https://ytbs.teias.gov.tr/ytbs/frm_login.jsf'\nEMPTY_DAY = -1\n\nPRICE_URL = 'https://seffaflik.epias.com.tr/transparency/piyasalar/' \\\n 'gop/ptf.xhtml'\n\nMAP_GENERATION = {\n 'akarsu': 'hydro',\n 'barajli': 'hydro',\n 'dogalgaz': 'gas',\n 'lng': 'gas',\n 'lpg': 'gas',\n 'jeotermal': 'geothermal',\n 'taskomur': 'coal',\n 'asfaltitkomur': 'coal',\n 'linyit': 'coal',\n 'ithalkomur': 'coal',\n 'ruzgar': 'wind',\n 'fueloil': 'oil',\n 'biyokutle': 'biomass',\n 'nafta': 'oil',\n 'gunes': 'solar',\n 'nukleer': 'nuclear',\n 'kojenerasyon': 'unknown',\n 'motorin': 'oil',\n}\n\n\ndef as_float(prod):\n \"\"\"Convert json values to float and sum all production for a further use\"\"\"\n prod['total'] = 0.0\n if isinstance(prod, dict) and 'yuk' not in prod.keys():\n for prod_type, prod_val in prod.items():\n prod[prod_type] = float(prod_val)\n prod['total'] += prod[prod_type]\n return prod\n\n\ndef get_last_data_idx(productions):\n \"\"\"\n Find index of the last production\n :param productions: list of 24 production dict objects\n :return: (int) index of the newest data or -1 if no data (empty day)\n \"\"\"\n for i in range(len(productions)):\n if productions[i]['total'] < 1000:\n return i - 1\n return len(productions) - 1 # full day\n\n\ndef fetch_price():\n soup = BeautifulSoup(requests.get(PRICE_URL).text, 'html.parser')\n cells = soup.select('.TexAlCenter')\n\n # data is in td elements with class \"TexAlCenter\" and role \"gridcell\"\n data = list()\n for cell in cells:\n if cell.attrs.get('role', '') != 'gridcell':\n continue\n data.append(cell.text)\n\n dates = [dt.datetime.strptime(val, '%d/%m/%Y').date()\n for i, val in enumerate(data) if i % 3 == 0]\n times = [dt.datetime.strptime(val, '%H:%M').time()\n for i, val in enumerate(data) if i % 3 == 1]\n prices = [float(val.replace(',', '.'))\n for i, val in enumerate(data) if i % 3 == 2]\n\n datapoints = [{\n 'zoneKey': 'TR',\n 'currency': 'TRY',\n 'datetime': arrow.get(\n dt.datetime.combine(date, time)).to('Europe/Istanbul').datetime,\n 'price': price,\n 'source': 'epias.com.tr'\n } for date, time, price in zip(dates, times, prices)]\n return datapoints\n\n\ndef fetch_production(zone_key='TR', session=None, target_datetime=None, logger=None):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A list of dictionaries in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 
0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n session = None # Explicitely make a new session to avoid caching from their server...\n r = session or requests.session()\n tr_datetime = arrow.now().to('Europe/Istanbul').floor('day')\n response = r.get(URL, verify=False)\n str_data = re.search(SEARCH_DATA, response.text)\n\n production_by_hour = []\n if str_data:\n productions = json.loads(str_data.group('data'), object_hook=as_float)\n last_data_index = get_last_data_idx(productions)\n valid_production = productions[:last_data_index + 1]\n if last_data_index != EMPTY_DAY:\n for datapoint in valid_production:\n data = {\n 'zoneKey': zone_key,\n 'production': {},\n 'storage': {},\n 'source': 'ytbs.teias.gov.tr',\n 'datetime': None\n }\n data['production'] = dict(zip(MAP_GENERATION.values(), [0] * len(MAP_GENERATION)))\n for prod_type, prod_val in datapoint.items():\n if prod_type in MAP_GENERATION.keys():\n data['production'][MAP_GENERATION[prod_type]] += prod_val\n elif prod_type not in ['total', 'uluslarasi', 'saat']:\n logger.warning('Warning: %s (%d) is missing in mapping!' % (prod_type, prod_val))\n\n try:\n data['datetime'] = tr_datetime.replace(hour=int(datapoint['saat'])).datetime\n except ValueError:\n # 24 is not a valid hour!\n data['datetime'] = tr_datetime.datetime\n\n production_by_hour.append(data)\n else:\n raise Exception('Extracted data was None')\n\n return production_by_hour\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for\n testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_price() ->')\n print(fetch_price())\n", "path": "parsers/TR.py"}]}
| 1,772 | 630 |
gh_patches_debug_39836
|
rasdani/github-patches
|
git_diff
|
vllm-project__vllm-5180
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Incorrect Example for the Inference with Prefix
### Your current environment
```text
PyTorch version: 2.3.0+cu121
Is debug build: False
CUDA used to build PyTorch: 12.1
ROCM used to build PyTorch: N/A
OS: Ubuntu 22.04.4 LTS (x86_64)
GCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0
Clang version: Could not collect
CMake version: version 3.29.3
Libc version: glibc-2.35
Python version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)
Python platform: Linux-5.15.0-97-generic-x86_64-with-glibc2.35
Is CUDA available: True
CUDA runtime version: 12.2.140
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: NVIDIA A100 80GB PCIe
GPU 1: NVIDIA A100 80GB PCIe
GPU 2: NVIDIA A100 80GB PCIe
GPU 3: NVIDIA A100 80GB PCIe
Nvidia driver version: 535.161.07
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Address sizes: 48 bits physical, 48 bits virtual
Byte Order: Little Endian
CPU(s): 48
On-line CPU(s) list: 0-47
Vendor ID: AuthenticAMD
Model name: AMD EPYC 7443 24-Core Processor
CPU family: 25
Model: 1
Thread(s) per core: 1
Core(s) per socket: 24
Socket(s): 2
Stepping: 1
Frequency boost: enabled
CPU max MHz: 4035,6440
CPU min MHz: 1500,0000
BogoMIPS: 5700.55
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 invpcid_single hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd amd_ppin arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca
Virtualization: AMD-V
L1d cache: 1,5 MiB (48 instances)
L1i cache: 1,5 MiB (48 instances)
L2 cache: 24 MiB (48 instances)
L3 cache: 256 MiB (8 instances)
NUMA node(s): 2
NUMA node0 CPU(s): 0-23
NUMA node1 CPU(s): 24-47
Vulnerability Gather data sampling: Not affected
Vulnerability Itlb multihit: Not affected
Vulnerability L1tf: Not affected
Vulnerability Mds: Not affected
Vulnerability Meltdown: Not affected
Vulnerability Mmio stale data: Not affected
Vulnerability Retbleed: Not affected
Vulnerability Spec rstack overflow: Mitigation; safe RET
Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp
Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization
Vulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP disabled, RSB filling, PBRSB-eIBRS Not affected
Vulnerability Srbds: Not affected
Vulnerability Tsx async abort: Not affected
Versions of relevant libraries:
[pip3] flake8==7.0.0
[pip3] flake8-polyfill==1.0.2
[pip3] mypy-protobuf==3.4.0
[pip3] numpy==1.26.4
[pip3] nvidia-nccl-cu12==2.20.5
[pip3] torch==2.3.0
[pip3] torchinfo==1.8.0
[pip3] torchvision==0.18.0
[pip3] triton==2.3.0
[pip3] vllm_nccl_cu11==2.18.1.0.4.0
[pip3] vllm-nccl-cu12==2.18.1.0.4.0
[conda] Could not collectROCM Version: Could not collect
Neuron SDK Version: N/A
vLLM Version: 0.4.3
vLLM Build Flags:
CUDA Archs: Not Set; ROCm: Disabled; Neuron: Disabled
GPU Topology:
GPU0 GPU1 GPU2 GPU3 NIC0 NIC1 NIC2 NIC3 CPU Affinity NUMA Affinity GPU NUMA ID
GPU0 X NODE SYS SYS PHB PHB SYS SYS 0-23 0 N/A
GPU1 NODE X SYS SYS NODE NODE SYS SYS 0-23 0 N/A
GPU2 SYS SYS X NODE SYS SYS NODE NODE 24-47 1 N/A
GPU3 SYS SYS NODE X SYS SYS NODE NODE 24-47 1 N/A
NIC0 PHB NODE SYS SYS X PIX SYS SYS
NIC1 PHB NODE SYS SYS PIX X SYS SYS
NIC2 SYS SYS NODE NODE SYS SYS X PIX
NIC3 SYS SYS NODE NODE SYS SYS PIX X
Legend:
X = Self
SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
PIX = Connection traversing at most a single PCIe bridge
NV# = Connection traversing a bonded set of # NVLinks
NIC Legend:
NIC0: mlx5_0
NIC1: mlx5_1
NIC2: mlx5_2
NIC3: mlx5_3
```
### 🐛 Describe the bug
The current contents of `vllm/examples/offline_inference_with_prefix.py` have notable issues, introduced during the implementation of `--enable_prefix_caching` in #2762 and its fixes in subsequent PRs. The point of this issue is to describe the detected issues and provide a solid ground for an upcoming PR to fix them.
---
### 1. Misleading inference speed comparison
**Intended use**: the file structure suggests that the example should contain a comparison of inference `w/o prefix_caching` vs `w/ prefix_caching`, comparing the model inference speed for those two options.
**Issue**: in the current implementation, the `llm` object is introduced once with `enable_prefix_caching=True`. After that, it is used for generation twice, and those generations are compared with each other.
As a result, both generations apply `prefix hashing`, and **_the resulting inference speed comparison is irrelevant_**.
```python
# [current state of a file]
from vllm import LLM, SamplingParams
prefix = ...
prompts = [...]
sampling_params = SamplingParams(temperature=0.0)
llm = LLM(model="facebook/opt-125m", enable_prefix_caching=True)
generating_prompts = [prefix + prompt for prompt in prompts]
# [intended generation w/o prefix_caching]
outputs = llm.generate(generating_prompts, sampling_params)
...
# [intended generation w/ prefix_caching]
# The llm.generate call will batch all prompts and send the batch at once
# if resources allow. The prefix will only be cached after the first batch
# is processed, so we need to call generate once to calculate the prefix
# and cache it.
outputs = llm.generate(generating_prompts[0], sampling_params)
# Subsequent batches can leverage the cached prefix
outputs = llm.generate(generating_prompts, sampling_params)
```
**Proposed solution**: create two llm objects, named `regular_llm` and `prefix_cached_llm`, that are `w/o prefix_caching` and `w/ prefix_caching` respectively. Then, run the generation using those two models, obtaining varying results.
---
### 2. Misleading information
**Intended use**: file should cover usage details and explain how to use `enable_prefix_caching` parameter correctly.
**Issue**: currently, the file states that _"prefix will only be cached after the first batch is processed"_; however, my tests disprove that statement. You can test that using the following minimal reproducing code snippet:
```python
# [my code to show that cache works on the fly]
from vllm import LLM, SamplingParams
from time import time
prefix = (
"You are an expert school principal, skilled in effectively managing "
"faculty and staff. Draft 10-15 questions for a potential first grade "
"Head Teacher for my K-12, all-girls', independent school that emphasizes "
"community, joyful discovery, and life-long learning. The candidate is "
"coming in for a first-round panel interview for a 8th grade Math "
"teaching role. They have 5 years of previous teaching experience "
"as an assistant teacher at a co-ed, public school with experience "
"in middle school math teaching. Based on these information, fulfill "
"the following paragraph: ")
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
generating_prompts = [prefix + prompt for prompt in prompts]
sampling_params = SamplingParams(temperature=0.0)
regular_llm = LLM(model="facebook/opt-125m",
gpu_memory_utilization=0.4)
prefix_cached_llm = LLM(model="facebook/opt-125m",
gpu_memory_utilization=0.4,
enable_prefix_caching=True)
# execution time "without prefix caching"
s = time()
outputs = regular_llm.generate(generating_prompts, sampling_params)
slow_gen_time = time() - s
# execution time when "prefix is not cached yet"
# according to what is written in example file now
s = time()
outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)
first_gen_time = time() - s
# execution time when "prefix is already cached"
s = time()
outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)
second_gen_time = time() - s
print(round(first_gen_time / second_gen_time, 2))
print(round(slow_gen_time / second_gen_time, 2))
```
For my setup:
* speedup for "without prefix caching" vs "prefix is already cached" is `1.3`,
* speedup for "prefix is already cached" vs "prefix is not cached yet" is `1.01`.
As there is no significant speedup for the second comparison above, I assume that the prefix is cached on the fly during the first answer generation by the model. Therefore, the comment on the "warmup" is misleading and it should be deleted.
**Proposed solution**: delete the misleading comment on the "warmup" generation for prefix caching.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/offline_inference_with_prefix.py`
Content:
```
1 from vllm import LLM, SamplingParams
2
3 prefix = (
4 "You are an expert school principal, skilled in effectively managing "
5 "faculty and staff. Draft 10-15 questions for a potential first grade "
6 "Head Teacher for my K-12, all-girls', independent school that emphasizes "
7 "community, joyful discovery, and life-long learning. The candidate is "
8 "coming in for a first-round panel interview for a 8th grade Math "
9 "teaching role. They have 5 years of previous teaching experience "
10 "as an assistant teacher at a co-ed, public school with experience "
11 "in middle school math teaching. Based on these information, fulfill "
12 "the following paragraph: ")
13
14 # Sample prompts.
15 prompts = [
16 "Hello, my name is",
17 "The president of the United States is",
18 "The capital of France is",
19 "The future of AI is",
20 ]
21 # Create a sampling params object.
22 sampling_params = SamplingParams(temperature=0.0)
23
24 # Create an LLM.
25 llm = LLM(model="facebook/opt-125m", enable_prefix_caching=True)
26
27 generating_prompts = [prefix + prompt for prompt in prompts]
28
29 # Generate texts from the prompts. The output is a list of RequestOutput objects
30 # that contain the prompt, generated text, and other information.
31 outputs = llm.generate(generating_prompts, sampling_params)
32 # Print the outputs.
33 for output in outputs:
34 prompt = output.prompt
35 generated_text = output.outputs[0].text
36 print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
37
38 print("-" * 80)
39
40 # The llm.generate call will batch all prompts and send the batch at once
41 # if resources allow. The prefix will only be cached after the first batch
42 # is processed, so we need to call generate once to calculate the prefix
43 # and cache it.
44 outputs = llm.generate(generating_prompts[0], sampling_params)
45
46 # Subsequent batches can leverage the cached prefix
47 outputs = llm.generate(generating_prompts, sampling_params)
48
49 # Print the outputs. You should see the same outputs as before
50 for output in outputs:
51 prompt = output.prompt
52 generated_text = output.outputs[0].text
53 print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/offline_inference_with_prefix.py b/examples/offline_inference_with_prefix.py
--- a/examples/offline_inference_with_prefix.py
+++ b/examples/offline_inference_with_prefix.py
@@ -1,5 +1,8 @@
+from time import time
+
from vllm import LLM, SamplingParams
+# Common prefix.
prefix = (
"You are an expert school principal, skilled in effectively managing "
"faculty and staff. Draft 10-15 questions for a potential first grade "
@@ -18,36 +21,60 @@
"The capital of France is",
"The future of AI is",
]
+
+generating_prompts = [prefix + prompt for prompt in prompts]
+
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.0)
# Create an LLM.
-llm = LLM(model="facebook/opt-125m", enable_prefix_caching=True)
+regular_llm = LLM(model="facebook/opt-125m", gpu_memory_utilization=0.4)
-generating_prompts = [prefix + prompt for prompt in prompts]
+prefix_cached_llm = LLM(model="facebook/opt-125m",
+ enable_prefix_caching=True,
+ gpu_memory_utilization=0.4)
+print("Results without `enable_prefix_caching`")
# Generate texts from the prompts. The output is a list of RequestOutput objects
# that contain the prompt, generated text, and other information.
-outputs = llm.generate(generating_prompts, sampling_params)
+start_time_regular = time()
+outputs = regular_llm.generate(generating_prompts, sampling_params)
+duration_regular = time() - start_time_regular
+
+regular_generated_texts = []
# Print the outputs.
for output in outputs:
prompt = output.prompt
generated_text = output.outputs[0].text
+ regular_generated_texts.append(generated_text)
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
print("-" * 80)
# The llm.generate call will batch all prompts and send the batch at once
-# if resources allow. The prefix will only be cached after the first batch
-# is processed, so we need to call generate once to calculate the prefix
-# and cache it.
-outputs = llm.generate(generating_prompts[0], sampling_params)
+# if resources allow.
+start_time_cached = time()
+outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)
+duration_cached = time() - start_time_cached
-# Subsequent batches can leverage the cached prefix
-outputs = llm.generate(generating_prompts, sampling_params)
+print("Results with `enable_prefix_caching`")
-# Print the outputs. You should see the same outputs as before
+cached_generated_texts = []
+# Print the outputs. You should see the same outputs as before.
for output in outputs:
prompt = output.prompt
generated_text = output.outputs[0].text
+ cached_generated_texts.append(generated_text)
print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+print("-" * 80)
+
+# Compare the results and display the speedup
+generated_same = all([
+ regular_generated_texts[i] == cached_generated_texts[i]
+ for i in range(len(prompts))
+])
+print(f"Generated answers are the same: {generated_same}")
+
+speedup = round(duration_regular / duration_cached, 2)
+print(f"Speed up of cached generation compared to the regular is: {speedup}")
|
{"golden_diff": "diff --git a/examples/offline_inference_with_prefix.py b/examples/offline_inference_with_prefix.py\n--- a/examples/offline_inference_with_prefix.py\n+++ b/examples/offline_inference_with_prefix.py\n@@ -1,5 +1,8 @@\n+from time import time\n+\n from vllm import LLM, SamplingParams\n \n+# Common prefix.\n prefix = (\n \"You are an expert school principal, skilled in effectively managing \"\n \"faculty and staff. Draft 10-15 questions for a potential first grade \"\n@@ -18,36 +21,60 @@\n \"The capital of France is\",\n \"The future of AI is\",\n ]\n+\n+generating_prompts = [prefix + prompt for prompt in prompts]\n+\n # Create a sampling params object.\n sampling_params = SamplingParams(temperature=0.0)\n \n # Create an LLM.\n-llm = LLM(model=\"facebook/opt-125m\", enable_prefix_caching=True)\n+regular_llm = LLM(model=\"facebook/opt-125m\", gpu_memory_utilization=0.4)\n \n-generating_prompts = [prefix + prompt for prompt in prompts]\n+prefix_cached_llm = LLM(model=\"facebook/opt-125m\",\n+ enable_prefix_caching=True,\n+ gpu_memory_utilization=0.4)\n+print(\"Results without `enable_prefix_caching`\")\n \n # Generate texts from the prompts. The output is a list of RequestOutput objects\n # that contain the prompt, generated text, and other information.\n-outputs = llm.generate(generating_prompts, sampling_params)\n+start_time_regular = time()\n+outputs = regular_llm.generate(generating_prompts, sampling_params)\n+duration_regular = time() - start_time_regular\n+\n+regular_generated_texts = []\n # Print the outputs.\n for output in outputs:\n prompt = output.prompt\n generated_text = output.outputs[0].text\n+ regular_generated_texts.append(generated_text)\n print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n \n print(\"-\" * 80)\n \n # The llm.generate call will batch all prompts and send the batch at once\n-# if resources allow. The prefix will only be cached after the first batch\n-# is processed, so we need to call generate once to calculate the prefix\n-# and cache it.\n-outputs = llm.generate(generating_prompts[0], sampling_params)\n+# if resources allow.\n+start_time_cached = time()\n+outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)\n+duration_cached = time() - start_time_cached\n \n-# Subsequent batches can leverage the cached prefix\n-outputs = llm.generate(generating_prompts, sampling_params)\n+print(\"Results with `enable_prefix_caching`\")\n \n-# Print the outputs. You should see the same outputs as before\n+cached_generated_texts = []\n+# Print the outputs. 
You should see the same outputs as before.\n for output in outputs:\n prompt = output.prompt\n generated_text = output.outputs[0].text\n+ cached_generated_texts.append(generated_text)\n print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n+\n+print(\"-\" * 80)\n+\n+# Compare the results and display the speedup\n+generated_same = all([\n+ regular_generated_texts[i] == cached_generated_texts[i]\n+ for i in range(len(prompts))\n+])\n+print(f\"Generated answers are the same: {generated_same}\")\n+\n+speedup = round(duration_regular / duration_cached, 2)\n+print(f\"Speed up of cached generation compared to the regular is: {speedup}\")\n", "issue": "[Bug]: Incorrect Example for the Inference with Prefix \n### Your current environment\n\n```text\r\nPyTorch version: 2.3.0+cu121\r\nIs debug build: False\r\nCUDA used to build PyTorch: 12.1\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 22.04.4 LTS (x86_64)\r\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\r\nClang version: Could not collect\r\nCMake version: version 3.29.3\r\nLibc version: glibc-2.35\r\n\r\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\r\nPython platform: Linux-5.15.0-97-generic-x86_64-with-glibc2.35\r\nIs CUDA available: True\r\nCUDA runtime version: 12.2.140\r\nCUDA_MODULE_LOADING set to: LAZY\r\nGPU models and configuration: \r\nGPU 0: NVIDIA A100 80GB PCIe\r\nGPU 1: NVIDIA A100 80GB PCIe\r\nGPU 2: NVIDIA A100 80GB PCIe\r\nGPU 3: NVIDIA A100 80GB PCIe\r\n\r\nNvidia driver version: 535.161.07\r\ncuDNN version: Could not collect\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nArchitecture: x86_64\r\nCPU op-mode(s): 32-bit, 64-bit\r\nAddress sizes: 48 bits physical, 48 bits virtual\r\nByte Order: Little Endian\r\nCPU(s): 48\r\nOn-line CPU(s) list: 0-47\r\nVendor ID: AuthenticAMD\r\nModel name: AMD EPYC 7443 24-Core Processor\r\nCPU family: 25\r\nModel: 1\r\nThread(s) per core: 1\r\nCore(s) per socket: 24\r\nSocket(s): 2\r\nStepping: 1\r\nFrequency boost: enabled\r\nCPU max MHz: 4035,6440\r\nCPU min MHz: 1500,0000\r\nBogoMIPS: 5700.55\r\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 pcid sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 invpcid_single hw_pstate ssbd mba ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 invpcid cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr rdpru wbnoinvd amd_ppin arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold v_vmsave_vmload vgif v_spec_ctrl umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca\r\nVirtualization: AMD-V\r\nL1d cache: 1,5 MiB (48 instances)\r\nL1i cache: 1,5 MiB (48 instances)\r\nL2 cache: 24 MiB (48 instances)\r\nL3 cache: 256 MiB (8 instances)\r\nNUMA node(s): 2\r\nNUMA node0 CPU(s): 0-23\r\nNUMA node1 CPU(s): 24-47\r\nVulnerability Gather data sampling: Not affected\r\nVulnerability Itlb multihit: Not affected\r\nVulnerability L1tf: Not affected\r\nVulnerability Mds: Not affected\r\nVulnerability 
Meltdown: Not affected\r\nVulnerability Mmio stale data: Not affected\r\nVulnerability Retbleed: Not affected\r\nVulnerability Spec rstack overflow: Mitigation; safe RET\r\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\r\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\r\nVulnerability Spectre v2: Mitigation; Retpolines, IBPB conditional, IBRS_FW, STIBP disabled, RSB filling, PBRSB-eIBRS Not affected\r\nVulnerability Srbds: Not affected\r\nVulnerability Tsx async abort: Not affected\r\n\r\nVersions of relevant libraries:\r\n[pip3] flake8==7.0.0\r\n[pip3] flake8-polyfill==1.0.2\r\n[pip3] mypy-protobuf==3.4.0\r\n[pip3] numpy==1.26.4\r\n[pip3] nvidia-nccl-cu12==2.20.5\r\n[pip3] torch==2.3.0\r\n[pip3] torchinfo==1.8.0\r\n[pip3] torchvision==0.18.0\r\n[pip3] triton==2.3.0\r\n[pip3] vllm_nccl_cu11==2.18.1.0.4.0\r\n[pip3] vllm-nccl-cu12==2.18.1.0.4.0\r\n[conda] Could not collectROCM Version: Could not collect\r\nNeuron SDK Version: N/A\r\nvLLM Version: 0.4.3\r\nvLLM Build Flags:\r\nCUDA Archs: Not Set; ROCm: Disabled; Neuron: Disabled\r\nGPU Topology:\r\nGPU0 GPU1 GPU2 GPU3 NIC0 NIC1 NIC2 NIC3 CPU Affinity NUMA Affinity GPU NUMA ID\r\nGPU0 X NODE SYS SYS PHB PHB SYS SYS 0-23 0 N/A\r\nGPU1 NODE X SYS SYS NODE NODE SYS SYS 0-23 0 N/A\r\nGPU2 SYS SYS X NODE SYS SYS NODE NODE 24-47 1 N/A\r\nGPU3 SYS SYS NODE X SYS SYS NODE NODE 24-47 1 N/A\r\nNIC0 PHB NODE SYS SYS X PIX SYS SYS\r\nNIC1 PHB NODE SYS SYS PIX X SYS SYS\r\nNIC2 SYS SYS NODE NODE SYS SYS X PIX\r\nNIC3 SYS SYS NODE NODE SYS SYS PIX X \r\n\r\nLegend:\r\n\r\n X = Self\r\n SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)\r\n NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node\r\n PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)\r\n PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)\r\n PIX = Connection traversing at most a single PCIe bridge\r\n NV# = Connection traversing a bonded set of # NVLinks\r\n\r\nNIC Legend:\r\n\r\n NIC0: mlx5_0\r\n NIC1: mlx5_1\r\n NIC2: mlx5_2\r\n NIC3: mlx5_3\r\n```\r\n\n\n### \ud83d\udc1b Describe the bug\n\nCurrently existing contents of the `vllm/examples/offline_inference_with_prefix.py` have notable issues, introduced during the implementation of `--enable_prefix_caching` in #2762 and its fixes in subsequent PRs. Point of this issue is to display the detected issues and provide a solid ground for upcoming PR to fix them.\r\n\r\n---\r\n### 1. Missleading inference speed comparison\r\n**Intended use**: file structure suggests, that example should contain comparion of inference `w/o prefix_caching` vs `w/ prefix_caching`, comparing the model inference speed for those two options. \r\n**Issue**: for the currently existing implementation, `llm` object is introduced once with `enable_prefix_caching=True`. After that, it is used for generation two times, and those generations are compared with each other. \r\nAs a result, both generations apply `prefix hashing`, and **_resulting inference speed comparison is irrelevant_**. 
\r\n\r\n```python\r\n# [current state of a file]\r\nfrom vllm import LLM, SamplingParams\r\n\r\nprefix = ...\r\nprompts = [...]\r\n\r\nsampling_params = SamplingParams(temperature=0.0)\r\n\r\nllm = LLM(model=\"facebook/opt-125m\", enable_prefix_caching=True)\r\ngenerating_prompts = [prefix + prompt for prompt in prompts]\r\n\r\n# [intended generation w/o prefix_caching]\r\noutputs = llm.generate(generating_prompts, sampling_params)\r\n\r\n...\r\n# [intended generation w/ prefix_caching]\r\n\r\n# The llm.generate call will batch all prompts and send the batch at once\r\n# if resources allow. The prefix will only be cached after the first batch\r\n# is processed, so we need to call generate once to calculate the prefix\r\n# and cache it.\r\noutputs = llm.generate(generating_prompts[0], sampling_params)\r\n\r\n# Subsequent batches can leverage the cached prefix\r\noutputs = llm.generate(generating_prompts, sampling_params)\r\n\r\n```\r\n\r\n**Proposed solution**: create two llm objects, named `regular_llm` and `prefix_cached_llm`, that are `w/o prefix_caching` and `w/ prefix_caching` respectively. Then, run the generation using those two models, obtaining varying results.\r\n\r\n---\r\n### 2. Missleading information\r\n**Intended use**: file should cover usage details and explain how to use `enable_prefix_caching` parameter correctly.\r\n**Issue**: currently, file contents states that _\"prefix will only be cached after the first batch is processed\"_, however my tests disprove that statement, you could test that using the following minimal reproducing code snippet:\r\n```python\r\n# [my code to show that cache works on the fly]\r\nfrom vllm import LLM, SamplingParams\r\nfrom time import time\r\n\r\nprefix = (\r\n \"You are an expert school principal, skilled in effectively managing \"\r\n \"faculty and staff. Draft 10-15 questions for a potential first grade \"\r\n \"Head Teacher for my K-12, all-girls', independent school that emphasizes \"\r\n \"community, joyful discovery, and life-long learning. The candidate is \"\r\n \"coming in for a first-round panel interview for a 8th grade Math \"\r\n \"teaching role. They have 5 years of previous teaching experience \"\r\n \"as an assistant teacher at a co-ed, public school with experience \"\r\n \"in middle school math teaching. 
Based on these information, fulfill \"\r\n \"the following paragraph: \")\r\n\r\nprompts = [\r\n \"Hello, my name is\",\r\n \"The president of the United States is\",\r\n \"The capital of France is\",\r\n \"The future of AI is\",\r\n]\r\n\r\ngenerating_prompts = [prefix + prompt for prompt in prompts]\r\n\r\nsampling_params = SamplingParams(temperature=0.0)\r\nregular_llm = LLM(model=\"facebook/opt-125m\", \r\n gpu_memory_utilization=0.4)\r\n\r\nprefix_cached_llm = LLM(model=\"facebook/opt-125m\", \r\n gpu_memory_utilization=0.4,\r\n enable_prefix_caching=True)\r\n\r\n# execution time \"without prefix caching\"\r\ns = time()\r\noutputs = regular_llm.generate(generating_prompts, sampling_params)\r\nslow_gen_time = time() - s\r\n\r\n# execution time when \"prefix is not cached yet\" \r\n# according to what is written in example file now\r\ns = time()\r\noutputs = prefix_cached_llm.generate(generating_prompts, sampling_params)\r\nfirst_gen_time = time() - s\r\n\r\n# execution time when \"prefix is already cached\" \r\ns = time()\r\noutputs = prefix_cached_llm.generate(generating_prompts, sampling_params)\r\nsecond_gen_time = time() - s\r\n\r\nprint(round(first_gen_time / second_gen_time, 2))\r\nprint(round(slow_gen_time / second_gen_time, 2))\r\n```\r\n\r\nFor my setup: \r\n* speedup for \"without prefix caching\" vs \"prefix is already cached\" is `1.3`, \r\n* speedup for \"prefix is already cached\" vs \"prefix is not cached yet\" is `1.01`. \r\n\r\nAs there is no significant speedup for the second comparison above, I assume that prefix is cached on the fly during the first answer generation by the model. Therefore, the comment on the \"warmup\" is missleading and it should be deleted. \r\n**Proposed solution**: delete missleading comment on the \"warmup\" generation for prefix caching.\n", "before_files": [{"content": "from vllm import LLM, SamplingParams\n\nprefix = (\n \"You are an expert school principal, skilled in effectively managing \"\n \"faculty and staff. Draft 10-15 questions for a potential first grade \"\n \"Head Teacher for my K-12, all-girls', independent school that emphasizes \"\n \"community, joyful discovery, and life-long learning. The candidate is \"\n \"coming in for a first-round panel interview for a 8th grade Math \"\n \"teaching role. They have 5 years of previous teaching experience \"\n \"as an assistant teacher at a co-ed, public school with experience \"\n \"in middle school math teaching. Based on these information, fulfill \"\n \"the following paragraph: \")\n\n# Sample prompts.\nprompts = [\n \"Hello, my name is\",\n \"The president of the United States is\",\n \"The capital of France is\",\n \"The future of AI is\",\n]\n# Create a sampling params object.\nsampling_params = SamplingParams(temperature=0.0)\n\n# Create an LLM.\nllm = LLM(model=\"facebook/opt-125m\", enable_prefix_caching=True)\n\ngenerating_prompts = [prefix + prompt for prompt in prompts]\n\n# Generate texts from the prompts. The output is a list of RequestOutput objects\n# that contain the prompt, generated text, and other information.\noutputs = llm.generate(generating_prompts, sampling_params)\n# Print the outputs.\nfor output in outputs:\n prompt = output.prompt\n generated_text = output.outputs[0].text\n print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n\nprint(\"-\" * 80)\n\n# The llm.generate call will batch all prompts and send the batch at once\n# if resources allow. 
The prefix will only be cached after the first batch\n# is processed, so we need to call generate once to calculate the prefix\n# and cache it.\noutputs = llm.generate(generating_prompts[0], sampling_params)\n\n# Subsequent batches can leverage the cached prefix\noutputs = llm.generate(generating_prompts, sampling_params)\n\n# Print the outputs. You should see the same outputs as before\nfor output in outputs:\n prompt = output.prompt\n generated_text = output.outputs[0].text\n print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n", "path": "examples/offline_inference_with_prefix.py"}], "after_files": [{"content": "from time import time\n\nfrom vllm import LLM, SamplingParams\n\n# Common prefix.\nprefix = (\n \"You are an expert school principal, skilled in effectively managing \"\n \"faculty and staff. Draft 10-15 questions for a potential first grade \"\n \"Head Teacher for my K-12, all-girls', independent school that emphasizes \"\n \"community, joyful discovery, and life-long learning. The candidate is \"\n \"coming in for a first-round panel interview for a 8th grade Math \"\n \"teaching role. They have 5 years of previous teaching experience \"\n \"as an assistant teacher at a co-ed, public school with experience \"\n \"in middle school math teaching. Based on these information, fulfill \"\n \"the following paragraph: \")\n\n# Sample prompts.\nprompts = [\n \"Hello, my name is\",\n \"The president of the United States is\",\n \"The capital of France is\",\n \"The future of AI is\",\n]\n\ngenerating_prompts = [prefix + prompt for prompt in prompts]\n\n# Create a sampling params object.\nsampling_params = SamplingParams(temperature=0.0)\n\n# Create an LLM.\nregular_llm = LLM(model=\"facebook/opt-125m\", gpu_memory_utilization=0.4)\n\nprefix_cached_llm = LLM(model=\"facebook/opt-125m\",\n enable_prefix_caching=True,\n gpu_memory_utilization=0.4)\nprint(\"Results without `enable_prefix_caching`\")\n\n# Generate texts from the prompts. The output is a list of RequestOutput objects\n# that contain the prompt, generated text, and other information.\nstart_time_regular = time()\noutputs = regular_llm.generate(generating_prompts, sampling_params)\nduration_regular = time() - start_time_regular\n\nregular_generated_texts = []\n# Print the outputs.\nfor output in outputs:\n prompt = output.prompt\n generated_text = output.outputs[0].text\n regular_generated_texts.append(generated_text)\n print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n\nprint(\"-\" * 80)\n\n# The llm.generate call will batch all prompts and send the batch at once\n# if resources allow.\nstart_time_cached = time()\noutputs = prefix_cached_llm.generate(generating_prompts, sampling_params)\nduration_cached = time() - start_time_cached\n\nprint(\"Results with `enable_prefix_caching`\")\n\ncached_generated_texts = []\n# Print the outputs. 
You should see the same outputs as before.\nfor output in outputs:\n prompt = output.prompt\n generated_text = output.outputs[0].text\n cached_generated_texts.append(generated_text)\n print(f\"Prompt: {prompt!r}, Generated text: {generated_text!r}\")\n\nprint(\"-\" * 80)\n\n# Compare the results and display the speedup\ngenerated_same = all([\n regular_generated_texts[i] == cached_generated_texts[i]\n for i in range(len(prompts))\n])\nprint(f\"Generated answers are the same: {generated_same}\")\n\nspeedup = round(duration_regular / duration_cached, 2)\nprint(f\"Speed up of cached generation compared to the regular is: {speedup}\")\n", "path": "examples/offline_inference_with_prefix.py"}]}
| 3,928 | 788 |
gh_patches_debug_19028 | rasdani/github-patches | git_diff | coala__coala-319 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multiple dots irritating in console table
```
| . |
| . |
| . |
| 32| 32| if not isinstance(file_diff_dict, dict):
| 33| 33| raise TypeError("file_diff_dict has to be of type dict.")
| 34| 34|
| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))
| | | [NORMAL] LineLengthBear:
| | | Line is longer than allowed. (129 > 120)
| | | The following options are applicable to this result (choose 0 for no action):
```
The dots in the first three lines are irritating. It would be better to put `...` or `[...]` (or something similar) in the line above the code. For example, it should look like this:
```
| 31| 31| [...]
| 32| 32| if not isinstance(file_diff_dict, dict):
| 33| 33| raise TypeError("file_diff_dict has to be of type dict.")
| 34| 34|
| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))
| | | [NORMAL] LineLengthBear:
| | | Line is longer than allowed. (129 > 120)
| | | The following options are applicable to this result (choose 0 for no action):
```
or maybe put the dots inside the table like this:
```
| ...| ...|
| 32| 32| if not isinstance(file_diff_dict, dict):
| 33| 33| raise TypeError("file_diff_dict has to be of type dict.")
| 34| 34|
| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))
| | | [NORMAL] LineLengthBear:
| | | Line is longer than allowed. (129 > 120)
| | | The following options are applicable to this result (choose 0 for no action):
```
Or if that's too complicated just leave them out.
```
| 32| 32| if not isinstance(file_diff_dict, dict):
| 33| 33| raise TypeError("file_diff_dict has to be of type dict.")
| 34| 34|
| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))
| | | [NORMAL] LineLengthBear:
| | | Line is longer than allowed. (129 > 120)
| | | The following options are applicable to this result (choose 0 for no action):
```
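For reference, here is a rough Python sketch of how such a dotted row could be produced with the `"|{:>4}{}{:>4}|{:1}{}"` format string used by `_format_line` in `ConsoleInteractor.py` below (the standalone helper here is only a copy for illustration, not the actual method):

```python
# Illustration only: reuses the _format_line format string from ConsoleInteractor.
def format_line(line, real_nr="", sign="|", mod_nr="", symbol=""):
    return "|{:>4}{}{:>4}|{:1}{}".format(real_nr, sign, mod_nr, symbol, line.rstrip("\n"))

# A single segregation row with dots inside the number columns:
print(format_line(line="", real_nr="...", sign="|", mod_nr="..."))  # -> "| ...| ...| "
```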
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/misc/KeywordBear.py`
Content:
```
1 from coalib.results.Result import Result
2 from coalib.bears.LocalBear import LocalBear
3 from coalib.misc.i18n import _
4
5
6 class KeywordBear(LocalBear):
7 def run(self,
8 filename,
9 file,
10 cs_keywords: list,
11 ci_keywords: list):
12 """
13 Checks the code files for given keywords.
14
15 :param cs_keywords: A list of keywords to search for case sensitively. Usual examples are TODO and FIXME.
16 :param ci_keywords: A list of keywords to search for case insensitively.
17 """
18 results = []
19
20 for i in range(len(ci_keywords)):
21 ci_keywords[i] = ci_keywords[i].lower()
22
23 for line_number, line in enumerate(file):
24 found_kws = []
25 for kw in cs_keywords:
26 if kw in line:
27 found_kws.append(kw)
28
29 for kw in ci_keywords:
30 if kw in line.lower():
31 found_kws.append(kw)
32
33 if found_kws != []:
34 results.append(Result(origin=self,
35 message=_("Line contains the following keywords:") + "\n" + ", ".join(found_kws),
36 file=filename,
37 line_nr=line_number + 1))
38
39 return results
40
```
Path: `coalib/output/ConsoleInteractor.py`
Content:
```
1 from coalib.output.printers.ConsolePrinter import ConsolePrinter
2 from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
3 from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
4 from coalib.output.Interactor import Interactor
5 from coalib.misc.i18n import _
6
7
8 class ConsoleInteractor(Interactor, ConsolePrinter):
9 STR_GET_VAL_FOR_SETTING = _("Please enter a value for the setting \"{}\" ({}) needed by {}: ")
10 STR_LINE_DOESNT_EXIST = _("The line belonging to the following result cannot be printed because it refers to a "
11 "line that doesn't seem to exist in the given file.")
12 STR_PROJECT_WIDE = _("Project wide:")
13
14 def __init__(self,
15 pre_padding: int=3,
16 log_printer=ConsolePrinter()):
17 """
18 A ConsoleInteractor uses the Console to interact with the user.
19
20 :param output: "stdout" or "stderr".
21 :param pre_padding: Number of code lines to show before a result as context.
22 """
23 Interactor.__init__(self, log_printer=log_printer)
24 ConsolePrinter.__init__(self)
25
26 self.pre_padding = pre_padding
27
28 def acquire_settings(self, settings_names_dict):
29 if not isinstance(settings_names_dict, dict):
30 raise TypeError("The settings_names_dict parameter has to be a dictionary.")
31
32 result = {}
33 for setting_name, arr in settings_names_dict.items():
34 value = self._require_setting(setting_name, arr)
35 if value is not None:
36 result[setting_name] = value
37
38 return result
39
40 def _require_setting(self, setting_name, arr):
41 if not isinstance(arr, list) or len(arr) < 2:
42 self.log_printer.log(LOG_LEVEL.WARNING,
43 _("One of the given settings ({}) is not "
44 "properly described.").
45 format(str(setting_name)))
46
47 return None
48
49 if len(arr) == 2:
50 needed = arr[1]
51 else: # Translators: this is the and that connects the last two items of an enumeration (1st, 2nd AND 3rd)
52 needed = ", ".join(arr[1:-1]) + _(" and ") + arr[-1]
53
54 return input(self.STR_GET_VAL_FOR_SETTING.format(str(setting_name),
55 str(arr[0]),
56 needed))
57
58 def _format_line(self, line, real_nr="", sign="|", mod_nr="", symbol="", ):
59 return "|{:>4}{}{:>4}|{:1}{}".format(real_nr, sign, mod_nr, symbol, line.rstrip("\n"))
60
61 def _print_result(self, result):
62 message_string_list = "[{sev}] {bear}:\n{msg}".format(sev=RESULT_SEVERITY.__str__(result.severity),
63 bear=result.origin,
64 msg=result.message).split("\n")
65
66 return self.print("\n".join([self._format_line(line) for line in message_string_list]))
67
68 def _print_actions(self, actions):
69 self.print(self._format_line(
70 _("The following options are applicable to this result (choose "
71 "0 for no action):")))
72
73 choice = self._choose_action(actions)
74
75 if choice == 0:
76 return None, None
77
78 return self._get_action_info(actions[choice - 1])
79
80 def _choose_action(self, actions):
81 while True:
82 for i, action in enumerate(actions):
83 self.print(self._format_line("{:>2}: {}".format(i + 1, action.desc)))
84
85 try:
86 line = self._format_line(_("Please enter the number of the "
87 "action you want to execute. "))
88 choice = int(input(line))
89 if 0 <= choice <= len(actions):
90 return choice
91 except ValueError:
92 pass
93
94 self.print(self._format_line(_("Please enter a valid number.")))
95
96 def _get_action_info(self, action):
97 # Otherwise we have a recursive import
98 from coalib.settings.Section import Section
99 from coalib.settings.Setting import Setting
100
101 params = action.non_optional_params
102 section = Section("")
103
104 for param_name in params:
105 question = self._format_line(
106 _("Please enter a value for the parameter '{}' ({}): ")
107 .format(param_name, params[param_name][0]))
108 section.append(Setting(param_name, input(question)))
109
110 return action.name, section
111
112 def _print_segregation(self, n=3):
113 self.print("\n".join(self._format_line(line="", sign=".") for i in range(n)))
114
115 def _print_lines(self, file_dict, current_line, result_line, result_file):
116 """
117 Prints the lines between the current and the result line. If needed they will be shortened.
118 """
119 line_delta = result_line - current_line
120
121 if line_delta > self.pre_padding:
122 self._print_segregation()
123
124 for i in range(max(result_line - self.pre_padding, 1), result_line + 1):
125 self.print(self._format_line(line=file_dict[result_file][i - 1],
126 real_nr=i,
127 mod_nr=i))
128 else:
129 for i in range(1, line_delta + 1):
130 self.print(self._format_line(line=file_dict[result_file][current_line + i - 1],
131 real_nr=current_line + i,
132 mod_nr=current_line + i))
133
134 def print_results(self, result_list, file_dict):
135 if not isinstance(result_list, list):
136 raise TypeError("result_list should be of type list")
137 if not isinstance(file_dict, dict):
138 raise TypeError("file_dict should be of type dict")
139
140 current_file = False # We can't use None since we need line 109 be executed if file of first result is None
141 current_line = 0
142
143 for result in sorted(result_list):
144 if result.file != current_file:
145 if result.file in file_dict or result.file is None:
146 current_file = result.file
147 current_line = 0
148 self.print("\n\n{}".format(current_file if current_file is not None else self.STR_PROJECT_WIDE))
149 else:
150 self.log_printer.warn(_("A result ({}) cannot be printed because it refers to a file that doesn't"
151 " seem to exist.").format(str(result)))
152 continue
153
154 if result.line_nr is not None:
155 if current_file is None:
156 raise AssertionError("A result with a line_nr should also have a file.")
157 if result.line_nr < current_line: # pragma: no cover
158 raise AssertionError("The sorting of the results doesn't work correctly.")
159 if len(file_dict[result.file]) < result.line_nr - 1:
160 self.print(self._format_line(line=self.STR_LINE_DOESNT_EXIST))
161 else:
162 self._print_lines(file_dict, current_line, result.line_nr, result.file)
163 current_line = result.line_nr
164
165 self.print_result(result, file_dict)
166
167 def begin_section(self, name):
168 self.print(_("Executing section {name}...").format(name=name))
169
170 def did_nothing(self):
171 self.print(_("No existent section was targeted or enabled. Nothing "
172 "to do."))
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bears/misc/KeywordBear.py b/bears/misc/KeywordBear.py
--- a/bears/misc/KeywordBear.py
+++ b/bears/misc/KeywordBear.py
@@ -12,8 +12,8 @@
"""
Checks the code files for given keywords.
- :param cs_keywords: A list of keywords to search for case sensitively. Usual examples are TODO and FIXME.
- :param ci_keywords: A list of keywords to search for case insensitively.
+ :param cs_keywords: A list of keywords to search for (case sensitive). Usual examples are TODO and FIXME.
+ :param ci_keywords: A list of keywords to search for (case insensitive).
"""
results = []
diff --git a/coalib/output/ConsoleInteractor.py b/coalib/output/ConsoleInteractor.py
--- a/coalib/output/ConsoleInteractor.py
+++ b/coalib/output/ConsoleInteractor.py
@@ -109,8 +109,8 @@
return action.name, section
- def _print_segregation(self, n=3):
- self.print("\n".join(self._format_line(line="", sign=".") for i in range(n)))
+ def _print_segregation(self):
+ self.print(self._format_line(line="", real_nr="...", sign="|", mod_nr="..."))
def _print_lines(self, file_dict, current_line, result_line, result_file):
"""
|
{"golden_diff": "diff --git a/bears/misc/KeywordBear.py b/bears/misc/KeywordBear.py\n--- a/bears/misc/KeywordBear.py\n+++ b/bears/misc/KeywordBear.py\n@@ -12,8 +12,8 @@\n \"\"\"\n Checks the code files for given keywords.\n \n- :param cs_keywords: A list of keywords to search for case sensitively. Usual examples are TODO and FIXME.\n- :param ci_keywords: A list of keywords to search for case insensitively.\n+ :param cs_keywords: A list of keywords to search for (case sensitive). Usual examples are TODO and FIXME.\n+ :param ci_keywords: A list of keywords to search for (case insensitive).\n \"\"\"\n results = []\n \ndiff --git a/coalib/output/ConsoleInteractor.py b/coalib/output/ConsoleInteractor.py\n--- a/coalib/output/ConsoleInteractor.py\n+++ b/coalib/output/ConsoleInteractor.py\n@@ -109,8 +109,8 @@\n \n return action.name, section\n \n- def _print_segregation(self, n=3):\n- self.print(\"\\n\".join(self._format_line(line=\"\", sign=\".\") for i in range(n)))\n+ def _print_segregation(self):\n+ self.print(self._format_line(line=\"\", real_nr=\"...\", sign=\"|\", mod_nr=\"...\"))\n \n def _print_lines(self, file_dict, current_line, result_line, result_file):\n \"\"\"\n", "issue": "Multiple dots irritating in console table\n```\n| . | \n| . | \n| . | \n| 32| 32| if not isinstance(file_diff_dict, dict):\n| 33| 33| raise TypeError(\"file_diff_dict has to be of type dict.\")\n| 34| 34| \n| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))\n| | | [NORMAL] LineLengthBear:\n| | | Line is longer than allowed. (129 > 120)\n| | | The following options are applicable to this result (choose 0 for no action):\n```\n\nThe dots in the first three lines are irritating. Putting in the line above the code `...` or `[...]` (or even something else).\nSo it should look for example like:\n\n```\n| 31| 31| [...]\n| 32| 32| if not isinstance(file_diff_dict, dict):\n| 33| 33| raise TypeError(\"file_diff_dict has to be of type dict.\")\n| 34| 34| \n| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))\n| | | [NORMAL] LineLengthBear:\n| | | Line is longer than allowed. (129 > 120)\n| | | The following options are applicable to this result (choose 0 for no action):\n```\n\nor maybe put the dots inside the table like this:\n\n```\n| ...| ...|\n| 32| 32| if not isinstance(file_diff_dict, dict):\n| 33| 33| raise TypeError(\"file_diff_dict has to be of type dict.\")\n| 34| 34| \n| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))\n| | | [NORMAL] LineLengthBear:\n| | | Line is longer than allowed. (129 > 120)\n| | | The following options are applicable to this result (choose 0 for no action):\n```\n\nOr if that's too complicated just leave them out.\n\n```\n| 32| 32| if not isinstance(file_diff_dict, dict):\n| 33| 33| raise TypeError(\"file_diff_dict has to be of type dict.\")\n| 34| 34| \n| 35| 35| return self.apply(result, original_file_dict, file_diff_dict, **self.get_metadata().create_params_from_section(section))\n| | | [NORMAL] LineLengthBear:\n| | | Line is longer than allowed. 
(129 > 120)\n| | | The following options are applicable to this result (choose 0 for no action):\n```\n\n", "before_files": [{"content": "from coalib.results.Result import Result\nfrom coalib.bears.LocalBear import LocalBear\nfrom coalib.misc.i18n import _\n\n\nclass KeywordBear(LocalBear):\n def run(self,\n filename,\n file,\n cs_keywords: list,\n ci_keywords: list):\n \"\"\"\n Checks the code files for given keywords.\n\n :param cs_keywords: A list of keywords to search for case sensitively. Usual examples are TODO and FIXME.\n :param ci_keywords: A list of keywords to search for case insensitively.\n \"\"\"\n results = []\n\n for i in range(len(ci_keywords)):\n ci_keywords[i] = ci_keywords[i].lower()\n\n for line_number, line in enumerate(file):\n found_kws = []\n for kw in cs_keywords:\n if kw in line:\n found_kws.append(kw)\n\n for kw in ci_keywords:\n if kw in line.lower():\n found_kws.append(kw)\n\n if found_kws != []:\n results.append(Result(origin=self,\n message=_(\"Line contains the following keywords:\") + \"\\n\" + \", \".join(found_kws),\n file=filename,\n line_nr=line_number + 1))\n\n return results\n", "path": "bears/misc/KeywordBear.py"}, {"content": "from coalib.output.printers.ConsolePrinter import ConsolePrinter\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL\nfrom coalib.output.Interactor import Interactor\nfrom coalib.misc.i18n import _\n\n\nclass ConsoleInteractor(Interactor, ConsolePrinter):\n STR_GET_VAL_FOR_SETTING = _(\"Please enter a value for the setting \\\"{}\\\" ({}) needed by {}: \")\n STR_LINE_DOESNT_EXIST = _(\"The line belonging to the following result cannot be printed because it refers to a \"\n \"line that doesn't seem to exist in the given file.\")\n STR_PROJECT_WIDE = _(\"Project wide:\")\n\n def __init__(self,\n pre_padding: int=3,\n log_printer=ConsolePrinter()):\n \"\"\"\n A ConsoleInteractor uses the Console to interact with the user.\n\n :param output: \"stdout\" or \"stderr\".\n :param pre_padding: Number of code lines to show before a result as context.\n \"\"\"\n Interactor.__init__(self, log_printer=log_printer)\n ConsolePrinter.__init__(self)\n\n self.pre_padding = pre_padding\n\n def acquire_settings(self, settings_names_dict):\n if not isinstance(settings_names_dict, dict):\n raise TypeError(\"The settings_names_dict parameter has to be a dictionary.\")\n\n result = {}\n for setting_name, arr in settings_names_dict.items():\n value = self._require_setting(setting_name, arr)\n if value is not None:\n result[setting_name] = value\n\n return result\n\n def _require_setting(self, setting_name, arr):\n if not isinstance(arr, list) or len(arr) < 2:\n self.log_printer.log(LOG_LEVEL.WARNING,\n _(\"One of the given settings ({}) is not \"\n \"properly described.\").\n format(str(setting_name)))\n\n return None\n\n if len(arr) == 2:\n needed = arr[1]\n else: # Translators: this is the and that connects the last two items of an enumeration (1st, 2nd AND 3rd)\n needed = \", \".join(arr[1:-1]) + _(\" and \") + arr[-1]\n\n return input(self.STR_GET_VAL_FOR_SETTING.format(str(setting_name),\n str(arr[0]),\n needed))\n\n def _format_line(self, line, real_nr=\"\", sign=\"|\", mod_nr=\"\", symbol=\"\", ):\n return \"|{:>4}{}{:>4}|{:1}{}\".format(real_nr, sign, mod_nr, symbol, line.rstrip(\"\\n\"))\n\n def _print_result(self, result):\n message_string_list = \"[{sev}] {bear}:\\n{msg}\".format(sev=RESULT_SEVERITY.__str__(result.severity),\n bear=result.origin,\n 
msg=result.message).split(\"\\n\")\n\n return self.print(\"\\n\".join([self._format_line(line) for line in message_string_list]))\n\n def _print_actions(self, actions):\n self.print(self._format_line(\n _(\"The following options are applicable to this result (choose \"\n \"0 for no action):\")))\n\n choice = self._choose_action(actions)\n\n if choice == 0:\n return None, None\n\n return self._get_action_info(actions[choice - 1])\n\n def _choose_action(self, actions):\n while True:\n for i, action in enumerate(actions):\n self.print(self._format_line(\"{:>2}: {}\".format(i + 1, action.desc)))\n\n try:\n line = self._format_line(_(\"Please enter the number of the \"\n \"action you want to execute. \"))\n choice = int(input(line))\n if 0 <= choice <= len(actions):\n return choice\n except ValueError:\n pass\n\n self.print(self._format_line(_(\"Please enter a valid number.\")))\n\n def _get_action_info(self, action):\n # Otherwise we have a recursive import\n from coalib.settings.Section import Section\n from coalib.settings.Setting import Setting\n\n params = action.non_optional_params\n section = Section(\"\")\n\n for param_name in params:\n question = self._format_line(\n _(\"Please enter a value for the parameter '{}' ({}): \")\n .format(param_name, params[param_name][0]))\n section.append(Setting(param_name, input(question)))\n\n return action.name, section\n\n def _print_segregation(self, n=3):\n self.print(\"\\n\".join(self._format_line(line=\"\", sign=\".\") for i in range(n)))\n\n def _print_lines(self, file_dict, current_line, result_line, result_file):\n \"\"\"\n Prints the lines between the current and the result line. If needed they will be shortened.\n \"\"\"\n line_delta = result_line - current_line\n\n if line_delta > self.pre_padding:\n self._print_segregation()\n\n for i in range(max(result_line - self.pre_padding, 1), result_line + 1):\n self.print(self._format_line(line=file_dict[result_file][i - 1],\n real_nr=i,\n mod_nr=i))\n else:\n for i in range(1, line_delta + 1):\n self.print(self._format_line(line=file_dict[result_file][current_line + i - 1],\n real_nr=current_line + i,\n mod_nr=current_line + i))\n\n def print_results(self, result_list, file_dict):\n if not isinstance(result_list, list):\n raise TypeError(\"result_list should be of type list\")\n if not isinstance(file_dict, dict):\n raise TypeError(\"file_dict should be of type dict\")\n\n current_file = False # We can't use None since we need line 109 be executed if file of first result is None\n current_line = 0\n\n for result in sorted(result_list):\n if result.file != current_file:\n if result.file in file_dict or result.file is None:\n current_file = result.file\n current_line = 0\n self.print(\"\\n\\n{}\".format(current_file if current_file is not None else self.STR_PROJECT_WIDE))\n else:\n self.log_printer.warn(_(\"A result ({}) cannot be printed because it refers to a file that doesn't\"\n \" seem to exist.\").format(str(result)))\n continue\n\n if result.line_nr is not None:\n if current_file is None:\n raise AssertionError(\"A result with a line_nr should also have a file.\")\n if result.line_nr < current_line: # pragma: no cover\n raise AssertionError(\"The sorting of the results doesn't work correctly.\")\n if len(file_dict[result.file]) < result.line_nr - 1:\n self.print(self._format_line(line=self.STR_LINE_DOESNT_EXIST))\n else:\n self._print_lines(file_dict, current_line, result.line_nr, result.file)\n current_line = result.line_nr\n\n self.print_result(result, file_dict)\n\n def begin_section(self, 
name):\n self.print(_(\"Executing section {name}...\").format(name=name))\n\n def did_nothing(self):\n self.print(_(\"No existent section was targeted or enabled. Nothing \"\n \"to do.\"))\n", "path": "coalib/output/ConsoleInteractor.py"}], "after_files": [{"content": "from coalib.results.Result import Result\nfrom coalib.bears.LocalBear import LocalBear\nfrom coalib.misc.i18n import _\n\n\nclass KeywordBear(LocalBear):\n def run(self,\n filename,\n file,\n cs_keywords: list,\n ci_keywords: list):\n \"\"\"\n Checks the code files for given keywords.\n\n :param cs_keywords: A list of keywords to search for (case sensitive). Usual examples are TODO and FIXME.\n :param ci_keywords: A list of keywords to search for (case insensitive).\n \"\"\"\n results = []\n\n for i in range(len(ci_keywords)):\n ci_keywords[i] = ci_keywords[i].lower()\n\n for line_number, line in enumerate(file):\n found_kws = []\n for kw in cs_keywords:\n if kw in line:\n found_kws.append(kw)\n\n for kw in ci_keywords:\n if kw in line.lower():\n found_kws.append(kw)\n\n if found_kws != []:\n results.append(Result(origin=self,\n message=_(\"Line contains the following keywords:\") + \"\\n\" + \", \".join(found_kws),\n file=filename,\n line_nr=line_number + 1))\n\n return results\n", "path": "bears/misc/KeywordBear.py"}, {"content": "from coalib.output.printers.ConsolePrinter import ConsolePrinter\nfrom coalib.results.RESULT_SEVERITY import RESULT_SEVERITY\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL\nfrom coalib.output.Interactor import Interactor\nfrom coalib.misc.i18n import _\n\n\nclass ConsoleInteractor(Interactor, ConsolePrinter):\n STR_GET_VAL_FOR_SETTING = _(\"Please enter a value for the setting \\\"{}\\\" ({}) needed by {}: \")\n STR_LINE_DOESNT_EXIST = _(\"The line belonging to the following result cannot be printed because it refers to a \"\n \"line that doesn't seem to exist in the given file.\")\n STR_PROJECT_WIDE = _(\"Project wide:\")\n\n def __init__(self,\n pre_padding: int=3,\n log_printer=ConsolePrinter()):\n \"\"\"\n A ConsoleInteractor uses the Console to interact with the user.\n\n :param output: \"stdout\" or \"stderr\".\n :param pre_padding: Number of code lines to show before a result as context.\n \"\"\"\n Interactor.__init__(self, log_printer=log_printer)\n ConsolePrinter.__init__(self)\n\n self.pre_padding = pre_padding\n\n def acquire_settings(self, settings_names_dict):\n if not isinstance(settings_names_dict, dict):\n raise TypeError(\"The settings_names_dict parameter has to be a dictionary.\")\n\n result = {}\n for setting_name, arr in settings_names_dict.items():\n value = self._require_setting(setting_name, arr)\n if value is not None:\n result[setting_name] = value\n\n return result\n\n def _require_setting(self, setting_name, arr):\n if not isinstance(arr, list) or len(arr) < 2:\n self.log_printer.log(LOG_LEVEL.WARNING,\n _(\"One of the given settings ({}) is not \"\n \"properly described.\").\n format(str(setting_name)))\n\n return None\n\n if len(arr) == 2:\n needed = arr[1]\n else: # Translators: this is the and that connects the last two items of an enumeration (1st, 2nd AND 3rd)\n needed = \", \".join(arr[1:-1]) + _(\" and \") + arr[-1]\n\n return input(self.STR_GET_VAL_FOR_SETTING.format(str(setting_name),\n str(arr[0]),\n needed))\n\n def _format_line(self, line, real_nr=\"\", sign=\"|\", mod_nr=\"\", symbol=\"\", ):\n return \"|{:>4}{}{:>4}|{:1}{}\".format(real_nr, sign, mod_nr, symbol, line.rstrip(\"\\n\"))\n\n def _print_result(self, result):\n message_string_list = 
\"[{sev}] {bear}:\\n{msg}\".format(sev=RESULT_SEVERITY.__str__(result.severity),\n bear=result.origin,\n msg=result.message).split(\"\\n\")\n\n return self.print(\"\\n\".join([self._format_line(line) for line in message_string_list]))\n\n def _print_actions(self, actions):\n self.print(self._format_line(\n _(\"The following options are applicable to this result (choose \"\n \"0 for no action):\")))\n\n choice = self._choose_action(actions)\n\n if choice == 0:\n return None, None\n\n return self._get_action_info(actions[choice - 1])\n\n def _choose_action(self, actions):\n while True:\n for i, action in enumerate(actions):\n self.print(self._format_line(\"{:>2}: {}\".format(i + 1, action.desc)))\n\n try:\n line = self._format_line(_(\"Please enter the number of the \"\n \"action you want to execute. \"))\n choice = int(input(line))\n if 0 <= choice <= len(actions):\n return choice\n except ValueError:\n pass\n\n self.print(self._format_line(_(\"Please enter a valid number.\")))\n\n def _get_action_info(self, action):\n # Otherwise we have a recursive import\n from coalib.settings.Section import Section\n from coalib.settings.Setting import Setting\n\n params = action.non_optional_params\n section = Section(\"\")\n\n for param_name in params:\n question = self._format_line(\n _(\"Please enter a value for the parameter '{}' ({}): \")\n .format(param_name, params[param_name][0]))\n section.append(Setting(param_name, input(question)))\n\n return action.name, section\n\n def _print_segregation(self):\n self.print(self._format_line(line=\"\", real_nr=\"...\", sign=\"|\", mod_nr=\"...\"))\n\n def _print_lines(self, file_dict, current_line, result_line, result_file):\n \"\"\"\n Prints the lines between the current and the result line. If needed they will be shortened.\n \"\"\"\n line_delta = result_line - current_line\n\n if line_delta > self.pre_padding:\n self._print_segregation()\n\n for i in range(max(result_line - self.pre_padding, 1), result_line + 1):\n self.print(self._format_line(line=file_dict[result_file][i - 1],\n real_nr=i,\n mod_nr=i))\n else:\n for i in range(1, line_delta + 1):\n self.print(self._format_line(line=file_dict[result_file][current_line + i - 1],\n real_nr=current_line + i,\n mod_nr=current_line + i))\n\n def print_results(self, result_list, file_dict):\n if not isinstance(result_list, list):\n raise TypeError(\"result_list should be of type list\")\n if not isinstance(file_dict, dict):\n raise TypeError(\"file_dict should be of type dict\")\n\n current_file = False # We can't use None since we need line 109 be executed if file of first result is None\n current_line = 0\n\n for result in sorted(result_list):\n if result.file != current_file:\n if result.file in file_dict or result.file is None:\n current_file = result.file\n current_line = 0\n self.print(\"\\n\\n{}\".format(current_file if current_file is not None else self.STR_PROJECT_WIDE))\n else:\n self.log_printer.warn(_(\"A result ({}) cannot be printed because it refers to a file that doesn't\"\n \" seem to exist.\").format(str(result)))\n continue\n\n if result.line_nr is not None:\n if current_file is None:\n raise AssertionError(\"A result with a line_nr should also have a file.\")\n if result.line_nr < current_line: # pragma: no cover\n raise AssertionError(\"The sorting of the results doesn't work correctly.\")\n if len(file_dict[result.file]) < result.line_nr - 1:\n self.print(self._format_line(line=self.STR_LINE_DOESNT_EXIST))\n else:\n self._print_lines(file_dict, current_line, result.line_nr, 
result.file)\n current_line = result.line_nr\n\n self.print_result(result, file_dict)\n\n def begin_section(self, name):\n self.print(_(\"Executing section {name}...\").format(name=name))\n\n def did_nothing(self):\n self.print(_(\"No existent section was targeted or enabled. Nothing \"\n \"to do.\"))\n", "path": "coalib/output/ConsoleInteractor.py"}]}
| 3,271 | 321 |
gh_patches_debug_34370 | rasdani/github-patches | git_diff | learningequality__kolibri-5413 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Select all not working in groups
### Observed behavior
In the Coach tab, while enrolling learners into a group, first select a single user, then select the "Select all" option, and then enroll. Only one user ends up enrolled in the group.
### Expected behavior
When "Select all" is selected, all users should be enrolled in the group.
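A toy sketch of the observed vs. expected behaviour (all names below are invented for illustration and are not taken from the Kolibri code):

```python
# Hypothetical illustration only - not Kolibri code.
selected_learners = ["learner_a", "learner_b", "learner_c"]  # "Select all" picked three users

def enroll_observed(selection):
    # Buggy behaviour: only the individually clicked user gets enrolled.
    return selection[:1]

def enroll_expected(selection):
    # Expected behaviour: every selected learner is enrolled in the group.
    return list(selection)

print(enroll_observed(selected_learners))  # ['learner_a'] -> only one enrolment
print(enroll_expected(selected_learners))  # all three learners enrolled
```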
### Steps to reproduce
1. Create the group.
2. Select one user while enrolling.
3. Then select the "Select all" option in the group.
4. Click on enroll.
5. It will enroll only one user in the group.
### Context
* Kolibri version : Kolibri 0.12.2
* Operating system : Ubuntu 14.04
* Browser : Chrome
### Screenshots





--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/logger/api.py`
Content:
```
1 from django.core.exceptions import ObjectDoesNotExist
2 from django.db.models.query import F
3 from django.http import Http404
4 from django_filters import ModelChoiceFilter
5 from django_filters.rest_framework import CharFilter
6 from django_filters.rest_framework import DjangoFilterBackend
7 from django_filters.rest_framework import FilterSet
8 from rest_framework import filters
9 from rest_framework import viewsets
10 from rest_framework.response import Response
11
12 from .models import AttemptLog
13 from .models import ContentSessionLog
14 from .models import ContentSummaryLog
15 from .models import ExamAttemptLog
16 from .models import ExamLog
17 from .models import MasteryLog
18 from .models import UserSessionLog
19 from .permissions import ExamActivePermissions
20 from .serializers import AttemptLogSerializer
21 from .serializers import ContentSessionLogSerializer
22 from .serializers import ContentSummaryLogSerializer
23 from .serializers import ExamAttemptLogSerializer
24 from .serializers import ExamLogSerializer
25 from .serializers import MasteryLogSerializer
26 from .serializers import TotalContentProgressSerializer
27 from .serializers import UserSessionLogSerializer
28 from kolibri.core.auth.api import KolibriAuthPermissions
29 from kolibri.core.auth.api import KolibriAuthPermissionsFilter
30 from kolibri.core.auth.filters import HierarchyRelationsFilter
31 from kolibri.core.auth.models import Classroom
32 from kolibri.core.auth.models import Collection
33 from kolibri.core.auth.models import Facility
34 from kolibri.core.auth.models import FacilityUser
35 from kolibri.core.auth.models import LearnerGroup
36 from kolibri.core.content.api import OptionalPageNumberPagination
37 from kolibri.core.exams.models import Exam
38
39
40 class BaseLogFilter(FilterSet):
41 facility = ModelChoiceFilter(
42 method="filter_facility", queryset=Facility.objects.all()
43 )
44 classroom = ModelChoiceFilter(
45 method="filter_classroom", queryset=Classroom.objects.all()
46 )
47 learner_group = ModelChoiceFilter(
48 method="filter_learner_group", queryset=LearnerGroup.objects.all()
49 )
50
51 # Only a superuser can filter by facilities
52 def filter_facility(self, queryset, name, value):
53 return queryset.filter(user__facility=value)
54
55 def filter_classroom(self, queryset, name, value):
56 return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
57 ancestor_collection=value, target_user=F("user")
58 )
59
60 def filter_learner_group(self, queryset, name, value):
61 return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
62 ancestor_collection=value, target_user=F("user")
63 )
64
65
66 class LoggerViewSet(viewsets.ModelViewSet):
67 def update(self, request, *args, **kwargs):
68 partial = kwargs.pop("partial", False)
69 model = self.queryset.model
70 lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
71 try:
72 instance = model.objects.get(id=self.kwargs[lookup_url_kwarg])
73 self.check_object_permissions(request, instance)
74 except (ValueError, ObjectDoesNotExist):
75 raise Http404
76 serializer = self.get_serializer(instance, data=request.data, partial=partial)
77 serializer.is_valid(raise_exception=True)
78 self.perform_update(serializer)
79
80 if getattr(instance, "_prefetched_objects_cache", None):
81 # If 'prefetch_related' has been applied to a queryset, we need to
82 # forcibly invalidate the prefetch cache on the instance.
83 instance._prefetched_objects_cache = {}
84 default_response = dict(request.data)
85 # First look if the computed fields to be updated are listed:
86 updating_fields = getattr(serializer.root, "update_fields", None)
87 # If not, fetch all the fields that are computed methods:
88 if updating_fields is None:
89 updating_fields = [
90 field
91 for field in serializer.fields
92 if getattr(serializer.fields[field], "method_name", None)
93 ]
94 for field in updating_fields:
95 method_name = getattr(serializer.fields[field], "method_name", None)
96 if method_name:
97 method = getattr(serializer.root, method_name)
98 default_response[field] = method(instance)
99 return Response(default_response)
100
101
102 class ContentSessionLogFilter(BaseLogFilter):
103 class Meta:
104 model = ContentSessionLog
105 fields = ["user_id", "content_id"]
106
107
108 class ContentSessionLogViewSet(LoggerViewSet):
109 permission_classes = (KolibriAuthPermissions,)
110 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
111 queryset = ContentSessionLog.objects.all()
112 serializer_class = ContentSessionLogSerializer
113 pagination_class = OptionalPageNumberPagination
114 filter_class = ContentSessionLogFilter
115
116
117 class ContentSummaryLogFilter(BaseLogFilter):
118 class Meta:
119 model = ContentSummaryLog
120 fields = ["user_id", "content_id"]
121
122
123 class ContentSummaryLogViewSet(LoggerViewSet):
124 permission_classes = (KolibriAuthPermissions,)
125 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
126 queryset = ContentSummaryLog.objects.all()
127 serializer_class = ContentSummaryLogSerializer
128 pagination_class = OptionalPageNumberPagination
129 filter_class = ContentSummaryLogFilter
130
131
132 class TotalContentProgressViewSet(viewsets.ModelViewSet):
133 permission_classes = (KolibriAuthPermissions,)
134 filter_backends = (KolibriAuthPermissionsFilter,)
135 queryset = FacilityUser.objects.all()
136 serializer_class = TotalContentProgressSerializer
137
138
139 class UserSessionLogFilter(BaseLogFilter):
140 class Meta:
141 model = UserSessionLog
142 fields = ["user_id"]
143
144
145 class UserSessionLogViewSet(LoggerViewSet):
146 permission_classes = (KolibriAuthPermissions,)
147 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
148 queryset = UserSessionLog.objects.all()
149 serializer_class = UserSessionLogSerializer
150 pagination_class = OptionalPageNumberPagination
151 filter_class = UserSessionLogFilter
152
153
154 class MasteryFilter(FilterSet):
155 class Meta:
156 model = MasteryLog
157 fields = ["summarylog"]
158
159
160 class MasteryLogViewSet(LoggerViewSet):
161 permission_classes = (KolibriAuthPermissions,)
162 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
163 queryset = MasteryLog.objects.all()
164 serializer_class = MasteryLogSerializer
165 pagination_class = OptionalPageNumberPagination
166 filter_class = MasteryFilter
167
168
169 class AttemptFilter(BaseLogFilter):
170 content = CharFilter(method="filter_content")
171
172 def filter_content(self, queryset, name, value):
173 return queryset.filter(masterylog__summarylog__content_id=value)
174
175 class Meta:
176 model = AttemptLog
177 fields = ["masterylog", "complete", "user", "content", "item"]
178
179
180 class AttemptLogViewSet(LoggerViewSet):
181 permission_classes = (KolibriAuthPermissions,)
182 filter_backends = (
183 KolibriAuthPermissionsFilter,
184 DjangoFilterBackend,
185 filters.OrderingFilter,
186 )
187 queryset = AttemptLog.objects.all()
188 serializer_class = AttemptLogSerializer
189 pagination_class = OptionalPageNumberPagination
190 filter_class = AttemptFilter
191 ordering_fields = ("end_timestamp",)
192 ordering = ("end_timestamp",)
193
194
195 class ExamAttemptFilter(BaseLogFilter):
196 exam = ModelChoiceFilter(method="filter_exam", queryset=Exam.objects.all())
197 user = ModelChoiceFilter(method="filter_user", queryset=FacilityUser.objects.all())
198 content = CharFilter(field_name="content_id")
199
200 def filter_exam(self, queryset, name, value):
201 return queryset.filter(examlog__exam=value)
202
203 def filter_user(self, queryset, name, value):
204 return queryset.filter(examlog__user=value)
205
206 class Meta:
207 model = ExamAttemptLog
208 fields = ["examlog", "exam", "user", "content", "item"]
209
210
211 class ExamAttemptLogViewSet(LoggerViewSet):
212 permission_classes = (ExamActivePermissions, KolibriAuthPermissions)
213 filter_backends = (
214 KolibriAuthPermissionsFilter,
215 DjangoFilterBackend,
216 filters.OrderingFilter,
217 )
218 queryset = ExamAttemptLog.objects.all()
219 serializer_class = ExamAttemptLogSerializer
220 pagination_class = OptionalPageNumberPagination
221 filter_class = ExamAttemptFilter
222
223
224 class ExamLogFilter(BaseLogFilter):
225
226 collection = ModelChoiceFilter(
227 method="filter_collection", queryset=Collection.objects.all()
228 )
229
230 def filter_collection(self, queryset, name, collection):
231 return HierarchyRelationsFilter(queryset).filter_by_hierarchy(
232 target_user=F("user"), ancestor_collection=collection
233 )
234
235 class Meta:
236 model = ExamLog
237 fields = ["user", "exam"]
238
239
240 class ExamLogViewSet(viewsets.ModelViewSet):
241 permission_classes = (KolibriAuthPermissions,)
242 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
243 queryset = ExamLog.objects.all()
244 serializer_class = ExamLogSerializer
245 pagination_class = OptionalPageNumberPagination
246 filter_class = ExamLogFilter
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/core/logger/api.py b/kolibri/core/logger/api.py
--- a/kolibri/core/logger/api.py
+++ b/kolibri/core/logger/api.py
@@ -1,12 +1,17 @@
+import logging
+
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.query import F
+from django.db.utils import IntegrityError
from django.http import Http404
from django_filters import ModelChoiceFilter
from django_filters.rest_framework import CharFilter
from django_filters.rest_framework import DjangoFilterBackend
from django_filters.rest_framework import FilterSet
from rest_framework import filters
+from rest_framework import status
from rest_framework import viewsets
+from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from .models import AttemptLog
@@ -36,6 +41,8 @@
from kolibri.core.content.api import OptionalPageNumberPagination
from kolibri.core.exams.models import Exam
+logger = logging.getLogger(__name__)
+
class BaseLogFilter(FilterSet):
facility = ModelChoiceFilter(
@@ -98,6 +105,21 @@
default_response[field] = method(instance)
return Response(default_response)
+ def create(self, request, *args, **kwargs):
+ try:
+ return super(LoggerViewSet, self).create(request, *args, **kwargs)
+ except IntegrityError:
+ # The object has been created previously: let's calculate its id and return it
+ serializer = self.get_serializer(data=request.data)
+ serializer.is_valid(raise_exception=True)
+ obj = serializer.Meta.model(**serializer.validated_data)
+ obj.id = obj.calculate_uuid()
+ final_obj = self.get_serializer(obj)
+ return Response(final_obj.data)
+ except ValidationError as e:
+ logger.error("Failed to validate data: {}".format(e))
+ return Response(request.data, status.HTTP_400_BAD_REQUEST)
+
class ContentSessionLogFilter(BaseLogFilter):
class Meta:
|
{"golden_diff": "diff --git a/kolibri/core/logger/api.py b/kolibri/core/logger/api.py\n--- a/kolibri/core/logger/api.py\n+++ b/kolibri/core/logger/api.py\n@@ -1,12 +1,17 @@\n+import logging\n+\n from django.core.exceptions import ObjectDoesNotExist\n from django.db.models.query import F\n+from django.db.utils import IntegrityError\n from django.http import Http404\n from django_filters import ModelChoiceFilter\n from django_filters.rest_framework import CharFilter\n from django_filters.rest_framework import DjangoFilterBackend\n from django_filters.rest_framework import FilterSet\n from rest_framework import filters\n+from rest_framework import status\n from rest_framework import viewsets\n+from rest_framework.exceptions import ValidationError\n from rest_framework.response import Response\n \n from .models import AttemptLog\n@@ -36,6 +41,8 @@\n from kolibri.core.content.api import OptionalPageNumberPagination\n from kolibri.core.exams.models import Exam\n \n+logger = logging.getLogger(__name__)\n+\n \n class BaseLogFilter(FilterSet):\n facility = ModelChoiceFilter(\n@@ -98,6 +105,21 @@\n default_response[field] = method(instance)\n return Response(default_response)\n \n+ def create(self, request, *args, **kwargs):\n+ try:\n+ return super(LoggerViewSet, self).create(request, *args, **kwargs)\n+ except IntegrityError:\n+ # The object has been created previously: let's calculate its id and return it\n+ serializer = self.get_serializer(data=request.data)\n+ serializer.is_valid(raise_exception=True)\n+ obj = serializer.Meta.model(**serializer.validated_data)\n+ obj.id = obj.calculate_uuid()\n+ final_obj = self.get_serializer(obj)\n+ return Response(final_obj.data)\n+ except ValidationError as e:\n+ logger.error(\"Failed to validate data: {}\".format(e))\n+ return Response(request.data, status.HTTP_400_BAD_REQUEST)\n+\n \n class ContentSessionLogFilter(BaseLogFilter):\n class Meta:\n", "issue": "Select all not working in groups\n### Observed behavior\r\nIn the coach tab, while enrolling learners in the group first select single user and then select select all option and then enroll. It will enroll only one user in the group.\r\n\r\n### Expected behavior\r\nWhen we select select all, it must enroll all users in the group.\r\n\r\n### Steps to reproduce\r\n1. Create the group.\r\n2. Select one user while enrolling.\r\n3. Then select select all in groups.\r\n4. Click on enroll.\r\n5. 
It will enroll only one user in the group.\r\n\r\n### Context\r\n * Kolibri version : Kolibri 0.12.2\r\n * Operating system : Ubuntu 14.04\r\n * Browser : Chrome\r\n\r\n### Screenshots\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models.query import F\nfrom django.http import Http404\nfrom django_filters import ModelChoiceFilter\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom rest_framework import filters\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom .models import AttemptLog\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom .models import ExamAttemptLog\nfrom .models import ExamLog\nfrom .models import MasteryLog\nfrom .models import UserSessionLog\nfrom .permissions import ExamActivePermissions\nfrom .serializers import AttemptLogSerializer\nfrom .serializers import ContentSessionLogSerializer\nfrom .serializers import ContentSummaryLogSerializer\nfrom .serializers import ExamAttemptLogSerializer\nfrom .serializers import ExamLogSerializer\nfrom .serializers import MasteryLogSerializer\nfrom .serializers import TotalContentProgressSerializer\nfrom .serializers import UserSessionLogSerializer\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.auth.filters import HierarchyRelationsFilter\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Collection\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.auth.models import LearnerGroup\nfrom kolibri.core.content.api import OptionalPageNumberPagination\nfrom kolibri.core.exams.models import Exam\n\n\nclass BaseLogFilter(FilterSet):\n facility = ModelChoiceFilter(\n method=\"filter_facility\", queryset=Facility.objects.all()\n )\n classroom = ModelChoiceFilter(\n method=\"filter_classroom\", queryset=Classroom.objects.all()\n )\n learner_group = ModelChoiceFilter(\n method=\"filter_learner_group\", queryset=LearnerGroup.objects.all()\n )\n\n # Only a superuser can filter by facilities\n def filter_facility(self, queryset, name, value):\n return queryset.filter(user__facility=value)\n\n def filter_classroom(self, queryset, name, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value, target_user=F(\"user\")\n )\n\n def filter_learner_group(self, queryset, name, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value, target_user=F(\"user\")\n )\n\n\nclass LoggerViewSet(viewsets.ModelViewSet):\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop(\"partial\", False)\n model = self.queryset.model\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n try:\n instance = model.objects.get(id=self.kwargs[lookup_url_kwarg])\n self.check_object_permissions(request, instance)\n except (ValueError, ObjectDoesNotExist):\n raise Http404\n serializer = self.get_serializer(instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n\n if getattr(instance, \"_prefetched_objects_cache\", None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on 
the instance.\n instance._prefetched_objects_cache = {}\n default_response = dict(request.data)\n # First look if the computed fields to be updated are listed:\n updating_fields = getattr(serializer.root, \"update_fields\", None)\n # If not, fetch all the fields that are computed methods:\n if updating_fields is None:\n updating_fields = [\n field\n for field in serializer.fields\n if getattr(serializer.fields[field], \"method_name\", None)\n ]\n for field in updating_fields:\n method_name = getattr(serializer.fields[field], \"method_name\", None)\n if method_name:\n method = getattr(serializer.root, method_name)\n default_response[field] = method(instance)\n return Response(default_response)\n\n\nclass ContentSessionLogFilter(BaseLogFilter):\n class Meta:\n model = ContentSessionLog\n fields = [\"user_id\", \"content_id\"]\n\n\nclass ContentSessionLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSessionLogFilter\n\n\nclass ContentSummaryLogFilter(BaseLogFilter):\n class Meta:\n model = ContentSummaryLog\n fields = [\"user_id\", \"content_id\"]\n\n\nclass ContentSummaryLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = ContentSummaryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSummaryLogFilter\n\n\nclass TotalContentProgressViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = FacilityUser.objects.all()\n serializer_class = TotalContentProgressSerializer\n\n\nclass UserSessionLogFilter(BaseLogFilter):\n class Meta:\n model = UserSessionLog\n fields = [\"user_id\"]\n\n\nclass UserSessionLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = UserSessionLog.objects.all()\n serializer_class = UserSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = UserSessionLogFilter\n\n\nclass MasteryFilter(FilterSet):\n class Meta:\n model = MasteryLog\n fields = [\"summarylog\"]\n\n\nclass MasteryLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n serializer_class = MasteryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n\n\nclass AttemptFilter(BaseLogFilter):\n content = CharFilter(method=\"filter_content\")\n\n def filter_content(self, queryset, name, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n class Meta:\n model = AttemptLog\n fields = [\"masterylog\", \"complete\", \"user\", \"content\", \"item\"]\n\n\nclass AttemptLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = AttemptLog.objects.all()\n serializer_class = AttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = (\"end_timestamp\",)\n 
ordering = (\"end_timestamp\",)\n\n\nclass ExamAttemptFilter(BaseLogFilter):\n exam = ModelChoiceFilter(method=\"filter_exam\", queryset=Exam.objects.all())\n user = ModelChoiceFilter(method=\"filter_user\", queryset=FacilityUser.objects.all())\n content = CharFilter(field_name=\"content_id\")\n\n def filter_exam(self, queryset, name, value):\n return queryset.filter(examlog__exam=value)\n\n def filter_user(self, queryset, name, value):\n return queryset.filter(examlog__user=value)\n\n class Meta:\n model = ExamAttemptLog\n fields = [\"examlog\", \"exam\", \"user\", \"content\", \"item\"]\n\n\nclass ExamAttemptLogViewSet(LoggerViewSet):\n permission_classes = (ExamActivePermissions, KolibriAuthPermissions)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = ExamAttemptLog.objects.all()\n serializer_class = ExamAttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamAttemptFilter\n\n\nclass ExamLogFilter(BaseLogFilter):\n\n collection = ModelChoiceFilter(\n method=\"filter_collection\", queryset=Collection.objects.all()\n )\n\n def filter_collection(self, queryset, name, collection):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n target_user=F(\"user\"), ancestor_collection=collection\n )\n\n class Meta:\n model = ExamLog\n fields = [\"user\", \"exam\"]\n\n\nclass ExamLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = ExamLog.objects.all()\n serializer_class = ExamLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamLogFilter\n", "path": "kolibri/core/logger/api.py"}], "after_files": [{"content": "import logging\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models.query import F\nfrom django.db.utils import IntegrityError\nfrom django.http import Http404\nfrom django_filters import ModelChoiceFilter\nfrom django_filters.rest_framework import CharFilter\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django_filters.rest_framework import FilterSet\nfrom rest_framework import filters\nfrom rest_framework import status\nfrom rest_framework import viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\n\nfrom .models import AttemptLog\nfrom .models import ContentSessionLog\nfrom .models import ContentSummaryLog\nfrom .models import ExamAttemptLog\nfrom .models import ExamLog\nfrom .models import MasteryLog\nfrom .models import UserSessionLog\nfrom .permissions import ExamActivePermissions\nfrom .serializers import AttemptLogSerializer\nfrom .serializers import ContentSessionLogSerializer\nfrom .serializers import ContentSummaryLogSerializer\nfrom .serializers import ExamAttemptLogSerializer\nfrom .serializers import ExamLogSerializer\nfrom .serializers import MasteryLogSerializer\nfrom .serializers import TotalContentProgressSerializer\nfrom .serializers import UserSessionLogSerializer\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.auth.filters import HierarchyRelationsFilter\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Collection\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.auth.models import FacilityUser\nfrom kolibri.core.auth.models import LearnerGroup\nfrom 
kolibri.core.content.api import OptionalPageNumberPagination\nfrom kolibri.core.exams.models import Exam\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseLogFilter(FilterSet):\n facility = ModelChoiceFilter(\n method=\"filter_facility\", queryset=Facility.objects.all()\n )\n classroom = ModelChoiceFilter(\n method=\"filter_classroom\", queryset=Classroom.objects.all()\n )\n learner_group = ModelChoiceFilter(\n method=\"filter_learner_group\", queryset=LearnerGroup.objects.all()\n )\n\n # Only a superuser can filter by facilities\n def filter_facility(self, queryset, name, value):\n return queryset.filter(user__facility=value)\n\n def filter_classroom(self, queryset, name, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value, target_user=F(\"user\")\n )\n\n def filter_learner_group(self, queryset, name, value):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n ancestor_collection=value, target_user=F(\"user\")\n )\n\n\nclass LoggerViewSet(viewsets.ModelViewSet):\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop(\"partial\", False)\n model = self.queryset.model\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n try:\n instance = model.objects.get(id=self.kwargs[lookup_url_kwarg])\n self.check_object_permissions(request, instance)\n except (ValueError, ObjectDoesNotExist):\n raise Http404\n serializer = self.get_serializer(instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n\n if getattr(instance, \"_prefetched_objects_cache\", None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n default_response = dict(request.data)\n # First look if the computed fields to be updated are listed:\n updating_fields = getattr(serializer.root, \"update_fields\", None)\n # If not, fetch all the fields that are computed methods:\n if updating_fields is None:\n updating_fields = [\n field\n for field in serializer.fields\n if getattr(serializer.fields[field], \"method_name\", None)\n ]\n for field in updating_fields:\n method_name = getattr(serializer.fields[field], \"method_name\", None)\n if method_name:\n method = getattr(serializer.root, method_name)\n default_response[field] = method(instance)\n return Response(default_response)\n\n def create(self, request, *args, **kwargs):\n try:\n return super(LoggerViewSet, self).create(request, *args, **kwargs)\n except IntegrityError:\n # The object has been created previously: let's calculate its id and return it\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n obj = serializer.Meta.model(**serializer.validated_data)\n obj.id = obj.calculate_uuid()\n final_obj = self.get_serializer(obj)\n return Response(final_obj.data)\n except ValidationError as e:\n logger.error(\"Failed to validate data: {}\".format(e))\n return Response(request.data, status.HTTP_400_BAD_REQUEST)\n\n\nclass ContentSessionLogFilter(BaseLogFilter):\n class Meta:\n model = ContentSessionLog\n fields = [\"user_id\", \"content_id\"]\n\n\nclass ContentSessionLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = ContentSessionLog.objects.all()\n serializer_class = ContentSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n 
filter_class = ContentSessionLogFilter\n\n\nclass ContentSummaryLogFilter(BaseLogFilter):\n class Meta:\n model = ContentSummaryLog\n fields = [\"user_id\", \"content_id\"]\n\n\nclass ContentSummaryLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = ContentSummaryLog.objects.all()\n serializer_class = ContentSummaryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ContentSummaryLogFilter\n\n\nclass TotalContentProgressViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter,)\n queryset = FacilityUser.objects.all()\n serializer_class = TotalContentProgressSerializer\n\n\nclass UserSessionLogFilter(BaseLogFilter):\n class Meta:\n model = UserSessionLog\n fields = [\"user_id\"]\n\n\nclass UserSessionLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = UserSessionLog.objects.all()\n serializer_class = UserSessionLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = UserSessionLogFilter\n\n\nclass MasteryFilter(FilterSet):\n class Meta:\n model = MasteryLog\n fields = [\"summarylog\"]\n\n\nclass MasteryLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = MasteryLog.objects.all()\n serializer_class = MasteryLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = MasteryFilter\n\n\nclass AttemptFilter(BaseLogFilter):\n content = CharFilter(method=\"filter_content\")\n\n def filter_content(self, queryset, name, value):\n return queryset.filter(masterylog__summarylog__content_id=value)\n\n class Meta:\n model = AttemptLog\n fields = [\"masterylog\", \"complete\", \"user\", \"content\", \"item\"]\n\n\nclass AttemptLogViewSet(LoggerViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = AttemptLog.objects.all()\n serializer_class = AttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = AttemptFilter\n ordering_fields = (\"end_timestamp\",)\n ordering = (\"end_timestamp\",)\n\n\nclass ExamAttemptFilter(BaseLogFilter):\n exam = ModelChoiceFilter(method=\"filter_exam\", queryset=Exam.objects.all())\n user = ModelChoiceFilter(method=\"filter_user\", queryset=FacilityUser.objects.all())\n content = CharFilter(field_name=\"content_id\")\n\n def filter_exam(self, queryset, name, value):\n return queryset.filter(examlog__exam=value)\n\n def filter_user(self, queryset, name, value):\n return queryset.filter(examlog__user=value)\n\n class Meta:\n model = ExamAttemptLog\n fields = [\"examlog\", \"exam\", \"user\", \"content\", \"item\"]\n\n\nclass ExamAttemptLogViewSet(LoggerViewSet):\n permission_classes = (ExamActivePermissions, KolibriAuthPermissions)\n filter_backends = (\n KolibriAuthPermissionsFilter,\n DjangoFilterBackend,\n filters.OrderingFilter,\n )\n queryset = ExamAttemptLog.objects.all()\n serializer_class = ExamAttemptLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamAttemptFilter\n\n\nclass ExamLogFilter(BaseLogFilter):\n\n collection = ModelChoiceFilter(\n method=\"filter_collection\", queryset=Collection.objects.all()\n )\n\n def 
filter_collection(self, queryset, name, collection):\n return HierarchyRelationsFilter(queryset).filter_by_hierarchy(\n target_user=F(\"user\"), ancestor_collection=collection\n )\n\n class Meta:\n model = ExamLog\n fields = [\"user\", \"exam\"]\n\n\nclass ExamLogViewSet(viewsets.ModelViewSet):\n permission_classes = (KolibriAuthPermissions,)\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n queryset = ExamLog.objects.all()\n serializer_class = ExamLogSerializer\n pagination_class = OptionalPageNumberPagination\n filter_class = ExamLogFilter\n", "path": "kolibri/core/logger/api.py"}]}
| 3,232 | 432 |
gh_patches_debug_1225
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-28
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IndexError: list index out of range when missing predict value
**Is your feature request related to a problem? Please describe.**
When there is empty string provided as predict value e.g:
```
result = mdb.predict(predict=' ', model_name='home_rentals')
```
IndexError: list index out of range error is thrown
**Describe the solution you'd like**
User friendly message should be thrown e.g
ValueError: Please provide valid predict value
**Additional context**
We can check for empty predict values in https://github.com/mindsdb/main/blob/76c691c4b18a4723626dfcbff8228da614d93e8b/mindsdb/libs/controllers/mindsdb_controller.py#L170 and raise Value error if predict not provided.
--- END ISSUE ---
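For illustration, a minimal standalone sketch of the early validation requested in the issue (the helper name and the whitespace handling are assumptions, not part of the MindsDB API; note that a bare `if not predict:` would not reject the reported `' '` example unless the string is also stripped):
```python
# Illustrative sketch only: reject None, empty strings, whitespace-only
# strings and empty lists before any transaction work starts.
def validate_predict(predict):
    if predict is None:
        raise ValueError('Please provide valid predict value.')
    if isinstance(predict, str) and not predict.strip():
        raise ValueError('Please provide valid predict value.')
    if isinstance(predict, list) and len(predict) == 0:
        raise ValueError('Please provide valid predict value.')
    return [predict] if not isinstance(predict, list) else predict


print(validate_predict('rental_price'))  # ['rental_price']
try:
    validate_predict(' ')
except ValueError as err:
    print(err)  # Please provide valid predict value.
```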
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/libs/controllers/mindsdb_controller.py`
Content:
```
1 import sqlite3
2 import pandas
3 import requests
4 import logging
5 import os
6 import platform
7 import _thread
8 import uuid
9 import traceback
10 import urllib
11
12 from mindsdb.libs.helpers.sqlite_helpers import *
13 from mindsdb.libs.helpers.multi_data_source import getDS
14 from mindsdb.config import SQLITE_FILE
15 import mindsdb.config as CONFIG
16
17 from mindsdb.libs.data_types.transaction_metadata import TransactionMetadata
18 from mindsdb.libs.controllers.session_controller import SessionController
19 from mindsdb.libs.constants.mindsdb import *
20
21 from mindsdb.version import mindsdb_version as MINDSDB_VERSION
22
23 from pathlib import Path
24
25 class MindsDBController:
26
27 def __init__(self, file=SQLITE_FILE):
28 """
29
30 :param file:
31 """
32
33 self.setConfigs()
34
35 _thread.start_new_thread(MindsDBController.checkForUpdates, ())
36 self.session = SessionController()
37 self.storage_file = file
38 self.conn = sqlite3.connect(file)
39 self.conn.create_aggregate("first_value", 1, FirstValueAgg)
40 self.conn.create_aggregate("array_agg_json", 2, ArrayAggJSON)
41
42 def setConfigs(self):
43 """
44 This sets the config settings for this mindsdb instance
45 :return:
46 """
47 # set logging settings
48 logging.basicConfig(**CONFIG.PROXY_LOG_CONFIG)
49
50 # set the mindsdb storage folder
51 storage_ok = True # default state
52
53 # if it does not exist try to create it
54 if not os.path.exists(CONFIG.MINDSDB_STORAGE_PATH):
55 try:
56 logging.info('{folder} does not exist, creating it now'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))
57 path = Path(CONFIG.MINDSDB_STORAGE_PATH)
58 path.mkdir(exist_ok=True, parents=True)
59 except:
60 logging.info(traceback.format_exc())
61 storage_ok = False
62 logging.error('MindsDB storate foldler: {folder} does not exist and could not be created'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))
63
64 # If storage path is not writable, raise an exception as this can no longer be
65 if not os.access(CONFIG.MINDSDB_STORAGE_PATH, os.W_OK) or storage_ok == False:
66 error_message = '''Cannot write into storage path, please either set the config variable mindsdb.config.set('MINDSDB_STORAGE_PATH',<path>) or give write access to {folder}'''
67 raise ValueError(error_message.format(folder=CONFIG.MINDSDB_STORAGE_PATH))
68
69
70 def addTable(self, ds, as_table):
71 """
72
73 :param ds:
74 :param as_table:
75 :return:
76 """
77
78 ds.df.to_sql(as_table, self.conn, if_exists='replace', index=False)
79
80 def query(self, query):
81 """
82
83 :param query:
84 :return:
85 """
86
87 cur = self.conn.cursor()
88 return cur.execute(query)
89
90 def queryToDF(self, query):
91 """
92
93 :param query:
94 :return:
95 """
96
97 return pandas.read_sql_query(query, self.conn)
98
99
100 def setUserEmail(self, email):
101 """
102
103 :param email:
104 :return:
105 """
106 email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'
107
108
109 try:
110 open(email_file, 'w').write(email)
111 return True
112 except:
113 logging.warning('Cannot store token, Please add write permissions to file:' + email_file)
114 return False
115
116 def getUserEmail(self):
117 """
118
119 :return:
120 """
121 email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'
122 email_file_path = Path(email_file)
123
124 try:
125 if email_file_path.is_file():
126 email = open(email_file, 'r').read()
127 return email
128 else:
129 return None
130 except:
131 logging.warning('Cannot read email, Please add write permissions to file:' + email_file)
132 return None
133
134 def learn(self, predict, from_file=None, from_data = None, model_name='mdsb_model', test_from_data=None, group_by = None, window_size = MODEL_GROUP_BY_DEAFAULT_LIMIT, order_by = [], breakpoint = PHASE_END, ignore_columns = []):
135 """
136
137 :param from_query:
138 :param predict:
139 :param model_name:
140 :param test_query:
141 :return:
142 """
143
144 from_ds = getDS(from_data) if from_file is None else getDS(from_file)
145 test_from_ds = test_from_data if test_from_data is None else getDS(test_from_data)
146
147 transaction_type = TRANSACTION_LEARN
148
149 predict_columns = [predict] if type(predict) != type([]) else predict
150
151 transaction_metadata = TransactionMetadata()
152 transaction_metadata.model_name = model_name
153 transaction_metadata.model_predict_columns = predict_columns
154 transaction_metadata.model_group_by = group_by
155 transaction_metadata.model_order_by = order_by if type(order_by) == type([]) else [order_by]
156 transaction_metadata.window_size = window_size
157 transaction_metadata.type = transaction_type
158 transaction_metadata.from_data = from_ds
159 transaction_metadata.test_from_data = test_from_ds
160 transaction_metadata.ignore_columns = ignore_columns
161
162 self.startInfoServer()
163 self.session.newTransaction(transaction_metadata, breakpoint)
164
165
166 def startInfoServer(self):
167 pass
168
169 def predict(self, predict, from_data = None, when={}, model_name='mdsb_model', breakpoint= PHASE_END):
170 """
171
172 :param predict:
173 :param when:
174 :param model_name:
175 :return:
176 """
177
178 transaction_type = TRANSACTION_PREDICT
179
180 from_ds = None if from_data is None else getDS(from_data)
181
182 predict_columns = [predict] if type(predict) != type([]) else predict
183
184 transaction_metadata = TransactionMetadata()
185 transaction_metadata.model_name = model_name
186 transaction_metadata.model_predict_columns = predict_columns
187 transaction_metadata.model_when_conditions = when
188 transaction_metadata.type = transaction_type
189 transaction_metadata.storage_file = self.storage_file
190 transaction_metadata.from_data = from_ds
191
192 transaction = self.session.newTransaction(transaction_metadata, breakpoint)
193
194 return transaction.output_data
195
196 @staticmethod
197 def checkForUpdates():
198 # tmp files
199 uuid_file = CONFIG.MINDSDB_STORAGE_PATH + '/../uuid.mdb_base'
200 mdb_file = CONFIG.MINDSDB_STORAGE_PATH + '/start.mdb_base'
201
202 uuid_file_path = Path(uuid_file)
203 if uuid_file_path.is_file():
204 uuid_str = open(uuid_file).read()
205 else:
206 uuid_str = str(uuid.uuid4())
207 try:
208 open(uuid_file, 'w').write(uuid_str)
209 except:
210 logging.warning('Cannot store token, Please add write permissions to file:' + uuid_file)
211 uuid_str = uuid_str + '.NO_WRITE'
212
213 file_path = Path(mdb_file)
214 if file_path.is_file():
215 token = open(mdb_file).read()
216 else:
217 token = '{system}|{version}|{uid}'.format(system=platform.system(), version=MINDSDB_VERSION, uid=uuid_str)
218 try:
219 open(mdb_file,'w').write(token)
220 except:
221 logging.warning('Cannot store token, Please add write permissions to file:'+mdb_file)
222 token = token+'.NO_WRITE'
223 extra = urllib.parse.quote_plus(token)
224 try:
225 r = requests.get('http://mindsdb.com/updates/check/{extra}'.format(extra=extra), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)})
226 except:
227 logging.warning('Could not check for updates')
228 return
229 try:
230 # TODO: Extract version, compare with version in version.py
231 ret = r.json()
232
233 if 'version' in ret and ret['version']!= MINDSDB_VERSION:
234 logging.warning("There is a new version of MindsDB {version}, please do:\n pip3 uninstall mindsdb\n pip2 install mindsdb --user".format(version=ret['version']))
235 else:
236 logging.debug('MindsDB is up to date!')
237
238 except:
239
240 logging.warning('could not check for MindsDB updates')
241
242
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mindsdb/libs/controllers/mindsdb_controller.py b/mindsdb/libs/controllers/mindsdb_controller.py
--- a/mindsdb/libs/controllers/mindsdb_controller.py
+++ b/mindsdb/libs/controllers/mindsdb_controller.py
@@ -174,6 +174,9 @@
:param model_name:
:return:
"""
+
+ if not predict:
+ raise ValueError('Please provide valid predict value.')
transaction_type = TRANSACTION_PREDICT
|
{"golden_diff": "diff --git a/mindsdb/libs/controllers/mindsdb_controller.py b/mindsdb/libs/controllers/mindsdb_controller.py\n--- a/mindsdb/libs/controllers/mindsdb_controller.py\n+++ b/mindsdb/libs/controllers/mindsdb_controller.py\n@@ -174,6 +174,9 @@\n :param model_name:\n :return:\n \"\"\"\n+ \n+ if not predict:\n+ raise ValueError('Please provide valid predict value.')\n \n transaction_type = TRANSACTION_PREDICT\n", "issue": "IndexError: list index out of range when missing predict value\n**Is your feature request related to a problem? Please describe.**\r\nWhen there is empty string provided as predict value e.g:\r\n``` \r\nresult = mdb.predict(predict=' ', model_name='home_rentals')\r\n```\r\nIndexError: list index out of range error is thrown\r\n\r\n**Describe the solution you'd like**\r\nUser friendly message should be thrown e.g \r\nValueError: Please provide valid predict value\r\n\r\n**Additional context**\r\nWe can check for empty predict values in https://github.com/mindsdb/main/blob/76c691c4b18a4723626dfcbff8228da614d93e8b/mindsdb/libs/controllers/mindsdb_controller.py#L170 and raise Value error if predict not provided.\n", "before_files": [{"content": "import sqlite3\nimport pandas\nimport requests\nimport logging\nimport os\nimport platform\nimport _thread\nimport uuid\nimport traceback\nimport urllib\n\nfrom mindsdb.libs.helpers.sqlite_helpers import *\nfrom mindsdb.libs.helpers.multi_data_source import getDS\nfrom mindsdb.config import SQLITE_FILE\nimport mindsdb.config as CONFIG\n\nfrom mindsdb.libs.data_types.transaction_metadata import TransactionMetadata\nfrom mindsdb.libs.controllers.session_controller import SessionController\nfrom mindsdb.libs.constants.mindsdb import *\n\nfrom mindsdb.version import mindsdb_version as MINDSDB_VERSION\n\nfrom pathlib import Path\n\nclass MindsDBController:\n\n def __init__(self, file=SQLITE_FILE):\n \"\"\"\n\n :param file:\n \"\"\"\n\n self.setConfigs()\n\n _thread.start_new_thread(MindsDBController.checkForUpdates, ())\n self.session = SessionController()\n self.storage_file = file\n self.conn = sqlite3.connect(file)\n self.conn.create_aggregate(\"first_value\", 1, FirstValueAgg)\n self.conn.create_aggregate(\"array_agg_json\", 2, ArrayAggJSON)\n\n def setConfigs(self):\n \"\"\"\n This sets the config settings for this mindsdb instance\n :return:\n \"\"\"\n # set logging settings\n logging.basicConfig(**CONFIG.PROXY_LOG_CONFIG)\n\n # set the mindsdb storage folder\n storage_ok = True # default state\n\n # if it does not exist try to create it\n if not os.path.exists(CONFIG.MINDSDB_STORAGE_PATH):\n try:\n logging.info('{folder} does not exist, creating it now'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n path = Path(CONFIG.MINDSDB_STORAGE_PATH)\n path.mkdir(exist_ok=True, parents=True)\n except:\n logging.info(traceback.format_exc())\n storage_ok = False\n logging.error('MindsDB storate foldler: {folder} does not exist and could not be created'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n # If storage path is not writable, raise an exception as this can no longer be\n if not os.access(CONFIG.MINDSDB_STORAGE_PATH, os.W_OK) or storage_ok == False:\n error_message = '''Cannot write into storage path, please either set the config variable mindsdb.config.set('MINDSDB_STORAGE_PATH',<path>) or give write access to {folder}'''\n raise ValueError(error_message.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n\n def addTable(self, ds, as_table):\n \"\"\"\n\n :param ds:\n :param as_table:\n :return:\n \"\"\"\n\n ds.df.to_sql(as_table, 
self.conn, if_exists='replace', index=False)\n\n def query(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n cur = self.conn.cursor()\n return cur.execute(query)\n\n def queryToDF(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n return pandas.read_sql_query(query, self.conn)\n\n\n def setUserEmail(self, email):\n \"\"\"\n\n :param email:\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n\n\n try:\n open(email_file, 'w').write(email)\n return True\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + email_file)\n return False\n\n def getUserEmail(self):\n \"\"\"\n\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n email_file_path = Path(email_file)\n\n try:\n if email_file_path.is_file():\n email = open(email_file, 'r').read()\n return email\n else:\n return None\n except:\n logging.warning('Cannot read email, Please add write permissions to file:' + email_file)\n return None\n\n def learn(self, predict, from_file=None, from_data = None, model_name='mdsb_model', test_from_data=None, group_by = None, window_size = MODEL_GROUP_BY_DEAFAULT_LIMIT, order_by = [], breakpoint = PHASE_END, ignore_columns = []):\n \"\"\"\n\n :param from_query:\n :param predict:\n :param model_name:\n :param test_query:\n :return:\n \"\"\"\n\n from_ds = getDS(from_data) if from_file is None else getDS(from_file)\n test_from_ds = test_from_data if test_from_data is None else getDS(test_from_data)\n\n transaction_type = TRANSACTION_LEARN\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_group_by = group_by\n transaction_metadata.model_order_by = order_by if type(order_by) == type([]) else [order_by]\n transaction_metadata.window_size = window_size\n transaction_metadata.type = transaction_type\n transaction_metadata.from_data = from_ds\n transaction_metadata.test_from_data = test_from_ds\n transaction_metadata.ignore_columns = ignore_columns\n\n self.startInfoServer()\n self.session.newTransaction(transaction_metadata, breakpoint)\n\n\n def startInfoServer(self):\n pass\n\n def predict(self, predict, from_data = None, when={}, model_name='mdsb_model', breakpoint= PHASE_END):\n \"\"\"\n\n :param predict:\n :param when:\n :param model_name:\n :return:\n \"\"\"\n\n transaction_type = TRANSACTION_PREDICT\n\n from_ds = None if from_data is None else getDS(from_data)\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_when_conditions = when\n transaction_metadata.type = transaction_type\n transaction_metadata.storage_file = self.storage_file\n transaction_metadata.from_data = from_ds\n\n transaction = self.session.newTransaction(transaction_metadata, breakpoint)\n\n return transaction.output_data\n\n @staticmethod\n def checkForUpdates():\n # tmp files\n uuid_file = CONFIG.MINDSDB_STORAGE_PATH + '/../uuid.mdb_base'\n mdb_file = CONFIG.MINDSDB_STORAGE_PATH + '/start.mdb_base'\n\n uuid_file_path = Path(uuid_file)\n if uuid_file_path.is_file():\n uuid_str = open(uuid_file).read()\n else:\n uuid_str = str(uuid.uuid4())\n try:\n open(uuid_file, 'w').write(uuid_str)\n except:\n 
logging.warning('Cannot store token, Please add write permissions to file:' + uuid_file)\n uuid_str = uuid_str + '.NO_WRITE'\n\n file_path = Path(mdb_file)\n if file_path.is_file():\n token = open(mdb_file).read()\n else:\n token = '{system}|{version}|{uid}'.format(system=platform.system(), version=MINDSDB_VERSION, uid=uuid_str)\n try:\n open(mdb_file,'w').write(token)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:'+mdb_file)\n token = token+'.NO_WRITE'\n extra = urllib.parse.quote_plus(token)\n try:\n r = requests.get('http://mindsdb.com/updates/check/{extra}'.format(extra=extra), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)})\n except:\n logging.warning('Could not check for updates')\n return\n try:\n # TODO: Extract version, compare with version in version.py\n ret = r.json()\n\n if 'version' in ret and ret['version']!= MINDSDB_VERSION:\n logging.warning(\"There is a new version of MindsDB {version}, please do:\\n pip3 uninstall mindsdb\\n pip2 install mindsdb --user\".format(version=ret['version']))\n else:\n logging.debug('MindsDB is up to date!')\n\n except:\n\n logging.warning('could not check for MindsDB updates')\n\n\n", "path": "mindsdb/libs/controllers/mindsdb_controller.py"}], "after_files": [{"content": "import sqlite3\nimport pandas\nimport requests\nimport logging\nimport os\nimport platform\nimport _thread\nimport uuid\nimport traceback\nimport urllib\n\nfrom mindsdb.libs.helpers.sqlite_helpers import *\nfrom mindsdb.libs.helpers.multi_data_source import getDS\nfrom mindsdb.config import SQLITE_FILE\nimport mindsdb.config as CONFIG\n\nfrom mindsdb.libs.data_types.transaction_metadata import TransactionMetadata\nfrom mindsdb.libs.controllers.session_controller import SessionController\nfrom mindsdb.libs.constants.mindsdb import *\n\nfrom mindsdb.version import mindsdb_version as MINDSDB_VERSION\n\nfrom pathlib import Path\n\nclass MindsDBController:\n\n def __init__(self, file=SQLITE_FILE):\n \"\"\"\n\n :param file:\n \"\"\"\n\n self.setConfigs()\n\n _thread.start_new_thread(MindsDBController.checkForUpdates, ())\n self.session = SessionController()\n self.storage_file = file\n self.conn = sqlite3.connect(file)\n self.conn.create_aggregate(\"first_value\", 1, FirstValueAgg)\n self.conn.create_aggregate(\"array_agg_json\", 2, ArrayAggJSON)\n\n def setConfigs(self):\n \"\"\"\n This sets the config settings for this mindsdb instance\n :return:\n \"\"\"\n # set logging settings\n logging.basicConfig(**CONFIG.PROXY_LOG_CONFIG)\n\n # set the mindsdb storage folder\n storage_ok = True # default state\n\n # if it does not exist try to create it\n if not os.path.exists(CONFIG.MINDSDB_STORAGE_PATH):\n try:\n logging.info('{folder} does not exist, creating it now'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n path = Path(CONFIG.MINDSDB_STORAGE_PATH)\n path.mkdir(exist_ok=True, parents=True)\n except:\n logging.info(traceback.format_exc())\n storage_ok = False\n logging.error('MindsDB storate foldler: {folder} does not exist and could not be created'.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n # If storage path is not writable, raise an exception as this can no longer be\n if not os.access(CONFIG.MINDSDB_STORAGE_PATH, os.W_OK) or storage_ok == False:\n error_message = '''Cannot write into storage path, please either set the config variable mindsdb.config.set('MINDSDB_STORAGE_PATH',<path>) or give write access to {folder}'''\n raise ValueError(error_message.format(folder=CONFIG.MINDSDB_STORAGE_PATH))\n\n\n 
def addTable(self, ds, as_table):\n \"\"\"\n\n :param ds:\n :param as_table:\n :return:\n \"\"\"\n\n ds.df.to_sql(as_table, self.conn, if_exists='replace', index=False)\n\n def query(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n cur = self.conn.cursor()\n return cur.execute(query)\n\n def queryToDF(self, query):\n \"\"\"\n\n :param query:\n :return:\n \"\"\"\n\n return pandas.read_sql_query(query, self.conn)\n\n\n def setUserEmail(self, email):\n \"\"\"\n\n :param email:\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n\n\n try:\n open(email_file, 'w').write(email)\n return True\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + email_file)\n return False\n\n def getUserEmail(self):\n \"\"\"\n\n :return:\n \"\"\"\n email_file = CONFIG.MINDSDB_STORAGE_PATH + '/../email.mdb_base'\n email_file_path = Path(email_file)\n\n try:\n if email_file_path.is_file():\n email = open(email_file, 'r').read()\n return email\n else:\n return None\n except:\n logging.warning('Cannot read email, Please add write permissions to file:' + email_file)\n return None\n\n def learn(self, predict, from_file=None, from_data = None, model_name='mdsb_model', test_from_data=None, group_by = None, window_size = MODEL_GROUP_BY_DEAFAULT_LIMIT, order_by = [], breakpoint = PHASE_END, ignore_columns = []):\n \"\"\"\n\n :param from_query:\n :param predict:\n :param model_name:\n :param test_query:\n :return:\n \"\"\"\n\n from_ds = getDS(from_data) if from_file is None else getDS(from_file)\n test_from_ds = test_from_data if test_from_data is None else getDS(test_from_data)\n\n transaction_type = TRANSACTION_LEARN\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_group_by = group_by\n transaction_metadata.model_order_by = order_by if type(order_by) == type([]) else [order_by]\n transaction_metadata.window_size = window_size\n transaction_metadata.type = transaction_type\n transaction_metadata.from_data = from_ds\n transaction_metadata.test_from_data = test_from_ds\n transaction_metadata.ignore_columns = ignore_columns\n\n self.startInfoServer()\n self.session.newTransaction(transaction_metadata, breakpoint)\n\n\n def startInfoServer(self):\n pass\n\n def predict(self, predict, from_data = None, when={}, model_name='mdsb_model', breakpoint= PHASE_END):\n \"\"\"\n\n :param predict:\n :param when:\n :param model_name:\n :return:\n \"\"\"\n \n if not predict:\n raise ValueError('Please provide valid predict value.')\n\n transaction_type = TRANSACTION_PREDICT\n\n from_ds = None if from_data is None else getDS(from_data)\n\n predict_columns = [predict] if type(predict) != type([]) else predict\n\n transaction_metadata = TransactionMetadata()\n transaction_metadata.model_name = model_name\n transaction_metadata.model_predict_columns = predict_columns\n transaction_metadata.model_when_conditions = when\n transaction_metadata.type = transaction_type\n transaction_metadata.storage_file = self.storage_file\n transaction_metadata.from_data = from_ds\n\n transaction = self.session.newTransaction(transaction_metadata, breakpoint)\n\n return transaction.output_data\n\n @staticmethod\n def checkForUpdates():\n # tmp files\n uuid_file = CONFIG.MINDSDB_STORAGE_PATH + '/../uuid.mdb_base'\n mdb_file = CONFIG.MINDSDB_STORAGE_PATH + 
'/start.mdb_base'\n\n uuid_file_path = Path(uuid_file)\n if uuid_file_path.is_file():\n uuid_str = open(uuid_file).read()\n else:\n uuid_str = str(uuid.uuid4())\n try:\n open(uuid_file, 'w').write(uuid_str)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:' + uuid_file)\n uuid_str = uuid_str + '.NO_WRITE'\n\n file_path = Path(mdb_file)\n if file_path.is_file():\n token = open(mdb_file).read()\n else:\n token = '{system}|{version}|{uid}'.format(system=platform.system(), version=MINDSDB_VERSION, uid=uuid_str)\n try:\n open(mdb_file,'w').write(token)\n except:\n logging.warning('Cannot store token, Please add write permissions to file:'+mdb_file)\n token = token+'.NO_WRITE'\n extra = urllib.parse.quote_plus(token)\n try:\n r = requests.get('http://mindsdb.com/updates/check/{extra}'.format(extra=extra), headers={'referer': 'http://check.mindsdb.com/?token={token}'.format(token=token)})\n except:\n logging.warning('Could not check for updates')\n return\n try:\n # TODO: Extract version, compare with version in version.py\n ret = r.json()\n\n if 'version' in ret and ret['version']!= MINDSDB_VERSION:\n logging.warning(\"There is a new version of MindsDB {version}, please do:\\n pip3 uninstall mindsdb\\n pip2 install mindsdb --user\".format(version=ret['version']))\n else:\n logging.debug('MindsDB is up to date!')\n\n except:\n\n logging.warning('could not check for MindsDB updates')\n\n\n", "path": "mindsdb/libs/controllers/mindsdb_controller.py"}]}
| 2,822 | 106 |
gh_patches_debug_13560
|
rasdani/github-patches
|
git_diff
|
ydataai__ydata-profiling-1109
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cramer correlation matrix is not computed
/usr/local/lib/python3.7/dist-packages/pandas_profiling/model/correlations.py:61: UserWarning:There was an attempt to calculate the cramers correlation, but this failed.
To hide this warning, disable the calculation
(using `df.profile_report(correlations={"cramers": {"calculate": False}})`
If this is problematic for your use case, please report this as an issue:
https://github.com/ydataai/pandas-profiling/issues
(include the error message: 'No data; `observed` has size 0.')
pls help. My dataset is https://busan302.mycourses.work/data/house_price_train.csv
--- END ISSUE ---
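For illustration, the failure can be reproduced without the linked CSV: whenever two categorical columns never have non-null values in the same row, their crosstab is empty and scipy raises exactly the error quoted above (synthetic data below, not the house-price dataset):
```python
# Minimal reproduction: an empty crosstab makes scipy.stats.chi2_contingency
# fail with "No data; `observed` has size 0."
import pandas as pd
from scipy import stats

df = pd.DataFrame({
    'a': ['x', 'y', None, None],
    'b': [None, None, 'u', 'v'],
})

confusion_matrix = pd.crosstab(df['a'], df['b'])
print(confusion_matrix.shape)  # (0, 0) -- no co-occurring observations

try:
    stats.chi2_contingency(confusion_matrix)
except ValueError as err:
    print(err)  # No data; `observed` has size 0.
```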
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pandas_profiling/model/pandas/correlations_pandas.py`
Content:
```
1 """Correlations between variables."""
2 import itertools
3 import warnings
4 from typing import Optional
5
6 import numpy as np
7 import pandas as pd
8 from scipy import stats
9
10 from pandas_profiling.config import Settings
11 from pandas_profiling.model.correlations import (
12 Cramers,
13 Kendall,
14 Pearson,
15 PhiK,
16 Spearman,
17 )
18
19
20 @Spearman.compute.register(Settings, pd.DataFrame, dict)
21 def pandas_spearman_compute(
22 config: Settings, df: pd.DataFrame, summary: dict
23 ) -> Optional[pd.DataFrame]:
24 return df.corr(method="spearman")
25
26
27 @Pearson.compute.register(Settings, pd.DataFrame, dict)
28 def pandas_pearson_compute(
29 config: Settings, df: pd.DataFrame, summary: dict
30 ) -> Optional[pd.DataFrame]:
31 return df.corr(method="pearson")
32
33
34 @Kendall.compute.register(Settings, pd.DataFrame, dict)
35 def pandas_kendall_compute(
36 config: Settings, df: pd.DataFrame, summary: dict
37 ) -> Optional[pd.DataFrame]:
38 return df.corr(method="kendall")
39
40
41 def _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:
42 """Calculate the Cramer's V corrected stat for two variables.
43
44 Args:
45 confusion_matrix: Crosstab between two variables.
46 correction: Should the correction be applied?
47
48 Returns:
49 The Cramer's V corrected stat for the two variables.
50 """
51 chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]
52 n = confusion_matrix.sum().sum()
53 phi2 = chi2 / n
54 r = confusion_matrix.shape[0]
55 k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1
56
57 # Deal with NaNs later on
58 with np.errstate(divide="ignore", invalid="ignore"):
59 phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))
60 rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)
61 kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)
62 rkcorr = min((kcorr - 1.0), (rcorr - 1.0))
63 if rkcorr == 0.0:
64 corr = 1.0
65 else:
66 corr = np.sqrt(phi2corr / rkcorr)
67 return corr
68
69
70 @Cramers.compute.register(Settings, pd.DataFrame, dict)
71 def pandas_cramers_compute(
72 config: Settings, df: pd.DataFrame, summary: dict
73 ) -> Optional[pd.DataFrame]:
74 threshold = config.categorical_maximum_correlation_distinct
75
76 # `index` and `columns` must not be a set since Pandas 1.5,
77 # so convert it to a list. The order of the list is arbitrary.
78 categoricals = list(
79 {
80 key
81 for key, value in summary.items()
82 if value["type"] in {"Categorical", "Boolean"}
83 and value["n_distinct"] <= threshold
84 }
85 )
86
87 if len(categoricals) <= 1:
88 return None
89
90 matrix = np.zeros((len(categoricals), len(categoricals)))
91 np.fill_diagonal(matrix, 1.0)
92 correlation_matrix = pd.DataFrame(
93 matrix,
94 index=categoricals,
95 columns=categoricals,
96 )
97
98 for name1, name2 in itertools.combinations(categoricals, 2):
99 confusion_matrix = pd.crosstab(df[name1], df[name2])
100 correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(
101 confusion_matrix, correction=True
102 )
103 correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]
104 return correlation_matrix
105
106
107 @PhiK.compute.register(Settings, pd.DataFrame, dict)
108 def pandas_phik_compute(
109 config: Settings, df: pd.DataFrame, summary: dict
110 ) -> Optional[pd.DataFrame]:
111 df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}
112
113 intcols = {
114 key
115 for key, value in summary.items()
116 # DateTime currently excluded
117 # In some use cases, it makes sense to convert it to interval
118 # See https://github.com/KaveIO/PhiK/issues/7
119 if value["type"] == "Numeric" and 1 < value["n_distinct"]
120 }
121
122 selcols = {
123 key
124 for key, value in summary.items()
125 if value["type"] != "Unsupported"
126 and 1 < value["n_distinct"] <= config.categorical_maximum_correlation_distinct
127 }
128 selcols = selcols.union(intcols)
129 selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])
130
131 if len(selected_cols) <= 1:
132 return None
133
134 with warnings.catch_warnings():
135 warnings.simplefilter("ignore")
136 from phik import phik_matrix
137
138 correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))
139
140 return correlation
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pandas_profiling/model/pandas/correlations_pandas.py b/src/pandas_profiling/model/pandas/correlations_pandas.py
--- a/src/pandas_profiling/model/pandas/correlations_pandas.py
+++ b/src/pandas_profiling/model/pandas/correlations_pandas.py
@@ -97,9 +97,12 @@
for name1, name2 in itertools.combinations(categoricals, 2):
confusion_matrix = pd.crosstab(df[name1], df[name2])
- correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(
- confusion_matrix, correction=True
- )
+ if confusion_matrix.empty:
+ correlation_matrix.loc[name2, name1] = np.nan
+ else:
+ correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(
+ confusion_matrix, correction=True
+ )
correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]
return correlation_matrix
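For illustration, the guarded branch added by this patch can be exercised in isolation (synthetic frame again; in the real loop the non-empty branch calls `_cramers_corrected_stat`, and the mirrored assignment keeps the matrix symmetric):
```python
# Sketch of the patched behaviour for one pair of columns: an empty crosstab
# now contributes NaN to the correlation matrix instead of raising.
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': ['x', 'y', None], 'b': [None, None, 'u']})
confusion_matrix = pd.crosstab(df['a'], df['b'])

value = np.nan if confusion_matrix.empty else None  # else-branch would compute Cramer's V
print(value)  # nan
```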
|
{"golden_diff": "diff --git a/src/pandas_profiling/model/pandas/correlations_pandas.py b/src/pandas_profiling/model/pandas/correlations_pandas.py\n--- a/src/pandas_profiling/model/pandas/correlations_pandas.py\n+++ b/src/pandas_profiling/model/pandas/correlations_pandas.py\n@@ -97,9 +97,12 @@\n \n for name1, name2 in itertools.combinations(categoricals, 2):\n confusion_matrix = pd.crosstab(df[name1], df[name2])\n- correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n- confusion_matrix, correction=True\n- )\n+ if confusion_matrix.empty:\n+ correlation_matrix.loc[name2, name1] = np.nan\n+ else:\n+ correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n+ confusion_matrix, correction=True\n+ )\n correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]\n return correlation_matrix\n", "issue": "Cramer correlation matrix is not computed\n/usr/local/lib/python3.7/dist-packages/pandas_profiling/model/correlations.py:61: UserWarning:There was an attempt to calculate the cramers correlation, but this failed.\r\nTo hide this warning, disable the calculation\r\n(using `df.profile_report(correlations={\"cramers\": {\"calculate\": False}})`\r\nIf this is problematic for your use case, please report this as an issue:\r\nhttps://github.com/ydataai/pandas-profiling/issues\r\n(include the error message: 'No data; `observed` has size 0.')\r\n pls help. My dataset is https://busan302.mycourses.work/data/house_price_train.csv\n", "before_files": [{"content": "\"\"\"Correlations between variables.\"\"\"\nimport itertools\nimport warnings\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import (\n Cramers,\n Kendall,\n Pearson,\n PhiK,\n Spearman,\n)\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_spearman_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"spearman\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_pearson_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"pearson\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_kendall_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"kendall\")\n\n\ndef _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:\n \"\"\"Calculate the Cramer's V corrected stat for two variables.\n\n Args:\n confusion_matrix: Crosstab between two variables.\n correction: Should the correction be applied?\n\n Returns:\n The Cramer's V corrected stat for the two variables.\n \"\"\"\n chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]\n n = confusion_matrix.sum().sum()\n phi2 = chi2 / n\n r = confusion_matrix.shape[0]\n k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1\n\n # Deal with NaNs later on\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))\n rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)\n kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)\n rkcorr = min((kcorr - 1.0), (rcorr - 1.0))\n if rkcorr == 0.0:\n corr = 1.0\n else:\n corr = np.sqrt(phi2corr / rkcorr)\n return corr\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_cramers_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> 
Optional[pd.DataFrame]:\n threshold = config.categorical_maximum_correlation_distinct\n\n # `index` and `columns` must not be a set since Pandas 1.5,\n # so convert it to a list. The order of the list is arbitrary.\n categoricals = list(\n {\n key\n for key, value in summary.items()\n if value[\"type\"] in {\"Categorical\", \"Boolean\"}\n and value[\"n_distinct\"] <= threshold\n }\n )\n\n if len(categoricals) <= 1:\n return None\n\n matrix = np.zeros((len(categoricals), len(categoricals)))\n np.fill_diagonal(matrix, 1.0)\n correlation_matrix = pd.DataFrame(\n matrix,\n index=categoricals,\n columns=categoricals,\n )\n\n for name1, name2 in itertools.combinations(categoricals, 2):\n confusion_matrix = pd.crosstab(df[name1], df[name2])\n correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n confusion_matrix, correction=True\n )\n correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]\n return correlation_matrix\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}\n\n intcols = {\n key\n for key, value in summary.items()\n # DateTime currently excluded\n # In some use cases, it makes sense to convert it to interval\n # See https://github.com/KaveIO/PhiK/issues/7\n if value[\"type\"] == \"Numeric\" and 1 < value[\"n_distinct\"]\n }\n\n selcols = {\n key\n for key, value in summary.items()\n if value[\"type\"] != \"Unsupported\"\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])\n\n if len(selected_cols) <= 1:\n return None\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n\n correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))\n\n return correlation\n", "path": "src/pandas_profiling/model/pandas/correlations_pandas.py"}], "after_files": [{"content": "\"\"\"Correlations between variables.\"\"\"\nimport itertools\nimport warnings\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import (\n Cramers,\n Kendall,\n Pearson,\n PhiK,\n Spearman,\n)\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_spearman_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"spearman\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_pearson_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"pearson\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_kendall_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"kendall\")\n\n\ndef _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:\n \"\"\"Calculate the Cramer's V corrected stat for two variables.\n\n Args:\n confusion_matrix: Crosstab between two variables.\n correction: Should the correction be applied?\n\n Returns:\n The Cramer's V corrected stat for the two variables.\n \"\"\"\n chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]\n n = confusion_matrix.sum().sum()\n phi2 = chi2 / n\n r = confusion_matrix.shape[0]\n k = 
confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1\n\n # Deal with NaNs later on\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))\n rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)\n kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)\n rkcorr = min((kcorr - 1.0), (rcorr - 1.0))\n if rkcorr == 0.0:\n corr = 1.0\n else:\n corr = np.sqrt(phi2corr / rkcorr)\n return corr\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_cramers_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n threshold = config.categorical_maximum_correlation_distinct\n\n # `index` and `columns` must not be a set since Pandas 1.5,\n # so convert it to a list. The order of the list is arbitrary.\n categoricals = list(\n {\n key\n for key, value in summary.items()\n if value[\"type\"] in {\"Categorical\", \"Boolean\"}\n and value[\"n_distinct\"] <= threshold\n }\n )\n\n if len(categoricals) <= 1:\n return None\n\n matrix = np.zeros((len(categoricals), len(categoricals)))\n np.fill_diagonal(matrix, 1.0)\n correlation_matrix = pd.DataFrame(\n matrix,\n index=categoricals,\n columns=categoricals,\n )\n\n for name1, name2 in itertools.combinations(categoricals, 2):\n confusion_matrix = pd.crosstab(df[name1], df[name2])\n if confusion_matrix.empty:\n correlation_matrix.loc[name2, name1] = np.nan\n else:\n correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n confusion_matrix, correction=True\n )\n correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]\n return correlation_matrix\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}\n\n intcols = {\n key\n for key, value in summary.items()\n # DateTime currently excluded\n # In some use cases, it makes sense to convert it to interval\n # See https://github.com/KaveIO/PhiK/issues/7\n if value[\"type\"] == \"Numeric\" and 1 < value[\"n_distinct\"]\n }\n\n selcols = {\n key\n for key, value in summary.items()\n if value[\"type\"] != \"Unsupported\"\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])\n\n if len(selected_cols) <= 1:\n return None\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n\n correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))\n\n return correlation\n", "path": "src/pandas_profiling/model/pandas/correlations_pandas.py"}]}
| 1,876 | 229 |
gh_patches_debug_36945
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1602
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GB-NIR fetch historical data available with 15-min
Using the same sources as the current parser for live data.
http://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx
http://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx
http://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx
Quite interesting as current database is 80% empty for NIR

--- END ISSUE ---
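For illustration, the main change this seems to call for over the live parser shown below is building the From/To date-picker form state for an arbitrary historical date instead of `datetime.now()`; a hedged sketch of such a helper (the name and the exact template are assumptions inferred from the parser code) might be:
```python
# Hypothetical helper: reuse the ASP.NET form-state template from get_data()
# below, but parameterise it with a caller-supplied historical date so the
# same SONI endpoints can return 15-minute data for past days.
from datetime import date


def build_datepicker_state(target: date) -> str:
    template = ('|0|01%s-%s-%s-0-0-0-0||'
                '[[[[]],[],[]],[{%s},[]],"01%s-%s-%s-0-0-0-0"]')
    return template % (target.year, target.month, target.day,
                       '', target.year, target.month, target.day)


print(build_datepicker_state(date(2018, 1, 15)))
```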
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/GB_NIR.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from collections import defaultdict
4 from datetime import datetime
5 from io import StringIO
6 from operator import itemgetter
7
8 import logging
9 import pandas as pd
10 import requests
11 from bs4 import BeautifulSoup
12 from dateutil import parser, tz
13
14 from .lib.validation import validate
15
16 thermal_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx'
17 wind_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx'
18 exchange_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx'
19 # Positive values represent imports to Northern Ireland.
20 # Negative value represent exports from Northern Ireland.
21
22
23 def get_data(url, session=None):
24 """
25 Requests data from a specified url in CSV format.
26 Returns a response.text object.
27 """
28
29 s = session or requests.Session()
30
31 headers = {
32 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',
33 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
34 }
35
36 pagereq = requests.get(url, headers=headers)
37 soup = BeautifulSoup(pagereq.text, 'html.parser')
38
39 # Find and define parameters needed to send a POST request for the actual data.
40 viewstategenerator = soup.find("input", attrs={'id': '__VIEWSTATEGENERATOR'})['value']
41 viewstate = soup.find("input", attrs={'id': '__VIEWSTATE'})['value']
42 eventvalidation = soup.find("input", attrs={'id': '__EVENTVALIDATION'})['value']
43
44 # Set date for post request.
45 current_date = datetime.now().date()
46 month = current_date.month
47 day = current_date.day
48 year = current_date.year
49
50 FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],"01%s-%s-%s-0-0-0-0"]' % (year, month, day, '', year, month, day)
51 ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],"01%s-%s-%s-0-0-0-0"]' % (year, month, day, '', year, month, day)
52 btnDownloadCSV = 'Download+CSV'
53 ig_def_dp_cal_clientState = '|0|15,2017,09,2017,%s,%s||[[null,[],null],[{%s},[]],"11,2017,09,2017,%s,%s"]' % (month, day, '', month, day)
54 IG_CSS_LINKS_ = 'ig_res/default/ig_monthcalendar.css|ig_res/default/ig_texteditor.css|ig_res/default/ig_shared.css'
55
56 postdata = {'__VIEWSTATE': viewstate,
57 '__VIEWSTATEGENERATOR': viewstategenerator,
58 '__EVENTVALIDATION': eventvalidation,
59 'FromDatePicker_clientState': FromDatePicker_clientState,
60 'ToDatePicker_clientState': ToDatePicker_clientState,
61 'btnDownloadCSV': btnDownloadCSV,
62 '_ig_def_dp_cal_clientState': ig_def_dp_cal_clientState,
63 '_IG_CSS_LINKS_': IG_CSS_LINKS_
64 }
65
66 postheaders = {
67 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',
68 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
69 'Content-Type': 'application/x-www-form-urlencoded'
70 }
71
72 datareq = s.post(url, headers=postheaders, data=postdata)
73
74 return datareq.text
75
76
77 def add_default_tz(timestamp):
78 """
79 Adds Northern Ireland timezone to datetime object if tz = None.
80 """
81
82 NIR = tz.gettz('Europe/Belfast')
83 modified_timestamp = timestamp.replace(tzinfo=timestamp.tzinfo or NIR)
84
85 return modified_timestamp
86
87
88 def create_thermal_df(text_data):
89 """
90 Turns thermal csv data into a usable dataframe.
91 """
92
93 cols_to_use = [0, 1, 2, 3, 4, 5]
94 df_thermal = pd.read_csv(StringIO(text_data),
95 usecols=cols_to_use)
96 df_thermal.fillna(0.0, inplace=True)
97
98 return df_thermal
99
100
101 def create_wind_df(text_data):
102 """
103 Turns wind csv data into a usable dataframe.
104 """
105
106 cols_to_use = [0, 1]
107 df_wind = pd.read_csv(StringIO(text_data),
108 usecols=cols_to_use)
109 df_wind.fillna(0.0, inplace=True)
110
111 return df_wind
112
113
114 def create_exchange_df(text_data):
115 """
116 Turns exchange csv data into a usable dataframe.
117 """
118
119 df_exchange = pd.read_csv(StringIO(text_data))
120 df_exchange.fillna(0.0, inplace=True)
121
122 return df_exchange
123
124
125 def thermal_processor(df):
126 """
127 Creates quarter hour datapoints for thermal production.
128 Returns a list.
129 """
130
131 datapoints = []
132 for index, row in df.iterrows():
133 snapshot = {}
134 snapshot['datetime'] = row['TimeStamp']
135 snapshot['gas'] = row['Gas_MW']
136 snapshot['coal'] = row['Coal_MW']
137 snapshot['oil'] = row['Distillate_MW'] + row['Diesel_MW']
138 datapoints.append(snapshot)
139
140 return datapoints
141
142
143 def wind_processor(df):
144 """
145 Creates quarter hour datapoints for wind production.
146 Returns a list.
147 """
148
149 datapoints = []
150 for index, row in df.iterrows():
151 snapshot = {}
152 snapshot['datetime'] = row['TimeStamp']
153 snapshot['wind'] = row['Total_Wind_Generated_MW']
154 if snapshot['wind'] > -20:
155 snapshot['wind'] = max(snapshot['wind'], 0)
156 datapoints.append(snapshot)
157
158 return datapoints
159
160
161 def moyle_processor(df):
162 """
163 Creates quarter hour datapoints for GB exchange.
164 Returns a list.
165 """
166
167 datapoints = []
168 for index, row in df.iterrows():
169 snapshot = {}
170 snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],
171 dayfirst=True))
172 snapshot['netFlow'] = row['Total_Moyle_Load_MW']
173 snapshot['source'] = 'soni.ltd.uk'
174 snapshot['sortedZoneKeys'] = 'GB->GB-NIR'
175 datapoints.append(snapshot)
176
177 return datapoints
178
179
180 def IE_processor(df):
181 """
182 Creates quarter hour datapoints for IE exchange.
183 Returns a list.
184 """
185
186 datapoints = []
187 for index, row in df.iterrows():
188 snapshot = {}
189 snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],
190 dayfirst=True))
191 netFlow = (row['Total_Str_Let_Load_MW'] +
192 row['Total_Enn_Cor_Load_MW'] +
193 row['Total_Tan_Lou_Load_MW'])
194 snapshot['netFlow'] = -1 * (netFlow)
195 snapshot['source'] = 'soni.ltd.uk'
196 snapshot['sortedZoneKeys'] = 'GB-NIR->IE'
197 datapoints.append(snapshot)
198
199 return datapoints
200
201
202 def merge_production(thermal_data, wind_data):
203 """
204 Joins thermal and wind production data on shared datetime key.
205 Returns a list.
206 """
207
208 total_production = thermal_data + wind_data
209
210 # Join thermal and wind dicts on 'datetime' key.
211 d = defaultdict(dict)
212 for elem in total_production:
213 d[elem['datetime']].update(elem)
214
215 joined_data = sorted(d.values(), key=itemgetter("datetime"))
216
217 for datapoint in joined_data:
218 datapoint['datetime'] = add_default_tz(parser.parse(datapoint['datetime'], dayfirst=True))
219
220 return joined_data
221
222
223 def fetch_production(zone_key='GB-NIR', session=None, target_datetime=None,
224 logger=logging.getLogger(__name__)):
225 """
226 Requests the last known production mix (in MW) of a given country
227 Arguments:
228 zone_key (optional) -- used in case a parser is able to fetch multiple countries
229 session (optional) -- request session passed in order to re-use an existing session
230 Return:
231 A dictionary in the form:
232 {
233 'zoneKey': 'FR',
234 'datetime': '2017-01-01T00:00:00Z',
235 'production': {
236 'biomass': 0.0,
237 'coal': 0.0,
238 'gas': 0.0,
239 'hydro': 0.0,
240 'nuclear': null,
241 'oil': 0.0,
242 'solar': 0.0,
243 'wind': 0.0,
244 'geothermal': 0.0,
245 'unknown': 0.0
246 },
247 'storage': {
248 'hydro': -10.0,
249 },
250 'source': 'mysource.com'
251 }
252 """
253 if target_datetime:
254 raise NotImplementedError('This parser is not yet able to parse past dates')
255
256 thermal_data = get_data(thermal_url)
257 wind_data = get_data(wind_url)
258 thermal_df = create_thermal_df(thermal_data)
259 wind_df = create_wind_df(wind_data)
260 thermal = thermal_processor(thermal_df)
261 wind = wind_processor(wind_df)
262 merge = merge_production(thermal, wind)
263
264 production_mix_by_quarter_hour = []
265
266 for datapoint in merge:
267 production_mix = {
268 'zoneKey': zone_key,
269 'datetime': datapoint.get('datetime', 0.0),
270 'production': {
271 'coal': datapoint.get('coal', 0.0),
272 'gas': datapoint.get('gas', 0.0),
273 'oil': datapoint.get('oil', 0.0),
274 'solar': None,
275 'wind': datapoint.get('wind', 0.0)
276 },
277 'source': 'soni.ltd.uk'
278 }
279 production_mix_by_quarter_hour.append(
280 validate(production_mix, logger=logger, required=['gas', 'coal'], floor=1.0))
281
282 return production_mix_by_quarter_hour
283
284
285 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
286 """Requests the last known power exchange (in MW) between two countries
287 Arguments:
288 zone_key (optional) -- used in case a parser is able to fetch multiple countries
289 session (optional) -- request session passed in order to re-use an existing session
290 Return:
291 A dictionary in the form:
292 {
293 'sortedZoneKeys': 'DK->NO',
294 'datetime': '2017-01-01T00:00:00Z',
295 'netFlow': 0.0,
296 'source': 'mysource.com'
297 }
298 """
299 if target_datetime:
300 raise NotImplementedError('This parser is not yet able to parse past dates')
301
302 exchange_data = get_data(exchange_url)
303 exchange_dataframe = create_exchange_df(exchange_data)
304 if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':
305 moyle = moyle_processor(exchange_dataframe)
306 return moyle
307 elif '->'.join(sorted([zone_key1, zone_key2])) == 'GB-NIR->IE':
308 IE = IE_processor(exchange_dataframe)
309 return IE
310 else:
311 raise NotImplementedError('This exchange pair is not implemented')
312
313
314 if __name__ == '__main__':
315 """Main method, never used by the Electricity Map backend, but handy for testing."""
316
317 print('fetch_production() ->')
318 print(fetch_production())
319 print('fetch_exchange(GB-NIR, GB) ->')
320 print(fetch_exchange('GB-NIR', 'GB'))
321 print('fetch_exchange(GB-NIR, IE) ->')
322 print(fetch_exchange('GB-NIR', 'IE'))
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/GB_NIR.py b/parsers/GB_NIR.py
--- a/parsers/GB_NIR.py
+++ b/parsers/GB_NIR.py
@@ -20,7 +20,7 @@
# Negative value represent exports from Northern Ireland.
-def get_data(url, session=None):
+def get_data(url, target_datetime, session=None):
"""
Requests data from a specified url in CSV format.
Returns a response.text object.
@@ -42,10 +42,15 @@
eventvalidation = soup.find("input", attrs={'id': '__EVENTVALIDATION'})['value']
# Set date for post request.
- current_date = datetime.now().date()
- month = current_date.month
- day = current_date.day
- year = current_date.year
+ if target_datetime:
+ target_date = target_datetime.date()
+ else:
+ # get the latest data
+ target_date = datetime.now().date()
+
+ month = target_date.month
+ day = target_date.day
+ year = target_date.year
FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],"01%s-%s-%s-0-0-0-0"]' % (year, month, day, '', year, month, day)
ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],"01%s-%s-%s-0-0-0-0"]' % (year, month, day, '', year, month, day)
@@ -250,11 +255,9 @@
'source': 'mysource.com'
}
"""
- if target_datetime:
- raise NotImplementedError('This parser is not yet able to parse past dates')
- thermal_data = get_data(thermal_url)
- wind_data = get_data(wind_url)
+ thermal_data = get_data(thermal_url, target_datetime)
+ wind_data = get_data(wind_url, target_datetime)
thermal_df = create_thermal_df(thermal_data)
wind_df = create_wind_df(wind_data)
thermal = thermal_processor(thermal_df)
@@ -296,10 +299,8 @@
'source': 'mysource.com'
}
"""
- if target_datetime:
- raise NotImplementedError('This parser is not yet able to parse past dates')
- exchange_data = get_data(exchange_url)
+ exchange_data = get_data(exchange_url, target_datetime)
exchange_dataframe = create_exchange_df(exchange_data)
if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':
moyle = moyle_processor(exchange_dataframe)
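For quick orientation, the sketch below exercises the patched functions for a historical date. It is a hypothetical usage example rather than part of the golden diff; it assumes the repository's `parsers` package is importable from the project root and that the SONI download pages accept the posted date.

```python
# Minimal usage sketch (assumes the patched parsers/GB_NIR.py is importable
# as parsers.GB_NIR from the repository root).
from datetime import datetime

from dateutil import tz

from parsers.GB_NIR import fetch_exchange, fetch_production

# A historical timestamp in the Northern Ireland timezone.
target = datetime(2018, 7, 1, 12, 0, tzinfo=tz.gettz("Europe/Belfast"))

# With the patch, target_datetime is forwarded to get_data(), which builds the
# POST payload from target_datetime.date() instead of datetime.now().date().
production = fetch_production("GB-NIR", target_datetime=target)
moyle_flow = fetch_exchange("GB-NIR", "GB", target_datetime=target)

print(len(production), "quarter-hour production datapoints")
print(len(moyle_flow), "quarter-hour Moyle exchange datapoints")
```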
|
{"golden_diff": "diff --git a/parsers/GB_NIR.py b/parsers/GB_NIR.py\n--- a/parsers/GB_NIR.py\n+++ b/parsers/GB_NIR.py\n@@ -20,7 +20,7 @@\n # Negative value represent exports from Northern Ireland.\n \n \n-def get_data(url, session=None):\n+def get_data(url, target_datetime, session=None):\n \"\"\"\n Requests data from a specified url in CSV format.\n Returns a response.text object.\n@@ -42,10 +42,15 @@\n eventvalidation = soup.find(\"input\", attrs={'id': '__EVENTVALIDATION'})['value']\n \n # Set date for post request.\n- current_date = datetime.now().date()\n- month = current_date.month\n- day = current_date.day\n- year = current_date.year\n+ if target_datetime:\n+ target_date = target_datetime.date()\n+ else:\n+ # get the latest data\n+ target_date = datetime.now().date()\n+\n+ month = target_date.month\n+ day = target_date.day\n+ year = target_date.year\n \n FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n@@ -250,11 +255,9 @@\n 'source': 'mysource.com'\n }\n \"\"\"\n- if target_datetime:\n- raise NotImplementedError('This parser is not yet able to parse past dates')\n \n- thermal_data = get_data(thermal_url)\n- wind_data = get_data(wind_url)\n+ thermal_data = get_data(thermal_url, target_datetime)\n+ wind_data = get_data(wind_url, target_datetime)\n thermal_df = create_thermal_df(thermal_data)\n wind_df = create_wind_df(wind_data)\n thermal = thermal_processor(thermal_df)\n@@ -296,10 +299,8 @@\n 'source': 'mysource.com'\n }\n \"\"\"\n- if target_datetime:\n- raise NotImplementedError('This parser is not yet able to parse past dates')\n \n- exchange_data = get_data(exchange_url)\n+ exchange_data = get_data(exchange_url, target_datetime)\n exchange_dataframe = create_exchange_df(exchange_data)\n if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':\n moyle = moyle_processor(exchange_dataframe)\n", "issue": "GB-NIR fetch historical data available with 15-min \nUsing the same sources than current parser for live data.\r\n\r\nhttp://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx\r\nhttp://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx\r\nhttp://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx\r\n\r\nQuite interesting as current database is 80% empty for NIR\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom io import StringIO\nfrom operator import itemgetter\n\nimport logging\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser, tz\n\nfrom .lib.validation import validate\n\nthermal_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx'\nwind_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx'\nexchange_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx'\n# Positive values represent imports to Northern Ireland.\n# Negative value represent exports from Northern Ireland.\n\n\ndef get_data(url, session=None):\n \"\"\"\n Requests data from a specified url in CSV format.\n Returns a response.text object.\n \"\"\"\n\n s = session or requests.Session()\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'\n }\n\n pagereq = 
requests.get(url, headers=headers)\n soup = BeautifulSoup(pagereq.text, 'html.parser')\n\n # Find and define parameters needed to send a POST request for the actual data.\n viewstategenerator = soup.find(\"input\", attrs={'id': '__VIEWSTATEGENERATOR'})['value']\n viewstate = soup.find(\"input\", attrs={'id': '__VIEWSTATE'})['value']\n eventvalidation = soup.find(\"input\", attrs={'id': '__EVENTVALIDATION'})['value']\n\n # Set date for post request.\n current_date = datetime.now().date()\n month = current_date.month\n day = current_date.day\n year = current_date.year\n\n FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n btnDownloadCSV = 'Download+CSV'\n ig_def_dp_cal_clientState = '|0|15,2017,09,2017,%s,%s||[[null,[],null],[{%s},[]],\"11,2017,09,2017,%s,%s\"]' % (month, day, '', month, day)\n IG_CSS_LINKS_ = 'ig_res/default/ig_monthcalendar.css|ig_res/default/ig_texteditor.css|ig_res/default/ig_shared.css'\n\n postdata = {'__VIEWSTATE': viewstate,\n '__VIEWSTATEGENERATOR': viewstategenerator,\n '__EVENTVALIDATION': eventvalidation,\n 'FromDatePicker_clientState': FromDatePicker_clientState,\n 'ToDatePicker_clientState': ToDatePicker_clientState,\n 'btnDownloadCSV': btnDownloadCSV,\n '_ig_def_dp_cal_clientState': ig_def_dp_cal_clientState,\n '_IG_CSS_LINKS_': IG_CSS_LINKS_\n }\n\n postheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n datareq = s.post(url, headers=postheaders, data=postdata)\n\n return datareq.text\n\n\ndef add_default_tz(timestamp):\n \"\"\"\n Adds Northern Ireland timezone to datetime object if tz = None.\n \"\"\"\n\n NIR = tz.gettz('Europe/Belfast')\n modified_timestamp = timestamp.replace(tzinfo=timestamp.tzinfo or NIR)\n\n return modified_timestamp\n\n\ndef create_thermal_df(text_data):\n \"\"\"\n Turns thermal csv data into a usable dataframe.\n \"\"\"\n\n cols_to_use = [0, 1, 2, 3, 4, 5]\n df_thermal = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_thermal.fillna(0.0, inplace=True)\n\n return df_thermal\n\n\ndef create_wind_df(text_data):\n \"\"\"\n Turns wind csv data into a usable dataframe.\n \"\"\"\n\n cols_to_use = [0, 1]\n df_wind = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_wind.fillna(0.0, inplace=True)\n\n return df_wind\n\n\ndef create_exchange_df(text_data):\n \"\"\"\n Turns exchange csv data into a usable dataframe.\n \"\"\"\n\n df_exchange = pd.read_csv(StringIO(text_data))\n df_exchange.fillna(0.0, inplace=True)\n\n return df_exchange\n\n\ndef thermal_processor(df):\n \"\"\"\n Creates quarter hour datapoints for thermal production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['gas'] = row['Gas_MW']\n snapshot['coal'] = row['Coal_MW']\n snapshot['oil'] = row['Distillate_MW'] + row['Diesel_MW']\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef wind_processor(df):\n \"\"\"\n Creates quarter hour datapoints for wind production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n 
snapshot['wind'] = row['Total_Wind_Generated_MW']\n if snapshot['wind'] > -20:\n snapshot['wind'] = max(snapshot['wind'], 0)\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef moyle_processor(df):\n \"\"\"\n Creates quarter hour datapoints for GB exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n snapshot['netFlow'] = row['Total_Moyle_Load_MW']\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB->GB-NIR'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef IE_processor(df):\n \"\"\"\n Creates quarter hour datapoints for IE exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n netFlow = (row['Total_Str_Let_Load_MW'] +\n row['Total_Enn_Cor_Load_MW'] +\n row['Total_Tan_Lou_Load_MW'])\n snapshot['netFlow'] = -1 * (netFlow)\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB-NIR->IE'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef merge_production(thermal_data, wind_data):\n \"\"\"\n Joins thermal and wind production data on shared datetime key.\n Returns a list.\n \"\"\"\n\n total_production = thermal_data + wind_data\n\n # Join thermal and wind dicts on 'datetime' key.\n d = defaultdict(dict)\n for elem in total_production:\n d[elem['datetime']].update(elem)\n\n joined_data = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n for datapoint in joined_data:\n datapoint['datetime'] = add_default_tz(parser.parse(datapoint['datetime'], dayfirst=True))\n\n return joined_data\n\n\ndef fetch_production(zone_key='GB-NIR', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n thermal_data = get_data(thermal_url)\n wind_data = get_data(wind_url)\n thermal_df = create_thermal_df(thermal_data)\n wind_df = create_wind_df(wind_data)\n thermal = thermal_processor(thermal_df)\n wind = wind_processor(wind_df)\n merge = merge_production(thermal, wind)\n\n production_mix_by_quarter_hour = []\n\n for datapoint in merge:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': datapoint.get('datetime', 0.0),\n 'production': {\n 'coal': datapoint.get('coal', 0.0),\n 'gas': datapoint.get('gas', 0.0),\n 'oil': datapoint.get('oil', 0.0),\n 'solar': None,\n 'wind': datapoint.get('wind', 0.0)\n },\n 'source': 'soni.ltd.uk'\n }\n production_mix_by_quarter_hour.append(\n validate(production_mix, logger=logger, required=['gas', 'coal'], floor=1.0))\n\n return production_mix_by_quarter_hour\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power 
exchange (in MW) between two countries\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n exchange_data = get_data(exchange_url)\n exchange_dataframe = create_exchange_df(exchange_data)\n if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':\n moyle = moyle_processor(exchange_dataframe)\n return moyle\n elif '->'.join(sorted([zone_key1, zone_key2])) == 'GB-NIR->IE':\n IE = IE_processor(exchange_dataframe)\n return IE\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(GB-NIR, GB) ->')\n print(fetch_exchange('GB-NIR', 'GB'))\n print('fetch_exchange(GB-NIR, IE) ->')\n print(fetch_exchange('GB-NIR', 'IE'))\n", "path": "parsers/GB_NIR.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom io import StringIO\nfrom operator import itemgetter\n\nimport logging\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser, tz\n\nfrom .lib.validation import validate\n\nthermal_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/FuelMix.aspx'\nwind_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/SystemOutput.aspx'\nexchange_url = 'http://ws.soni.ltd.uk/DownloadCentre/aspx/MoyleTie.aspx'\n# Positive values represent imports to Northern Ireland.\n# Negative value represent exports from Northern Ireland.\n\n\ndef get_data(url, target_datetime, session=None):\n \"\"\"\n Requests data from a specified url in CSV format.\n Returns a response.text object.\n \"\"\"\n\n s = session or requests.Session()\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'\n }\n\n pagereq = requests.get(url, headers=headers)\n soup = BeautifulSoup(pagereq.text, 'html.parser')\n\n # Find and define parameters needed to send a POST request for the actual data.\n viewstategenerator = soup.find(\"input\", attrs={'id': '__VIEWSTATEGENERATOR'})['value']\n viewstate = soup.find(\"input\", attrs={'id': '__VIEWSTATE'})['value']\n eventvalidation = soup.find(\"input\", attrs={'id': '__EVENTVALIDATION'})['value']\n\n # Set date for post request.\n if target_datetime:\n target_date = target_datetime.date()\n else:\n # get the latest data\n target_date = datetime.now().date()\n\n month = target_date.month\n day = target_date.day\n year = target_date.year\n\n FromDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n ToDatePicker_clientState = '|0|01%s-%s-%s-0-0-0-0||[[[[]],[],[]],[{%s},[]],\"01%s-%s-%s-0-0-0-0\"]' % (year, month, day, '', year, month, day)\n btnDownloadCSV = 'Download+CSV'\n ig_def_dp_cal_clientState = '|0|15,2017,09,2017,%s,%s||[[null,[],null],[{%s},[]],\"11,2017,09,2017,%s,%s\"]' % (month, day, '', month, day)\n IG_CSS_LINKS_ = 
'ig_res/default/ig_monthcalendar.css|ig_res/default/ig_texteditor.css|ig_res/default/ig_shared.css'\n\n postdata = {'__VIEWSTATE': viewstate,\n '__VIEWSTATEGENERATOR': viewstategenerator,\n '__EVENTVALIDATION': eventvalidation,\n 'FromDatePicker_clientState': FromDatePicker_clientState,\n 'ToDatePicker_clientState': ToDatePicker_clientState,\n 'btnDownloadCSV': btnDownloadCSV,\n '_ig_def_dp_cal_clientState': ig_def_dp_cal_clientState,\n '_IG_CSS_LINKS_': IG_CSS_LINKS_\n }\n\n postheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\n datareq = s.post(url, headers=postheaders, data=postdata)\n\n return datareq.text\n\n\ndef add_default_tz(timestamp):\n \"\"\"\n Adds Northern Ireland timezone to datetime object if tz = None.\n \"\"\"\n\n NIR = tz.gettz('Europe/Belfast')\n modified_timestamp = timestamp.replace(tzinfo=timestamp.tzinfo or NIR)\n\n return modified_timestamp\n\n\ndef create_thermal_df(text_data):\n \"\"\"\n Turns thermal csv data into a usable dataframe.\n \"\"\"\n\n cols_to_use = [0, 1, 2, 3, 4, 5]\n df_thermal = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_thermal.fillna(0.0, inplace=True)\n\n return df_thermal\n\n\ndef create_wind_df(text_data):\n \"\"\"\n Turns wind csv data into a usable dataframe.\n \"\"\"\n\n cols_to_use = [0, 1]\n df_wind = pd.read_csv(StringIO(text_data),\n usecols=cols_to_use)\n df_wind.fillna(0.0, inplace=True)\n\n return df_wind\n\n\ndef create_exchange_df(text_data):\n \"\"\"\n Turns exchange csv data into a usable dataframe.\n \"\"\"\n\n df_exchange = pd.read_csv(StringIO(text_data))\n df_exchange.fillna(0.0, inplace=True)\n\n return df_exchange\n\n\ndef thermal_processor(df):\n \"\"\"\n Creates quarter hour datapoints for thermal production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['gas'] = row['Gas_MW']\n snapshot['coal'] = row['Coal_MW']\n snapshot['oil'] = row['Distillate_MW'] + row['Diesel_MW']\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef wind_processor(df):\n \"\"\"\n Creates quarter hour datapoints for wind production.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = row['TimeStamp']\n snapshot['wind'] = row['Total_Wind_Generated_MW']\n if snapshot['wind'] > -20:\n snapshot['wind'] = max(snapshot['wind'], 0)\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef moyle_processor(df):\n \"\"\"\n Creates quarter hour datapoints for GB exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n snapshot['netFlow'] = row['Total_Moyle_Load_MW']\n snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB->GB-NIR'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef IE_processor(df):\n \"\"\"\n Creates quarter hour datapoints for IE exchange.\n Returns a list.\n \"\"\"\n\n datapoints = []\n for index, row in df.iterrows():\n snapshot = {}\n snapshot['datetime'] = add_default_tz(parser.parse(row['TimeStamp'],\n dayfirst=True))\n netFlow = (row['Total_Str_Let_Load_MW'] +\n row['Total_Enn_Cor_Load_MW'] +\n row['Total_Tan_Lou_Load_MW'])\n snapshot['netFlow'] = -1 * (netFlow)\n 
snapshot['source'] = 'soni.ltd.uk'\n snapshot['sortedZoneKeys'] = 'GB-NIR->IE'\n datapoints.append(snapshot)\n\n return datapoints\n\n\ndef merge_production(thermal_data, wind_data):\n \"\"\"\n Joins thermal and wind production data on shared datetime key.\n Returns a list.\n \"\"\"\n\n total_production = thermal_data + wind_data\n\n # Join thermal and wind dicts on 'datetime' key.\n d = defaultdict(dict)\n for elem in total_production:\n d[elem['datetime']].update(elem)\n\n joined_data = sorted(d.values(), key=itemgetter(\"datetime\"))\n\n for datapoint in joined_data:\n datapoint['datetime'] = add_default_tz(parser.parse(datapoint['datetime'], dayfirst=True))\n\n return joined_data\n\n\ndef fetch_production(zone_key='GB-NIR', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n thermal_data = get_data(thermal_url, target_datetime)\n wind_data = get_data(wind_url, target_datetime)\n thermal_df = create_thermal_df(thermal_data)\n wind_df = create_wind_df(wind_data)\n thermal = thermal_processor(thermal_df)\n wind = wind_processor(wind_df)\n merge = merge_production(thermal, wind)\n\n production_mix_by_quarter_hour = []\n\n for datapoint in merge:\n production_mix = {\n 'zoneKey': zone_key,\n 'datetime': datapoint.get('datetime', 0.0),\n 'production': {\n 'coal': datapoint.get('coal', 0.0),\n 'gas': datapoint.get('gas', 0.0),\n 'oil': datapoint.get('oil', 0.0),\n 'solar': None,\n 'wind': datapoint.get('wind', 0.0)\n },\n 'source': 'soni.ltd.uk'\n }\n production_mix_by_quarter_hour.append(\n validate(production_mix, logger=logger, required=['gas', 'coal'], floor=1.0))\n\n return production_mix_by_quarter_hour\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n\n exchange_data = get_data(exchange_url, target_datetime)\n exchange_dataframe = create_exchange_df(exchange_data)\n if '->'.join(sorted([zone_key1, zone_key2])) == 'GB->GB-NIR':\n moyle = moyle_processor(exchange_dataframe)\n return moyle\n elif '->'.join(sorted([zone_key1, zone_key2])) == 'GB-NIR->IE':\n IE = IE_processor(exchange_dataframe)\n return IE\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(GB-NIR, GB) ->')\n print(fetch_exchange('GB-NIR', 'GB'))\n print('fetch_exchange(GB-NIR, IE) 
->')\n print(fetch_exchange('GB-NIR', 'IE'))\n", "path": "parsers/GB_NIR.py"}]}
| 4,010 | 635 |
gh_patches_debug_477
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5786
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pypy3-pinned OpenSSL error
Yet another CI issue, visible in https://github.com/scrapy/scrapy/actions/runs/3849823417/jobs/6559259481
> /home/runner/work/scrapy/scrapy/.tox/pypy3-pinned/site-packages/cryptography/hazmat/bindings/_openssl.pypy37-pp73-x86_64-linux-gnu.so: undefined symbol: FIPS_mode
This may be a problem with that specific binary distribution (it's `cryptography==3.3`); it is not clear why it worked before, but perhaps something was rebuilt recently.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from pathlib import Path
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 version = (Path(__file__).parent / 'scrapy/VERSION').read_text('ascii').strip()
7
8
9 def has_environment_marker_platform_impl_support():
10 """Code extracted from 'pytest/setup.py'
11 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
12
13 The first known release to support environment marker with range operators
14 it is 18.5, see:
15 https://setuptools.readthedocs.io/en/latest/history.html#id235
16 """
17 return parse_version(setuptools_version) >= parse_version('18.5')
18
19
20 install_requires = [
21 'Twisted>=18.9.0',
22 'cryptography>=3.3',
23 'cssselect>=0.9.1',
24 'itemloaders>=1.0.1',
25 'parsel>=1.5.0',
26 'pyOpenSSL>=21.0.0',
27 'queuelib>=1.4.2',
28 'service_identity>=18.1.0',
29 'w3lib>=1.17.0',
30 'zope.interface>=5.1.0',
31 'protego>=0.1.15',
32 'itemadapter>=0.1.0',
33 'setuptools',
34 'packaging',
35 'tldextract',
36 'lxml>=4.3.0',
37 ]
38 extras_require = {}
39 cpython_dependencies = [
40 'PyDispatcher>=2.0.5',
41 ]
42 if has_environment_marker_platform_impl_support():
43 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
44 extras_require[':platform_python_implementation == "PyPy"'] = [
45 'PyPyDispatcher>=2.1.0',
46 ]
47 else:
48 install_requires.extend(cpython_dependencies)
49
50
51 setup(
52 name='Scrapy',
53 version=version,
54 url='https://scrapy.org',
55 project_urls={
56 'Documentation': 'https://docs.scrapy.org/',
57 'Source': 'https://github.com/scrapy/scrapy',
58 'Tracker': 'https://github.com/scrapy/scrapy/issues',
59 },
60 description='A high-level Web Crawling and Web Scraping framework',
61 long_description=open('README.rst', encoding="utf-8").read(),
62 author='Scrapy developers',
63 author_email='[email protected]',
64 maintainer='Pablo Hoffman',
65 maintainer_email='[email protected]',
66 license='BSD',
67 packages=find_packages(exclude=('tests', 'tests.*')),
68 include_package_data=True,
69 zip_safe=False,
70 entry_points={
71 'console_scripts': ['scrapy = scrapy.cmdline:execute']
72 },
73 classifiers=[
74 'Framework :: Scrapy',
75 'Development Status :: 5 - Production/Stable',
76 'Environment :: Console',
77 'Intended Audience :: Developers',
78 'License :: OSI Approved :: BSD License',
79 'Operating System :: OS Independent',
80 'Programming Language :: Python',
81 'Programming Language :: Python :: 3',
82 'Programming Language :: Python :: 3.7',
83 'Programming Language :: Python :: 3.8',
84 'Programming Language :: Python :: 3.9',
85 'Programming Language :: Python :: 3.10',
86 'Programming Language :: Python :: 3.11',
87 'Programming Language :: Python :: Implementation :: CPython',
88 'Programming Language :: Python :: Implementation :: PyPy',
89 'Topic :: Internet :: WWW/HTTP',
90 'Topic :: Software Development :: Libraries :: Application Frameworks',
91 'Topic :: Software Development :: Libraries :: Python Modules',
92 ],
93 python_requires='>=3.7',
94 install_requires=install_requires,
95 extras_require=extras_require,
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@
install_requires = [
'Twisted>=18.9.0',
- 'cryptography>=3.3',
+ 'cryptography>=3.4.6',
'cssselect>=0.9.1',
'itemloaders>=1.0.1',
'parsel>=1.5.0',
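The change raises the minimum `cryptography` pin from 3.3 to 3.4.6, sidestepping the 3.3 PyPy wheel whose `_openssl` extension appears to reference a `FIPS_mode` symbol that the runner's OpenSSL no longer provides. A small, illustrative check of an installed environment against the new floor is sketched below; it assumes Python 3.8+ and the `packaging` dependency that setup.py already lists, and it is not something the diff itself adds.

```python
# Illustrative environment check against the new minimum pin (not part of the fix).
from importlib.metadata import version  # Python 3.8+; use importlib_metadata on 3.7

from packaging.version import Version

installed = Version(version("cryptography"))
required = Version("3.4.6")

if installed < required:
    raise SystemExit(
        f"cryptography {installed} is older than {required}; "
        "upgrade it, e.g. pip install 'cryptography>=3.4.6'"
    )
print(f"cryptography {installed} satisfies >= {required}")
```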
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n \n install_requires = [\n 'Twisted>=18.9.0',\n- 'cryptography>=3.3',\n+ 'cryptography>=3.4.6',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n", "issue": "pypy3-pinned OpenSSL error\nYet another CI issue, visible in https://github.com/scrapy/scrapy/actions/runs/3849823417/jobs/6559259481\r\n\r\n> /home/runner/work/scrapy/scrapy/.tox/pypy3-pinned/site-packages/cryptography/hazmat/bindings/_openssl.pypy37-pp73-x86_64-linux-gnu.so: undefined symbol: FIPS_mode\r\n\r\nThis may be a problem with that specific binary distribution (it's `cryptography==3.3`), not sure why it worked before, maybe something was rebuilt recently. \n", "before_files": [{"content": "from pathlib import Path\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nversion = (Path(__file__).parent / 'scrapy/VERSION').read_text('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=18.9.0',\n 'cryptography>=3.3',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=21.0.0',\n 'queuelib>=1.4.2',\n 'service_identity>=18.1.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'packaging',\n 'tldextract',\n 'lxml>=4.3.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst', encoding=\"utf-8\").read(),\n author='Scrapy developers',\n author_email='[email protected]',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: 
Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.7',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from pathlib import Path\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nversion = (Path(__file__).parent / 'scrapy/VERSION').read_text('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=18.9.0',\n 'cryptography>=3.4.6',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=21.0.0',\n 'queuelib>=1.4.2',\n 'service_identity>=18.1.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'packaging',\n 'tldextract',\n 'lxml>=4.3.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst', encoding=\"utf-8\").read(),\n author='Scrapy developers',\n author_email='[email protected]',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.7',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
| 1,421 | 105 |
gh_patches_debug_28254
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-722
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename EfficientDet Backbones
Rename the EfficientDet backbones by deleting the `efficientdet_` part from the names: there is no need to repeat it, because the model name already implies it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/ross/efficientdet/backbones.py`
Content:
```
1 __all__ = [
2 "tf_efficientdet_lite0",
3 "efficientdet_d0",
4 "efficientdet_d1",
5 "efficientdet_d2",
6 "efficientdet_d3",
7 "efficientdet_d4",
8 "efficientdet_d5",
9 "efficientdet_d6",
10 "efficientdet_d7",
11 "efficientdet_d7x",
12 ]
13
14 from icevision.models.ross.efficientdet.utils import *
15
16
17 tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
18
19 efficientdet_d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
20
21 efficientdet_d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
22
23 efficientdet_d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
24
25 efficientdet_d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
26
27 efficientdet_d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
28
29 efficientdet_d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
30
31 efficientdet_d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
32
33 efficientdet_d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
34
35 efficientdet_d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py
--- a/icevision/models/ross/efficientdet/backbones.py
+++ b/icevision/models/ross/efficientdet/backbones.py
@@ -1,35 +1,35 @@
__all__ = [
- "tf_efficientdet_lite0",
- "efficientdet_d0",
- "efficientdet_d1",
- "efficientdet_d2",
- "efficientdet_d3",
- "efficientdet_d4",
- "efficientdet_d5",
- "efficientdet_d6",
- "efficientdet_d7",
- "efficientdet_d7x",
+ "tf_lite0",
+ "d0",
+ "d1",
+ "d2",
+ "d3",
+ "d4",
+ "d5",
+ "d6",
+ "d7",
+ "d7x",
]
from icevision.models.ross.efficientdet.utils import *
-tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
+tf_lite0 = EfficientDetBackboneConfig(model_name="tf_efficientdet_lite0")
-efficientdet_d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
+d0 = EfficientDetBackboneConfig(model_name="efficientdet_d0")
-efficientdet_d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
+d1 = EfficientDetBackboneConfig(model_name="efficientdet_d1")
-efficientdet_d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
+d2 = EfficientDetBackboneConfig(model_name="efficientdet_d2")
-efficientdet_d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
+d3 = EfficientDetBackboneConfig(model_name="efficientdet_d3")
-efficientdet_d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
+d4 = EfficientDetBackboneConfig(model_name="efficientdet_d4")
-efficientdet_d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
+d5 = EfficientDetBackboneConfig(model_name="efficientdet_d5")
-efficientdet_d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
+d6 = EfficientDetBackboneConfig(model_name="efficientdet_d6")
-efficientdet_d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
+d7 = EfficientDetBackboneConfig(model_name="efficientdet_d7")
-efficientdet_d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
+d7x = EfficientDetBackboneConfig(model_name="efficientdet_d7x")
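To make the rename concrete, the sketch below shows the new attribute names next to the old ones. It is a hypothetical usage example; it assumes the package is importable under the path shown above and that `EfficientDetBackboneConfig` exposes its constructor argument as a `model_name` attribute.

```python
# Hypothetical usage sketch for the renamed backbone configs.
from icevision.models.ross.efficientdet import backbones

config = backbones.d0           # was backbones.efficientdet_d0 before the patch
tf_config = backbones.tf_lite0  # was backbones.tf_efficientdet_lite0

# Only the Python names shrink; the underlying effdet model ids are unchanged.
for name in backbones.__all__:
    cfg = getattr(backbones, name)
    print(name, "->", getattr(cfg, "model_name", cfg))
```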
|
{"golden_diff": "diff --git a/icevision/models/ross/efficientdet/backbones.py b/icevision/models/ross/efficientdet/backbones.py\n--- a/icevision/models/ross/efficientdet/backbones.py\n+++ b/icevision/models/ross/efficientdet/backbones.py\n@@ -1,35 +1,35 @@\n __all__ = [\n- \"tf_efficientdet_lite0\",\n- \"efficientdet_d0\",\n- \"efficientdet_d1\",\n- \"efficientdet_d2\",\n- \"efficientdet_d3\",\n- \"efficientdet_d4\",\n- \"efficientdet_d5\",\n- \"efficientdet_d6\",\n- \"efficientdet_d7\",\n- \"efficientdet_d7x\",\n+ \"tf_lite0\",\n+ \"d0\",\n+ \"d1\",\n+ \"d2\",\n+ \"d3\",\n+ \"d4\",\n+ \"d5\",\n+ \"d6\",\n+ \"d7\",\n+ \"d7x\",\n ]\n \n from icevision.models.ross.efficientdet.utils import *\n \n \n-tf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n+tf_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n \n-efficientdet_d0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n+d0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n \n-efficientdet_d1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n+d1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n \n-efficientdet_d2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n+d2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n \n-efficientdet_d3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n+d3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n \n-efficientdet_d4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n+d4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n \n-efficientdet_d5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n+d5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n \n-efficientdet_d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n+d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n \n-efficientdet_d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n+d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n \n-efficientdet_d7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n+d7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n", "issue": "Rename EfficientDet Backbones\nrename EfficientDet Backbones by deleting the `efficientdet_` part from the names: no need to reference that because the model already suggests that\r\n\n", "before_files": [{"content": "__all__ = [\n \"tf_efficientdet_lite0\",\n \"efficientdet_d0\",\n \"efficientdet_d1\",\n \"efficientdet_d2\",\n \"efficientdet_d3\",\n \"efficientdet_d4\",\n \"efficientdet_d5\",\n \"efficientdet_d6\",\n \"efficientdet_d7\",\n \"efficientdet_d7x\",\n]\n\nfrom icevision.models.ross.efficientdet.utils import *\n\n\ntf_efficientdet_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n\nefficientdet_d0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n\nefficientdet_d1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n\nefficientdet_d2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n\nefficientdet_d3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n\nefficientdet_d4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n\nefficientdet_d5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n\nefficientdet_d6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n\nefficientdet_d7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n\nefficientdet_d7x = 
EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n", "path": "icevision/models/ross/efficientdet/backbones.py"}], "after_files": [{"content": "__all__ = [\n \"tf_lite0\",\n \"d0\",\n \"d1\",\n \"d2\",\n \"d3\",\n \"d4\",\n \"d5\",\n \"d6\",\n \"d7\",\n \"d7x\",\n]\n\nfrom icevision.models.ross.efficientdet.utils import *\n\n\ntf_lite0 = EfficientDetBackboneConfig(model_name=\"tf_efficientdet_lite0\")\n\nd0 = EfficientDetBackboneConfig(model_name=\"efficientdet_d0\")\n\nd1 = EfficientDetBackboneConfig(model_name=\"efficientdet_d1\")\n\nd2 = EfficientDetBackboneConfig(model_name=\"efficientdet_d2\")\n\nd3 = EfficientDetBackboneConfig(model_name=\"efficientdet_d3\")\n\nd4 = EfficientDetBackboneConfig(model_name=\"efficientdet_d4\")\n\nd5 = EfficientDetBackboneConfig(model_name=\"efficientdet_d5\")\n\nd6 = EfficientDetBackboneConfig(model_name=\"efficientdet_d6\")\n\nd7 = EfficientDetBackboneConfig(model_name=\"efficientdet_d7\")\n\nd7x = EfficientDetBackboneConfig(model_name=\"efficientdet_d7x\")\n", "path": "icevision/models/ross/efficientdet/backbones.py"}]}
| 655 | 600 |
gh_patches_debug_32768
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-972
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fixtures load 8 times on docker-compose up
### Current Behavior
When starting up the application, the console log shows that the fixtures file is executed 8 times in a row. However, the current logic in the fixtures file prevents duplicate entries in the database.
### Expected Behavior
Data should just be loaded into the db once when using fixtures.
### Steps to Reproduce
1. locally run docker-compose up
2. observe the logs
3. see that the logs for loading domain applications, users, etc. are repeated 8 times each. Also note the warning messages showing that an attempt was made to insert data that already exists in the db.
### Environment
local & on sandbox (see the deploy logs)
### Additional Context
see this [slack thread](https://cisa-corp.slack.com/archives/C05BGB4L5NF/p1687988129781299)
### Issue Links
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/management/commands/load.py`
Content:
```
1 import logging
2
3 from django.core.management.base import BaseCommand
4 from auditlog.context import disable_auditlog # type: ignore
5
6 from registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture
7
8 logger = logging.getLogger(__name__)
9
10
11 class Command(BaseCommand):
12 def handle(self, *args, **options):
13 # django-auditlog has some bugs with fixtures
14 # https://github.com/jazzband/django-auditlog/issues/17
15 with disable_auditlog():
16 UserFixture.load()
17 DomainApplicationFixture.load()
18 DomainFixture.load()
19 logger.info("All fixtures loaded.")
20
```
Path: `src/registrar/signals.py`
Content:
```
1 import logging
2
3 from django.conf import settings
4 from django.core.management import call_command
5 from django.db.models.signals import post_save, post_migrate
6 from django.dispatch import receiver
7
8 from .models import User, Contact
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @receiver(post_save, sender=User)
15 def handle_profile(sender, instance, **kwargs):
16 """Method for when a User is saved.
17
18 A first time registrant may have been invited, so we'll search for a matching
19 Contact record, by email address, and associate them, if possible.
20
21 A first time registrant may not have a matching Contact, so we'll create one,
22 copying the contact values we received from Login.gov in order to initialize it.
23
24 During subsequent login, a User record may be updated with new data from Login.gov,
25 but in no case will we update contact values on an existing Contact record.
26 """
27
28 first_name = getattr(instance, "first_name", "")
29 last_name = getattr(instance, "last_name", "")
30 email = getattr(instance, "email", "")
31 phone = getattr(instance, "phone", "")
32
33 is_new_user = kwargs.get("created", False)
34
35 if is_new_user:
36 contacts = Contact.objects.filter(email=email)
37 else:
38 contacts = Contact.objects.filter(user=instance)
39
40 if len(contacts) == 0: # no matching contact
41 Contact.objects.create(
42 user=instance,
43 first_name=first_name,
44 last_name=last_name,
45 email=email,
46 phone=phone,
47 )
48
49 if len(contacts) >= 1 and is_new_user: # a matching contact
50 contacts[0].user = instance
51 contacts[0].save()
52
53 if len(contacts) > 1: # multiple matches
54 logger.warning(
55 "There are multiple Contacts with the same email address."
56 f" Picking #{contacts[0].id} for User #{instance.id}."
57 )
58
59
60 @receiver(post_migrate)
61 def handle_loaddata(**kwargs):
62 """Attempt to load test fixtures when in DEBUG mode."""
63 if settings.DEBUG:
64 try:
65 call_command("load")
66 except Exception as e:
67 logger.warning(e)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/registrar/management/commands/load.py b/src/registrar/management/commands/load.py
--- a/src/registrar/management/commands/load.py
+++ b/src/registrar/management/commands/load.py
@@ -2,6 +2,7 @@
from django.core.management.base import BaseCommand
from auditlog.context import disable_auditlog # type: ignore
+from django.conf import settings
from registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture
@@ -12,8 +13,11 @@
def handle(self, *args, **options):
# django-auditlog has some bugs with fixtures
# https://github.com/jazzband/django-auditlog/issues/17
- with disable_auditlog():
- UserFixture.load()
- DomainApplicationFixture.load()
- DomainFixture.load()
- logger.info("All fixtures loaded.")
+ if settings.DEBUG:
+ with disable_auditlog():
+ UserFixture.load()
+ DomainApplicationFixture.load()
+ DomainFixture.load()
+ logger.info("All fixtures loaded.")
+ else:
+ logger.warn("Refusing to load fixture data in a non DEBUG env")
diff --git a/src/registrar/signals.py b/src/registrar/signals.py
--- a/src/registrar/signals.py
+++ b/src/registrar/signals.py
@@ -1,8 +1,6 @@
import logging
-from django.conf import settings
-from django.core.management import call_command
-from django.db.models.signals import post_save, post_migrate
+from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import User, Contact
@@ -55,13 +53,3 @@
"There are multiple Contacts with the same email address."
f" Picking #{contacts[0].id} for User #{instance.id}."
)
-
-
-@receiver(post_migrate)
-def handle_loaddata(**kwargs):
- """Attempt to load test fixtures when in DEBUG mode."""
- if settings.DEBUG:
- try:
- call_command("load")
- except Exception as e:
- logger.warning(e)
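
A brief usage sketch of the behaviour after this patch, assuming a standard Django `manage.py` setup (the helper name `prepare_dev_database` is made up for illustration): fixtures are now loaded once, explicitly, instead of on every `post_migrate` signal, and the `load` command itself refuses to run outside DEBUG.

```python
from django.core.management import call_command

def prepare_dev_database():
    # Run migrations first; post_migrate no longer triggers fixture loading.
    call_command("migrate")
    # Load UserFixture, DomainApplicationFixture and DomainFixture exactly once.
    # The command logs a warning and does nothing when settings.DEBUG is False.
    call_command("load")
```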
|
{"golden_diff": "diff --git a/src/registrar/management/commands/load.py b/src/registrar/management/commands/load.py\n--- a/src/registrar/management/commands/load.py\n+++ b/src/registrar/management/commands/load.py\n@@ -2,6 +2,7 @@\n \n from django.core.management.base import BaseCommand\n from auditlog.context import disable_auditlog # type: ignore\n+from django.conf import settings\n \n from registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture\n \n@@ -12,8 +13,11 @@\n def handle(self, *args, **options):\n # django-auditlog has some bugs with fixtures\n # https://github.com/jazzband/django-auditlog/issues/17\n- with disable_auditlog():\n- UserFixture.load()\n- DomainApplicationFixture.load()\n- DomainFixture.load()\n- logger.info(\"All fixtures loaded.\")\n+ if settings.DEBUG:\n+ with disable_auditlog():\n+ UserFixture.load()\n+ DomainApplicationFixture.load()\n+ DomainFixture.load()\n+ logger.info(\"All fixtures loaded.\")\n+ else:\n+ logger.warn(\"Refusing to load fixture data in a non DEBUG env\")\ndiff --git a/src/registrar/signals.py b/src/registrar/signals.py\n--- a/src/registrar/signals.py\n+++ b/src/registrar/signals.py\n@@ -1,8 +1,6 @@\n import logging\n \n-from django.conf import settings\n-from django.core.management import call_command\n-from django.db.models.signals import post_save, post_migrate\n+from django.db.models.signals import post_save\n from django.dispatch import receiver\n \n from .models import User, Contact\n@@ -55,13 +53,3 @@\n \"There are multiple Contacts with the same email address.\"\n f\" Picking #{contacts[0].id} for User #{instance.id}.\"\n )\n-\n-\n-@receiver(post_migrate)\n-def handle_loaddata(**kwargs):\n- \"\"\"Attempt to load test fixtures when in DEBUG mode.\"\"\"\n- if settings.DEBUG:\n- try:\n- call_command(\"load\")\n- except Exception as e:\n- logger.warning(e)\n", "issue": "Fixtures loads 8 times on docker-compose up\n### Current Behavior\n\nWhen starting up the application, the console log shows that the fixtures file is executed 8 times in a row. However, current logic on the fixtures file is preventing duplicate entries in the database.\n\n### Expected Behavior\n\nData should just be loaded into the db once when using fixtures.\n\n### Steps to Reproduce\n\n1. locally run docker-compose up\r\n2. observe the logs\r\n3. see that the logs for loading domain application, users, etc are repeated 8 times each. 
Also note the warning messages that show an attempt was made to enter data that already exists in the db.\r\n\n\n### Environment\n\nlocal & on sandbox (see the deploy logs)\n\n### Additional Context\n\nsee this [slack thread](https://cisa-corp.slack.com/archives/C05BGB4L5NF/p1687988129781299)\n\n### Issue Links\n\n_No response_\n", "before_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom auditlog.context import disable_auditlog # type: ignore\n\nfrom registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n # django-auditlog has some bugs with fixtures\n # https://github.com/jazzband/django-auditlog/issues/17\n with disable_auditlog():\n UserFixture.load()\n DomainApplicationFixture.load()\n DomainFixture.load()\n logger.info(\"All fixtures loaded.\")\n", "path": "src/registrar/management/commands/load.py"}, {"content": "import logging\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom django.db.models.signals import post_save, post_migrate\nfrom django.dispatch import receiver\n\nfrom .models import User, Contact\n\n\nlogger = logging.getLogger(__name__)\n\n\n@receiver(post_save, sender=User)\ndef handle_profile(sender, instance, **kwargs):\n \"\"\"Method for when a User is saved.\n\n A first time registrant may have been invited, so we'll search for a matching\n Contact record, by email address, and associate them, if possible.\n\n A first time registrant may not have a matching Contact, so we'll create one,\n copying the contact values we received from Login.gov in order to initialize it.\n\n During subsequent login, a User record may be updated with new data from Login.gov,\n but in no case will we update contact values on an existing Contact record.\n \"\"\"\n\n first_name = getattr(instance, \"first_name\", \"\")\n last_name = getattr(instance, \"last_name\", \"\")\n email = getattr(instance, \"email\", \"\")\n phone = getattr(instance, \"phone\", \"\")\n\n is_new_user = kwargs.get(\"created\", False)\n\n if is_new_user:\n contacts = Contact.objects.filter(email=email)\n else:\n contacts = Contact.objects.filter(user=instance)\n\n if len(contacts) == 0: # no matching contact\n Contact.objects.create(\n user=instance,\n first_name=first_name,\n last_name=last_name,\n email=email,\n phone=phone,\n )\n\n if len(contacts) >= 1 and is_new_user: # a matching contact\n contacts[0].user = instance\n contacts[0].save()\n\n if len(contacts) > 1: # multiple matches\n logger.warning(\n \"There are multiple Contacts with the same email address.\"\n f\" Picking #{contacts[0].id} for User #{instance.id}.\"\n )\n\n\n@receiver(post_migrate)\ndef handle_loaddata(**kwargs):\n \"\"\"Attempt to load test fixtures when in DEBUG mode.\"\"\"\n if settings.DEBUG:\n try:\n call_command(\"load\")\n except Exception as e:\n logger.warning(e)\n", "path": "src/registrar/signals.py"}], "after_files": [{"content": "import logging\n\nfrom django.core.management.base import BaseCommand\nfrom auditlog.context import disable_auditlog # type: ignore\nfrom django.conf import settings\n\nfrom registrar.fixtures import UserFixture, DomainApplicationFixture, DomainFixture\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n # django-auditlog has some bugs with fixtures\n # https://github.com/jazzband/django-auditlog/issues/17\n if 
settings.DEBUG:\n with disable_auditlog():\n UserFixture.load()\n DomainApplicationFixture.load()\n DomainFixture.load()\n logger.info(\"All fixtures loaded.\")\n else:\n logger.warn(\"Refusing to load fixture data in a non DEBUG env\")\n", "path": "src/registrar/management/commands/load.py"}, {"content": "import logging\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom .models import User, Contact\n\n\nlogger = logging.getLogger(__name__)\n\n\n@receiver(post_save, sender=User)\ndef handle_profile(sender, instance, **kwargs):\n \"\"\"Method for when a User is saved.\n\n A first time registrant may have been invited, so we'll search for a matching\n Contact record, by email address, and associate them, if possible.\n\n A first time registrant may not have a matching Contact, so we'll create one,\n copying the contact values we received from Login.gov in order to initialize it.\n\n During subsequent login, a User record may be updated with new data from Login.gov,\n but in no case will we update contact values on an existing Contact record.\n \"\"\"\n\n first_name = getattr(instance, \"first_name\", \"\")\n last_name = getattr(instance, \"last_name\", \"\")\n email = getattr(instance, \"email\", \"\")\n phone = getattr(instance, \"phone\", \"\")\n\n is_new_user = kwargs.get(\"created\", False)\n\n if is_new_user:\n contacts = Contact.objects.filter(email=email)\n else:\n contacts = Contact.objects.filter(user=instance)\n\n if len(contacts) == 0: # no matching contact\n Contact.objects.create(\n user=instance,\n first_name=first_name,\n last_name=last_name,\n email=email,\n phone=phone,\n )\n\n if len(contacts) >= 1 and is_new_user: # a matching contact\n contacts[0].user = instance\n contacts[0].save()\n\n if len(contacts) > 1: # multiple matches\n logger.warning(\n \"There are multiple Contacts with the same email address.\"\n f\" Picking #{contacts[0].id} for User #{instance.id}.\"\n )\n", "path": "src/registrar/signals.py"}]}
| 1,245 | 461 |
gh_patches_debug_32366
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-343
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
flash.Trainer.add_argparse_args(parser) doesn't register args to arg parse
## 🐛 Bug
```
parser = ArgumentParser()
parser = flash.Trainer.add_argparse_args(parser)
args = parser.parse_args()
```
Does not register any of the trainer args but the following code does
```
parser = ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
```
Since Flash wraps the PL trainer this should work the same way
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/core/trainer.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import warnings
15 from functools import wraps
16 from typing import Callable, List, Optional, Union
17
18 import torch
19 from pytorch_lightning import LightningDataModule, LightningModule, Trainer
20 from pytorch_lightning.callbacks import BaseFinetuning
21 from pytorch_lightning.utilities import rank_zero_warn
22 from pytorch_lightning.utilities.argparse import get_init_arguments_and_types, parse_env_variables
23 from pytorch_lightning.utilities.exceptions import MisconfigurationException
24 from torch.utils.data import DataLoader
25
26 import flash
27 from flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks
28
29
30 def _defaults_from_env_vars(fn: Callable) -> Callable:
31 """Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix
32 build error in readthedocs."""
33
34 @wraps(fn)
35 def insert_env_defaults(self, *args, **kwargs):
36 cls = self.__class__ # get the class
37 if args: # inace any args passed move them to kwargs
38 # parse only the argument names
39 cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]
40 # convert args to kwargs
41 kwargs.update({k: v for k, v in zip(cls_arg_names, args)})
42 env_variables = vars(parse_env_variables(cls))
43 # update the kwargs by env variables
44 kwargs = dict(list(env_variables.items()) + list(kwargs.items()))
45
46 # all args were already moved to kwargs
47 return fn(self, **kwargs)
48
49 return insert_env_defaults
50
51
52 class Trainer(Trainer):
53
54 @_defaults_from_env_vars
55 def __init__(self, *args, **kwargs):
56 if flash._IS_TESTING:
57 if torch.cuda.is_available():
58 kwargs["gpus"] = 1
59 kwargs["max_epochs"] = 3
60 kwargs["limit_train_batches"] = 1.0
61 kwargs["limit_val_batches"] = 1.0
62 kwargs["limit_test_batches"] = 1.0
63 kwargs["fast_dev_run"] = False
64 else:
65 kwargs["fast_dev_run"] = True
66 super().__init__(*args, **kwargs)
67
68 def fit(
69 self,
70 model: LightningModule,
71 train_dataloader: Optional[DataLoader] = None,
72 val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
73 datamodule: Optional[LightningDataModule] = None,
74 ):
75 r"""
76 Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`
77
78 Args:
79 datamodule: A instance of :class:`LightningDataModule`.
80
81 model: Model to fit.
82
83 train_dataloader: A Pytorch DataLoader with training samples. If the model has
84 a predefined train_dataloader method this will be skipped.
85
86 val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples.
87 If the model has a predefined val_dataloaders method this will be skipped
88 """
89 if any(isinstance(c, BaseFinetuning) for c in self.callbacks):
90 # TODO: if we find a finetuning callback in the trainer should we remove it? or just warn the user?
91 warnings.warn("Warning: You are calling fit(), but your trainer is using a fine-tuning callback")
92 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
93
94 def finetune(
95 self,
96 model: LightningModule,
97 train_dataloader: Optional[DataLoader] = None,
98 val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
99 datamodule: Optional[LightningDataModule] = None,
100 strategy: Optional[Union[str, BaseFinetuning]] = None,
101 ):
102 r"""
103
104 Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers
105 of the backbone throughout training layers of the backbone throughout training.
106
107 Args:
108 datamodule: A instance of :class:`LightningDataModule`.
109
110 model: Model to fit.
111
112 train_dataloader: A PyTorch DataLoader with training samples. If the model has
113 a predefined train_dataloader method this will be skipped.
114
115 val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples.
116 If the model has a predefined val_dataloaders method this will be skipped
117
118 strategy: Should either be a string or a finetuning callback subclassing
119 :class:`pytorch_lightning.callbacks.BaseFinetuning`.
120
121 Default strategies can be enabled with these strings:
122
123 - ``"no_freeze"``,
124 - ``"freeze"``,
125 - ``"freeze_unfreeze"``,
126 - ``"unfreeze_milestones"``.
127 """
128 self._resolve_callbacks(model, strategy)
129 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
130
131 def _resolve_callbacks(self, model, strategy):
132 """
133 This function is used to select the `BaseFinetuning` to be used for finetuning.
134 """
135 if strategy is not None and not isinstance(strategy, (str, BaseFinetuning)):
136 raise MisconfigurationException(
137 "strategy should be a ``pytorch_lightning.callbacks.BaseFinetuning``"
138 f"callback or a str within {list(_DEFAULTS_FINETUNE_STRATEGIES.keys())}"
139 )
140
141 if isinstance(strategy, BaseFinetuning):
142 callback = [strategy]
143 else:
144 # todo: change to ``configure_callbacks`` when merged to Lightning.
145 model_callback = model.configure_finetune_callback()
146 if len(model_callback) > 1:
147 raise MisconfigurationException(
148 f"{model} configure_finetune_callback should create a list with only 1 callback"
149 )
150 if len(model_callback) == 1:
151 if strategy is not None:
152 rank_zero_warn(
153 "The model contains a default finetune callback. The provided {strategy} will be overriden.\n"
154 " HINT: Provide a `BaseFinetuning` callback as strategy to make it prioritized. ", UserWarning
155 )
156 callback = model_callback
157 else:
158 callback = instantiate_default_finetuning_callbacks(strategy)
159
160 self.callbacks = self._merge_callbacks(self.callbacks, callback)
161
162 @staticmethod
163 def _merge_callbacks(old_callbacks: List, new_callbacks: List) -> List:
164 """
165 This function keeps only 1 instance of each callback type,
166 extending new_callbacks with old_callbacks
167 """
168 if len(new_callbacks) == 0:
169 return old_callbacks
170 new_callbacks_types = set(type(c) for c in new_callbacks)
171 old_callbacks_types = set(type(c) for c in old_callbacks)
172 override_types = new_callbacks_types.intersection(old_callbacks_types)
173 new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)
174 return new_callbacks
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash/core/trainer.py b/flash/core/trainer.py
--- a/flash/core/trainer.py
+++ b/flash/core/trainer.py
@@ -12,14 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
+from argparse import ArgumentParser
from functools import wraps
from typing import Callable, List, Optional, Union
import torch
-from pytorch_lightning import LightningDataModule, LightningModule, Trainer
+from pytorch_lightning import LightningDataModule, LightningModule
+from pytorch_lightning import Trainer as PlTrainer
from pytorch_lightning.callbacks import BaseFinetuning
from pytorch_lightning.utilities import rank_zero_warn
-from pytorch_lightning.utilities.argparse import get_init_arguments_and_types, parse_env_variables
+from pytorch_lightning.utilities.argparse import add_argparse_args, get_init_arguments_and_types, parse_env_variables
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import DataLoader
@@ -49,7 +51,7 @@
return insert_env_defaults
-class Trainer(Trainer):
+class Trainer(PlTrainer):
@_defaults_from_env_vars
def __init__(self, *args, **kwargs):
@@ -172,3 +174,9 @@
override_types = new_callbacks_types.intersection(old_callbacks_types)
new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)
return new_callbacks
+
+ @classmethod
+ def add_argparse_args(cls, *args, **kwargs) -> ArgumentParser:
+ # the lightning trainer implementation does not support subclasses.
+ # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447
+ return add_argparse_args(PlTrainer, *args, **kwargs)
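
A minimal usage sketch of the patched behaviour, reusing the code from the issue (the `--max_epochs` value is arbitrary): with `add_argparse_args` delegating to `PlTrainer`, the Trainer arguments are registered on the parser again.

```python
from argparse import ArgumentParser

import flash

parser = ArgumentParser()
parser = flash.Trainer.add_argparse_args(parser)
# Trainer flags such as --max_epochs are now recognised instead of being
# rejected as unrecognized arguments.
args = parser.parse_args(["--max_epochs", "3"])
print(args.max_epochs)
```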
|
{"golden_diff": "diff --git a/flash/core/trainer.py b/flash/core/trainer.py\n--- a/flash/core/trainer.py\n+++ b/flash/core/trainer.py\n@@ -12,14 +12,16 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n import warnings\n+from argparse import ArgumentParser\n from functools import wraps\n from typing import Callable, List, Optional, Union\n \n import torch\n-from pytorch_lightning import LightningDataModule, LightningModule, Trainer\n+from pytorch_lightning import LightningDataModule, LightningModule\n+from pytorch_lightning import Trainer as PlTrainer\n from pytorch_lightning.callbacks import BaseFinetuning\n from pytorch_lightning.utilities import rank_zero_warn\n-from pytorch_lightning.utilities.argparse import get_init_arguments_and_types, parse_env_variables\n+from pytorch_lightning.utilities.argparse import add_argparse_args, get_init_arguments_and_types, parse_env_variables\n from pytorch_lightning.utilities.exceptions import MisconfigurationException\n from torch.utils.data import DataLoader\n \n@@ -49,7 +51,7 @@\n return insert_env_defaults\n \n \n-class Trainer(Trainer):\n+class Trainer(PlTrainer):\n \n @_defaults_from_env_vars\n def __init__(self, *args, **kwargs):\n@@ -172,3 +174,9 @@\n override_types = new_callbacks_types.intersection(old_callbacks_types)\n new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)\n return new_callbacks\n+\n+ @classmethod\n+ def add_argparse_args(cls, *args, **kwargs) -> ArgumentParser:\n+ # the lightning trainer implementation does not support subclasses.\n+ # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n+ return add_argparse_args(PlTrainer, *args, **kwargs)\n", "issue": " flash.Trainer.add_argparse_args(parser) doesn't register args to arg parse\n## \ud83d\udc1b Bug\r\n\r\n```\r\n parser = ArgumentParser()\r\n parser = flash.Trainer.add_argparse_args(parser)\r\n args = parser.parse_args()\r\n```\r\n\r\nDoes not register any of the trainer args but the following code does\r\n\r\n```\r\n parser = ArgumentParser()\r\n parser = pl.Trainer.add_argparse_args(parser)\r\n args = parser.parse_args()\r\n```\r\n\r\nSince Flash wraps the PL trainer this should work the same way\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom functools import wraps\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom pytorch_lightning import LightningDataModule, LightningModule, Trainer\nfrom pytorch_lightning.callbacks import BaseFinetuning\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom pytorch_lightning.utilities.argparse import get_init_arguments_and_types, parse_env_variables\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import DataLoader\n\nimport flash\nfrom flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, 
instantiate_default_finetuning_callbacks\n\n\ndef _defaults_from_env_vars(fn: Callable) -> Callable:\n \"\"\"Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix\n build error in readthedocs.\"\"\"\n\n @wraps(fn)\n def insert_env_defaults(self, *args, **kwargs):\n cls = self.__class__ # get the class\n if args: # inace any args passed move them to kwargs\n # parse only the argument names\n cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]\n # convert args to kwargs\n kwargs.update({k: v for k, v in zip(cls_arg_names, args)})\n env_variables = vars(parse_env_variables(cls))\n # update the kwargs by env variables\n kwargs = dict(list(env_variables.items()) + list(kwargs.items()))\n\n # all args were already moved to kwargs\n return fn(self, **kwargs)\n\n return insert_env_defaults\n\n\nclass Trainer(Trainer):\n\n @_defaults_from_env_vars\n def __init__(self, *args, **kwargs):\n if flash._IS_TESTING:\n if torch.cuda.is_available():\n kwargs[\"gpus\"] = 1\n kwargs[\"max_epochs\"] = 3\n kwargs[\"limit_train_batches\"] = 1.0\n kwargs[\"limit_val_batches\"] = 1.0\n kwargs[\"limit_test_batches\"] = 1.0\n kwargs[\"fast_dev_run\"] = False\n else:\n kwargs[\"fast_dev_run\"] = True\n super().__init__(*args, **kwargs)\n\n def fit(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n ):\n r\"\"\"\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A Pytorch DataLoader with training samples. If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n \"\"\"\n if any(isinstance(c, BaseFinetuning) for c in self.callbacks):\n # TODO: if we find a finetuning callback in the trainer should we remove it? or just warn the user?\n warnings.warn(\"Warning: You are calling fit(), but your trainer is using a fine-tuning callback\")\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def finetune(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n strategy: Optional[Union[str, BaseFinetuning]] = None,\n ):\n r\"\"\"\n\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers\n of the backbone throughout training layers of the backbone throughout training.\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A PyTorch DataLoader with training samples. 
If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n\n strategy: Should either be a string or a finetuning callback subclassing\n :class:`pytorch_lightning.callbacks.BaseFinetuning`.\n\n Default strategies can be enabled with these strings:\n\n - ``\"no_freeze\"``,\n - ``\"freeze\"``,\n - ``\"freeze_unfreeze\"``,\n - ``\"unfreeze_milestones\"``.\n \"\"\"\n self._resolve_callbacks(model, strategy)\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def _resolve_callbacks(self, model, strategy):\n \"\"\"\n This function is used to select the `BaseFinetuning` to be used for finetuning.\n \"\"\"\n if strategy is not None and not isinstance(strategy, (str, BaseFinetuning)):\n raise MisconfigurationException(\n \"strategy should be a ``pytorch_lightning.callbacks.BaseFinetuning``\"\n f\"callback or a str within {list(_DEFAULTS_FINETUNE_STRATEGIES.keys())}\"\n )\n\n if isinstance(strategy, BaseFinetuning):\n callback = [strategy]\n else:\n # todo: change to ``configure_callbacks`` when merged to Lightning.\n model_callback = model.configure_finetune_callback()\n if len(model_callback) > 1:\n raise MisconfigurationException(\n f\"{model} configure_finetune_callback should create a list with only 1 callback\"\n )\n if len(model_callback) == 1:\n if strategy is not None:\n rank_zero_warn(\n \"The model contains a default finetune callback. The provided {strategy} will be overriden.\\n\"\n \" HINT: Provide a `BaseFinetuning` callback as strategy to make it prioritized. \", UserWarning\n )\n callback = model_callback\n else:\n callback = instantiate_default_finetuning_callbacks(strategy)\n\n self.callbacks = self._merge_callbacks(self.callbacks, callback)\n\n @staticmethod\n def _merge_callbacks(old_callbacks: List, new_callbacks: List) -> List:\n \"\"\"\n This function keeps only 1 instance of each callback type,\n extending new_callbacks with old_callbacks\n \"\"\"\n if len(new_callbacks) == 0:\n return old_callbacks\n new_callbacks_types = set(type(c) for c in new_callbacks)\n old_callbacks_types = set(type(c) for c in old_callbacks)\n override_types = new_callbacks_types.intersection(old_callbacks_types)\n new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)\n return new_callbacks\n", "path": "flash/core/trainer.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom argparse import ArgumentParser\nfrom functools import wraps\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom pytorch_lightning import LightningDataModule, LightningModule\nfrom pytorch_lightning import Trainer as PlTrainer\nfrom pytorch_lightning.callbacks import BaseFinetuning\nfrom pytorch_lightning.utilities import rank_zero_warn\nfrom 
pytorch_lightning.utilities.argparse import add_argparse_args, get_init_arguments_and_types, parse_env_variables\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import DataLoader\n\nimport flash\nfrom flash.core.finetuning import _DEFAULTS_FINETUNE_STRATEGIES, instantiate_default_finetuning_callbacks\n\n\ndef _defaults_from_env_vars(fn: Callable) -> Callable:\n \"\"\"Copy of ``pytorch_lightning.trainer.connectors.env_vars_connector._defaults_from_env_vars``. Required to fix\n build error in readthedocs.\"\"\"\n\n @wraps(fn)\n def insert_env_defaults(self, *args, **kwargs):\n cls = self.__class__ # get the class\n if args: # inace any args passed move them to kwargs\n # parse only the argument names\n cls_arg_names = [arg[0] for arg in get_init_arguments_and_types(cls)]\n # convert args to kwargs\n kwargs.update({k: v for k, v in zip(cls_arg_names, args)})\n env_variables = vars(parse_env_variables(cls))\n # update the kwargs by env variables\n kwargs = dict(list(env_variables.items()) + list(kwargs.items()))\n\n # all args were already moved to kwargs\n return fn(self, **kwargs)\n\n return insert_env_defaults\n\n\nclass Trainer(PlTrainer):\n\n @_defaults_from_env_vars\n def __init__(self, *args, **kwargs):\n if flash._IS_TESTING:\n if torch.cuda.is_available():\n kwargs[\"gpus\"] = 1\n kwargs[\"max_epochs\"] = 3\n kwargs[\"limit_train_batches\"] = 1.0\n kwargs[\"limit_val_batches\"] = 1.0\n kwargs[\"limit_test_batches\"] = 1.0\n kwargs[\"fast_dev_run\"] = False\n else:\n kwargs[\"fast_dev_run\"] = True\n super().__init__(*args, **kwargs)\n\n def fit(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n ):\n r\"\"\"\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A Pytorch DataLoader with training samples. If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n \"\"\"\n if any(isinstance(c, BaseFinetuning) for c in self.callbacks):\n # TODO: if we find a finetuning callback in the trainer should we remove it? or just warn the user?\n warnings.warn(\"Warning: You are calling fit(), but your trainer is using a fine-tuning callback\")\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def finetune(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n datamodule: Optional[LightningDataModule] = None,\n strategy: Optional[Union[str, BaseFinetuning]] = None,\n ):\n r\"\"\"\n\n Runs the full optimization routine. Same as :meth:`pytorch_lightning.Trainer.fit`, but unfreezes layers\n of the backbone throughout training layers of the backbone throughout training.\n\n Args:\n datamodule: A instance of :class:`LightningDataModule`.\n\n model: Model to fit.\n\n train_dataloader: A PyTorch DataLoader with training samples. 
If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single PyTorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n\n strategy: Should either be a string or a finetuning callback subclassing\n :class:`pytorch_lightning.callbacks.BaseFinetuning`.\n\n Default strategies can be enabled with these strings:\n\n - ``\"no_freeze\"``,\n - ``\"freeze\"``,\n - ``\"freeze_unfreeze\"``,\n - ``\"unfreeze_milestones\"``.\n \"\"\"\n self._resolve_callbacks(model, strategy)\n return super().fit(model, train_dataloader, val_dataloaders, datamodule)\n\n def _resolve_callbacks(self, model, strategy):\n \"\"\"\n This function is used to select the `BaseFinetuning` to be used for finetuning.\n \"\"\"\n if strategy is not None and not isinstance(strategy, (str, BaseFinetuning)):\n raise MisconfigurationException(\n \"strategy should be a ``pytorch_lightning.callbacks.BaseFinetuning``\"\n f\"callback or a str within {list(_DEFAULTS_FINETUNE_STRATEGIES.keys())}\"\n )\n\n if isinstance(strategy, BaseFinetuning):\n callback = [strategy]\n else:\n # todo: change to ``configure_callbacks`` when merged to Lightning.\n model_callback = model.configure_finetune_callback()\n if len(model_callback) > 1:\n raise MisconfigurationException(\n f\"{model} configure_finetune_callback should create a list with only 1 callback\"\n )\n if len(model_callback) == 1:\n if strategy is not None:\n rank_zero_warn(\n \"The model contains a default finetune callback. The provided {strategy} will be overriden.\\n\"\n \" HINT: Provide a `BaseFinetuning` callback as strategy to make it prioritized. \", UserWarning\n )\n callback = model_callback\n else:\n callback = instantiate_default_finetuning_callbacks(strategy)\n\n self.callbacks = self._merge_callbacks(self.callbacks, callback)\n\n @staticmethod\n def _merge_callbacks(old_callbacks: List, new_callbacks: List) -> List:\n \"\"\"\n This function keeps only 1 instance of each callback type,\n extending new_callbacks with old_callbacks\n \"\"\"\n if len(new_callbacks) == 0:\n return old_callbacks\n new_callbacks_types = set(type(c) for c in new_callbacks)\n old_callbacks_types = set(type(c) for c in old_callbacks)\n override_types = new_callbacks_types.intersection(old_callbacks_types)\n new_callbacks.extend(c for c in old_callbacks if type(c) not in override_types)\n return new_callbacks\n\n @classmethod\n def add_argparse_args(cls, *args, **kwargs) -> ArgumentParser:\n # the lightning trainer implementation does not support subclasses.\n # context: https://github.com/PyTorchLightning/lightning-flash/issues/342#issuecomment-848892447\n return add_argparse_args(PlTrainer, *args, **kwargs)\n", "path": "flash/core/trainer.py"}]}
| 2,442 | 421 |
gh_patches_debug_12464
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1700
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rbenv default ruby issue on install
I am trying to install MDL v0.11.0
i get the following error during install
```
pre-commit
[INFO] Installing environment for git://github.com/markdownlint/markdownlint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: command: ('/usr/local/bin/bash', '/Users/abuxton/.rbenv/shims/gem', 'build', 'mdl.gemspec')
return code: 1
expected return code: 0
stdout: (none)
stderr:
rbenv: version `default' is not installed (set by RBENV_VERSION environment variable)
```
seems to be the same issue as https://github.com/pre-commit/pre-commit/issues/369 on a previous version of pre-commit
pre-commit.log https://gist.github.com/abuxton/d29e6265f3230f838a746d7a5defd5e3
i have tried, ensuring local rbenv, I have tried pre-commit clean/uninstall and reinstall to no success, mdl will `brew install mdl` successfully and works fine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/ruby.py`
Content:
```
1 import contextlib
2 import functools
3 import os.path
4 import shutil
5 import tarfile
6 from typing import Generator
7 from typing import Sequence
8 from typing import Tuple
9
10 import pre_commit.constants as C
11 from pre_commit.envcontext import envcontext
12 from pre_commit.envcontext import PatchesT
13 from pre_commit.envcontext import UNSET
14 from pre_commit.envcontext import Var
15 from pre_commit.hook import Hook
16 from pre_commit.languages import helpers
17 from pre_commit.prefix import Prefix
18 from pre_commit.util import CalledProcessError
19 from pre_commit.util import clean_path_on_failure
20 from pre_commit.util import resource_bytesio
21
22 ENVIRONMENT_DIR = 'rbenv'
23 healthy = helpers.basic_healthy
24
25
26 @functools.lru_cache(maxsize=1)
27 def get_default_version() -> str:
28 if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):
29 return 'system'
30 else:
31 return C.DEFAULT
32
33
34 def get_env_patch(
35 venv: str,
36 language_version: str,
37 ) -> PatchesT:
38 patches: PatchesT = (
39 ('GEM_HOME', os.path.join(venv, 'gems')),
40 ('GEM_PATH', UNSET),
41 ('BUNDLE_IGNORE_CONFIG', '1'),
42 )
43 if language_version == 'system':
44 patches += (
45 (
46 'PATH', (
47 os.path.join(venv, 'gems', 'bin'), os.pathsep,
48 Var('PATH'),
49 ),
50 ),
51 )
52 else: # pragma: win32 no cover
53 patches += (
54 ('RBENV_ROOT', venv),
55 ('RBENV_VERSION', language_version),
56 (
57 'PATH', (
58 os.path.join(venv, 'gems', 'bin'), os.pathsep,
59 os.path.join(venv, 'shims'), os.pathsep,
60 os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),
61 ),
62 ),
63 )
64 return patches
65
66
67 @contextlib.contextmanager
68 def in_env(
69 prefix: Prefix,
70 language_version: str,
71 ) -> Generator[None, None, None]:
72 envdir = prefix.path(
73 helpers.environment_dir(ENVIRONMENT_DIR, language_version),
74 )
75 with envcontext(get_env_patch(envdir, language_version)):
76 yield
77
78
79 def _extract_resource(filename: str, dest: str) -> None:
80 with resource_bytesio(filename) as bio:
81 with tarfile.open(fileobj=bio) as tf:
82 tf.extractall(dest)
83
84
85 def _install_rbenv(
86 prefix: Prefix,
87 version: str,
88 ) -> None: # pragma: win32 no cover
89 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
90
91 _extract_resource('rbenv.tar.gz', prefix.path('.'))
92 shutil.move(prefix.path('rbenv'), prefix.path(directory))
93
94 # Only install ruby-build if the version is specified
95 if version != C.DEFAULT:
96 plugins_dir = prefix.path(directory, 'plugins')
97 _extract_resource('ruby-download.tar.gz', plugins_dir)
98 _extract_resource('ruby-build.tar.gz', plugins_dir)
99
100
101 def _install_ruby(
102 prefix: Prefix,
103 version: str,
104 ) -> None: # pragma: win32 no cover
105 try:
106 helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))
107 except CalledProcessError: # pragma: no cover (usually find with download)
108 # Failed to download from mirror for some reason, build it instead
109 helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))
110
111
112 def install_environment(
113 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
114 ) -> None:
115 additional_dependencies = tuple(additional_dependencies)
116 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
117 with clean_path_on_failure(prefix.path(directory)):
118 if version != 'system': # pragma: win32 no cover
119 _install_rbenv(prefix, version)
120 with in_env(prefix, version):
121 # Need to call this before installing so rbenv's directories
122 # are set up
123 helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))
124 if version != C.DEFAULT:
125 _install_ruby(prefix, version)
126 # Need to call this after installing to set up the shims
127 helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))
128
129 with in_env(prefix, version):
130 helpers.run_setup_cmd(
131 prefix, ('gem', 'build', *prefix.star('.gemspec')),
132 )
133 helpers.run_setup_cmd(
134 prefix,
135 (
136 'gem', 'install',
137 '--no-document', '--no-format-executable',
138 *prefix.star('.gem'), *additional_dependencies,
139 ),
140 )
141
142
143 def run_hook(
144 hook: Hook,
145 file_args: Sequence[str],
146 color: bool,
147 ) -> Tuple[int, bytes]:
148 with in_env(hook.prefix, hook.language_version):
149 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -52,7 +52,6 @@
else: # pragma: win32 no cover
patches += (
('RBENV_ROOT', venv),
- ('RBENV_VERSION', language_version),
(
'PATH', (
os.path.join(venv, 'gems', 'bin'), os.pathsep,
@@ -61,6 +60,9 @@
),
),
)
+ if language_version not in {'system', 'default'}: # pragma: win32 no cover
+ patches += (('RBENV_VERSION', language_version),)
+
return patches
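
A small sketch of the intended behaviour after the patch, using `get_env_patch` from the file above (the directory paths are placeholders): `RBENV_VERSION` is only exported for an explicitly pinned version, so rbenv no longer aborts with the "version `default' is not installed" error.

```python
from pre_commit.languages.ruby import get_env_patch

# 'default' (and 'system') no longer pin RBENV_VERSION at all.
default_patches = dict(get_env_patch('/tmp/rbenv-default', 'default'))
assert 'RBENV_VERSION' not in default_patches
assert default_patches['RBENV_ROOT'] == '/tmp/rbenv-default'

# An explicit version is still pinned as before.
pinned_patches = dict(get_env_patch('/tmp/rbenv-2.7.2', '2.7.2'))
assert pinned_patches['RBENV_VERSION'] == '2.7.2'
```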
|
{"golden_diff": "diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py\n--- a/pre_commit/languages/ruby.py\n+++ b/pre_commit/languages/ruby.py\n@@ -52,7 +52,6 @@\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n- ('RBENV_VERSION', language_version),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n@@ -61,6 +60,9 @@\n ),\n ),\n )\n+ if language_version not in {'system', 'default'}: # pragma: win32 no cover\n+ patches += (('RBENV_VERSION', language_version),)\n+\n return patches\n", "issue": "Rbenv default ruby issue on install\nI am trying to install MDL v0.11.0\r\n\r\ni get the following error during install \r\n```\r\npre-commit\r\n[INFO] Installing environment for git://github.com/markdownlint/markdownlint.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: CalledProcessError: command: ('/usr/local/bin/bash', '/Users/abuxton/.rbenv/shims/gem', 'build', 'mdl.gemspec')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n rbenv: version `default' is not installed (set by RBENV_VERSION environment variable)\r\n```\r\n\r\nseems to be the same issue as https://github.com/pre-commit/pre-commit/issues/369 on a previous version of pre-commit\r\npre-commit.log https://gist.github.com/abuxton/d29e6265f3230f838a746d7a5defd5e3 \r\n\r\ni have tried, ensuring local rbenv, I have tried pre-commit clean/uninstall and reinstall to no success, mdl will `brew install mdl` successfully and works fine. \r\n\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n ('RBENV_VERSION', language_version),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) 
as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/ruby.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import resource_bytesio\n\nENVIRONMENT_DIR = 'rbenv'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(helpers.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n (\n 'PATH', (\n os.path.join(venv, 
'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n if language_version not in {'system', 'default'}: # pragma: win32 no cover\n patches += (('RBENV_VERSION', language_version),)\n\n return patches\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, language_version),\n )\n with envcontext(get_env_patch(envdir, language_version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), prefix.path(directory))\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = prefix.path(directory, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n helpers.run_setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n helpers.run_setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n with clean_path_on_failure(prefix.path(directory)):\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n helpers.run_setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n helpers.run_setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n helpers.run_setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/ruby.py"}]}
| 1,948 | 173 |
gh_patches_debug_11775
|
rasdani/github-patches
|
git_diff
|
AUTOMATIC1111__stable-diffusion-webui-14186
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor' torchvision 0.17 problem
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What happened?
The webui crashes on startup with `ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'`; see also:
https://github.com/pytorch/pytorch/issues/103973#issuecomment-1813303128
### Steps to reproduce the problem
1. Install
https://github.com/pytorch/pytorch/issues/103973#issuecomment-1813214452
2. Run webui
### What should have happened?
Run normally
### Sysinfo
AMD Radeon VII
AMD® Fx(tm)-9590 eight-core processor × 8 (not supporting pci atomics)
32 gb ram
Ubuntu 22.04.3 LTS x64
### What browsers do you use to access the UI ?
Mozilla Firefox
### Console logs
```Shell
(venv) b_cansin@b-cansin-ubuntu:/media/b_cansin/ai/ai/stable-diffusion-webui$ ./webui.sh
################################################################
Install script for stable-diffusion + Web UI
Tested on Debian 11 (Bullseye)
################################################################
################################################################
Running on b_cansin user
################################################################
################################################################
Repo already cloned, using it as install directory
################################################################
################################################################
python venv already activate or run without venv: /media/b_cansin/ai/ai/stable-diffusion-webui/venv
################################################################
################################################################
Launching launch.py...
################################################################
Using TCMalloc: libtcmalloc_minimal.so.4
fatal: No names found, cannot describe anything.
Python 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]
Version: 1.6.1
Commit hash: 7ba3923d5b494b7756d0b12f33acb3716d830b9a
Launching Web UI with arguments:
2023-11-16 00:38:11.762329: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
no module 'xformers'. Processing without...
no module 'xformers'. Processing without...
No module 'xformers'. Proceeding without it.
*** Error setting up CodeFormer
Traceback (most recent call last):
File "/media/b_cansin/ai/ai/stable-diffusion-webui/modules/codeformer_model.py", line 30, in setup_model
from modules.codeformer.codeformer_arch import CodeFormer
File "/media/b_cansin/ai/ai/stable-diffusion-webui/modules/codeformer/codeformer_arch.py", line 9, in <module>
from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
File "/media/b_cansin/ai/ai/stable-diffusion-webui/modules/codeformer/vqgan_arch.py", line 11, in <module>
from basicsr.utils import get_root_logger
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/__init__.py", line 4, in <module>
from .data import *
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py", line 22, in <module>
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py", line 22, in <listcomp>
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/realesrgan_dataset.py", line 11, in <module>
from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/degradations.py", line 8, in <module>
from torchvision.transforms.functional_tensor import rgb_to_grayscale
ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'
---
Traceback (most recent call last):
File "/media/b_cansin/ai/ai/stable-diffusion-webui/launch.py", line 48, in <module>
main()
File "/media/b_cansin/ai/ai/stable-diffusion-webui/launch.py", line 44, in main
start()
File "/media/b_cansin/ai/ai/stable-diffusion-webui/modules/launch_utils.py", line 436, in start
webui.webui()
File "/media/b_cansin/ai/ai/stable-diffusion-webui/webui.py", line 52, in webui
initialize.initialize()
File "/media/b_cansin/ai/ai/stable-diffusion-webui/modules/initialize.py", line 71, in initialize
from modules import gfpgan_model
File "/media/b_cansin/ai/ai/stable-diffusion-webui/modules/gfpgan_model.py", line 4, in <module>
import gfpgan
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/__init__.py", line 3, in <module>
from .data import *
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/data/__init__.py", line 10, in <module>
_dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames]
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/data/__init__.py", line 10, in <listcomp>
_dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames]
File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/data/ffhq_degradation_dataset.py", line 7, in <module>
from basicsr.data import degradations as degradations
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/__init__.py", line 4, in <module>
from .data import *
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py", line 22, in <module>
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py", line 22, in <listcomp>
_dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]
File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/realesrgan_dataset.py", line 11, in <module>
from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
File "/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/degradations.py", line 8, in <module>
from torchvision.transforms.functional_tensor import rgb_to_grayscale
ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'
(venv) b_cansin@b-cansin-ubuntu:/media/b_cansin/ai/ai/stable-diffusion-webui$
```
### Additional information
As @hongxiayang said here:
https://github.com/pytorch/pytorch/issues/103973#issuecomment-1813380169
Torchvision 0.17 needs to be supported, because torchvision.transforms.functional_tensor was deprecated and removed in 0.17. Could we get a dev version that supports 0.17?
After that we will test whether this wheel fixes the non-PCI-atomics problem of ROCm.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modules/import_hook.py`
Content:
```
1 import sys
2
3 # this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it
4 if "--xformers" not in "".join(sys.argv):
5 sys.modules["xformers"] = None
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modules/import_hook.py b/modules/import_hook.py
--- a/modules/import_hook.py
+++ b/modules/import_hook.py
@@ -3,3 +3,14 @@
# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it
if "--xformers" not in "".join(sys.argv):
sys.modules["xformers"] = None
+
+# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks
+# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985
+try:
+ import torchvision.transforms.functional_tensor # noqa: F401
+except ImportError:
+ try:
+ import torchvision.transforms.functional as functional
+ sys.modules["torchvision.transforms.functional_tensor"] = functional
+ except ImportError:
+ pass # shrug...
|
{"golden_diff": "diff --git a/modules/import_hook.py b/modules/import_hook.py\n--- a/modules/import_hook.py\n+++ b/modules/import_hook.py\n@@ -3,3 +3,14 @@\n # this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it\n if \"--xformers\" not in \"\".join(sys.argv):\n sys.modules[\"xformers\"] = None\n+\n+# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks\n+# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985\n+try:\n+ import torchvision.transforms.functional_tensor # noqa: F401\n+except ImportError:\n+ try:\n+ import torchvision.transforms.functional as functional\n+ sys.modules[\"torchvision.transforms.functional_tensor\"] = functional\n+ except ImportError:\n+ pass # shrug...\n", "issue": "[Bug]: ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor' torchvision 0.17 promblem\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\nModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'\r\ncrash\r\nhttps://github.com/pytorch/pytorch/issues/103973#issuecomment-1813303128\n\n### Steps to reproduce the problem\n\n1. Install\r\nhttps://github.com/pytorch/pytorch/issues/103973#issuecomment-1813214452\r\n2. Run webui\n\n### What should have happened?\n\nRun normaly\n\n### Sysinfo\n\nAMD Radeon VII\r\nAMD\u00ae Fx(tm)-9590 eight-core processor \u00d7 8 (not supporting pci atomics)\r\n32 gb ram\r\nUbuntu 22.04.3 LTS x64\n\n### What browsers do you use to access the UI ?\n\nMozilla Firefox\n\n### Console logs\n\n```Shell\n(venv) b_cansin@b-cansin-ubuntu:/media/b_cansin/ai/ai/stable-diffusion-webui$ ./webui.sh\r\n\r\n################################################################\r\nInstall script for stable-diffusion + Web UI\r\nTested on Debian 11 (Bullseye)\r\n################################################################\r\n\r\n################################################################\r\nRunning on b_cansin user\r\n################################################################\r\n\r\n################################################################\r\nRepo already cloned, using it as install directory\r\n################################################################\r\n\r\n################################################################\r\npython venv already activate or run without venv: /media/b_cansin/ai/ai/stable-diffusion-webui/venv\r\n################################################################\r\n\r\n################################################################\r\nLaunching launch.py...\r\n################################################################\r\nUsing TCMalloc: libtcmalloc_minimal.so.4\r\nfatal: No names found, cannot describe anything.\r\nPython 3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]\r\nVersion: 1.6.1\r\nCommit hash: 7ba3923d5b494b7756d0b12f33acb3716d830b9a\r\nLaunching Web UI with arguments: \r\n2023-11-16 00:38:11.762329: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\r\nTo enable the following instructions: FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\nno module 'xformers'. Processing without...\r\nno module 'xformers'. Processing without...\r\nNo module 'xformers'. 
Proceeding without it.\r\n*** Error setting up CodeFormer\r\n Traceback (most recent call last):\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/modules/codeformer_model.py\", line 30, in setup_model\r\n from modules.codeformer.codeformer_arch import CodeFormer\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/modules/codeformer/codeformer_arch.py\", line 9, in <module>\r\n from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/modules/codeformer/vqgan_arch.py\", line 11, in <module>\r\n from basicsr.utils import get_root_logger\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/__init__.py\", line 4, in <module>\r\n from .data import *\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py\", line 22, in <module>\r\n _dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py\", line 22, in <listcomp>\r\n _dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]\r\n File \"/usr/lib/python3.10/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/realesrgan_dataset.py\", line 11, in <module>\r\n from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/degradations.py\", line 8, in <module>\r\n from torchvision.transforms.functional_tensor import rgb_to_grayscale\r\n ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'\r\n\r\n---\r\nTraceback (most recent call last):\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/launch.py\", line 48, in <module>\r\n main()\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/launch.py\", line 44, in main\r\n start()\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/modules/launch_utils.py\", line 436, in start\r\n webui.webui()\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/webui.py\", line 52, in webui\r\n initialize.initialize()\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/modules/initialize.py\", line 71, in initialize\r\n from modules import gfpgan_model\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/modules/gfpgan_model.py\", line 4, in <module>\r\n import gfpgan\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/__init__.py\", line 3, in <module>\r\n from .data import *\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/data/__init__.py\", line 10, in <module>\r\n _dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames]\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/data/__init__.py\", line 10, in <listcomp>\r\n _dataset_modules = [importlib.import_module(f'gfpgan.data.{file_name}') for file_name in dataset_filenames]\r\n File \"/usr/lib/python3.10/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File 
\"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/gfpgan/data/ffhq_degradation_dataset.py\", line 7, in <module>\r\n from basicsr.data import degradations as degradations\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/__init__.py\", line 4, in <module>\r\n from .data import *\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py\", line 22, in <module>\r\n _dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/__init__.py\", line 22, in <listcomp>\r\n _dataset_modules = [importlib.import_module(f'basicsr.data.{file_name}') for file_name in dataset_filenames]\r\n File \"/usr/lib/python3.10/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/realesrgan_dataset.py\", line 11, in <module>\r\n from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels\r\n File \"/media/b_cansin/ai/ai/stable-diffusion-webui/venv/lib/python3.10/site-packages/basicsr/data/degradations.py\", line 8, in <module>\r\n from torchvision.transforms.functional_tensor import rgb_to_grayscale\r\nModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'\r\n(venv) b_cansin@b-cansin-ubuntu:/media/b_cansin/ai/ai/stable-diffusion-webui$\n```\n\n\n### Additional information\n\nAs @hongxiayang said in here:\r\nhttps://github.com/pytorch/pytorch/issues/103973#issuecomment-1813380169\r\nNeed support torchvision 0.17 because of torchvision.transforms.functional deprecated and removed in 0.17. Could we get dev version that support 0.17?\r\nAfter than we will test if this wheel fix non pci atomics problem of ROCM\n", "before_files": [{"content": "import sys\n\n# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it\nif \"--xformers\" not in \"\".join(sys.argv):\n sys.modules[\"xformers\"] = None\n", "path": "modules/import_hook.py"}], "after_files": [{"content": "import sys\n\n# this will break any attempt to import xformers which will prevent stability diffusion repo from trying to use it\nif \"--xformers\" not in \"\".join(sys.argv):\n sys.modules[\"xformers\"] = None\n\n# Hack to fix a changed import in torchvision 0.17+, which otherwise breaks\n# basicsr; see https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/13985\ntry:\n import torchvision.transforms.functional_tensor # noqa: F401\nexcept ImportError:\n try:\n import torchvision.transforms.functional as functional\n sys.modules[\"torchvision.transforms.functional_tensor\"] = functional\n except ImportError:\n pass # shrug...\n", "path": "modules/import_hook.py"}]}
| 2,534 | 200 |
gh_patches_debug_18436
|
rasdani/github-patches
|
git_diff
|
scalableminds__webknossos-libs-857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Converting Knossos dataset fails, chunk_shape must be multiple of mag*chunk_shape*chunks_per_shard of the view
## Context
- Affected library: wkcuber
## Expected Behavior
Should output wkw dataset
## Current Behavior
```
2023-01-13 23:33:20,047 INFO Converting KNOSSOS dataset
2023-01-13 23:33:20,048 INFO Assumed data type is uint8
2023-01-13 23:33:21,125 INFO Using pool of 2 workers.
Converting knossos layer my_layer ━━━━━━━━━━━━━━━━━ 100% 0:01:17 | 0:00:002023-01-13 23:34:39,207 INFO Using pool of 2 workers.
Traceback (most recent call last):
File "/usr/local/lib/python3.9/runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.9/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.9/site-packages/wkcuber/__main__.py", line 117, in <module>
main()
File "/usr/local/lib/python3.9/site-packages/wkcuber/__main__.py", line 113, in main
cube_with_args(args)
File "/usr/local/lib/python3.9/site-packages/wkcuber/__main__.py", line 81, in cube_with_args
auto_detect_and_run_conversion(args)
File "/usr/local/lib/python3.9/site-packages/wkcuber/converter.py", line 567, in main
should_write_metadata = matching_converters[0].convert_input(args)
File "/usr/local/lib/python3.9/site-packages/wkcuber/converter.py", line 258, in convert_input
convert_knossos(args)
File "/usr/local/lib/python3.9/site-packages/wkcuber/convert_knossos.py", line 144, in main
convert_knossos(
File "/usr/local/lib/python3.9/site-packages/wkcuber/convert_knossos.py", line 135, in convert_knossos
target_mag.for_each_chunk(
File "/usr/local/lib/python3.9/site-packages/webknossos/dataset/view.py", line 742, in for_each_chunk
self._check_chunk_shape(chunk_shape, read_only=self.read_only)
File "/usr/local/lib/python3.9/site-packages/webknossos/dataset/view.py", line 944, in _check_chunk_shape
assert chunk_shape % divisor == Vec3Int.zeros(), (
AssertionError: The chunk_shape Vec3Int(1024,1024,1024) must be a multiple of mag*chunk_shape*chunks_per_shard of the view, which is Vec3Int(8192,8192,8192)).
```
## Steps to Reproduce the bug
Got this for two Knossos datasets recently; I have not tested any others. Talk to me for sample data.
## Your Environment for bug
linux, python 3.9, wkcuber 0.11.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wkcuber/wkcuber/convert_knossos.py`
Content:
```
1 import logging
2 from argparse import ArgumentParser, Namespace
3 from functools import partial
4 from pathlib import Path
5 from typing import Optional, Tuple, cast
6
7 import numpy as np
8 from webknossos import COLOR_CATEGORY, BoundingBox, DataFormat, Dataset, Vec3Int, View
9 from webknossos.utils import time_start, time_stop
10
11 from ._internal.knossos import CUBE_EDGE_LEN
12 from ._internal.utils import (
13 KnossosDatasetInfo,
14 add_data_format_flags,
15 add_distribution_flags,
16 add_verbose_flag,
17 add_voxel_size_flag,
18 get_executor_for_args,
19 open_knossos,
20 parse_path,
21 setup_logging,
22 setup_warnings,
23 )
24
25
26 def create_parser() -> ArgumentParser:
27 parser = ArgumentParser()
28
29 parser.add_argument(
30 "source_path",
31 help="Directory containing the source KNOSSOS dataset.",
32 type=Path,
33 )
34
35 parser.add_argument(
36 "target_path",
37 help="Output directory for the generated WKW dataset.",
38 type=parse_path,
39 )
40
41 parser.add_argument(
42 "--layer_name",
43 "-l",
44 help="Name of the cubed layer (color or segmentation)",
45 default="color",
46 )
47
48 parser.add_argument(
49 "--dtype",
50 "-d",
51 help="Target datatype (e.g. uint8, uint16, uint32)",
52 default="uint8",
53 )
54
55 add_voxel_size_flag(parser)
56
57 parser.add_argument("--mag", "-m", help="Magnification level", type=int, default=1)
58
59 add_verbose_flag(parser)
60 add_distribution_flags(parser)
61 add_data_format_flags(parser)
62
63 return parser
64
65
66 def convert_cube_job(
67 source_knossos_info: KnossosDatasetInfo, args: Tuple[View, int]
68 ) -> None:
69 target_view, _ = args
70
71 time_start(f"Converting of {target_view.bounding_box}")
72 cube_size = cast(Tuple[int, int, int], (CUBE_EDGE_LEN,) * 3)
73
74 offset = target_view.bounding_box.in_mag(target_view.mag).topleft
75 size = target_view.bounding_box.in_mag(target_view.mag).size
76 buffer = np.zeros(size.to_tuple(), dtype=target_view.get_dtype())
77 with open_knossos(source_knossos_info) as source_knossos:
78 for x in range(0, size.x, CUBE_EDGE_LEN):
79 for y in range(0, size.y, CUBE_EDGE_LEN):
80 for z in range(0, size.z, CUBE_EDGE_LEN):
81 cube_data = source_knossos.read(
82 (offset + Vec3Int(x, y, z)).to_tuple(), cube_size
83 )
84 buffer[
85 x : (x + CUBE_EDGE_LEN),
86 y : (y + CUBE_EDGE_LEN),
87 z : (z + CUBE_EDGE_LEN),
88 ] = cube_data
89 target_view.write(buffer)
90
91 time_stop(f"Converting of {target_view.bounding_box}")
92
93
94 def convert_knossos(
95 source_path: Path,
96 target_path: Path,
97 layer_name: str,
98 dtype: str,
99 voxel_size: Tuple[float, float, float],
100 data_format: DataFormat,
101 chunk_shape: Vec3Int,
102 chunks_per_shard: Vec3Int,
103 mag: int = 1,
104 args: Optional[Namespace] = None,
105 ) -> None:
106 source_knossos_info = KnossosDatasetInfo(source_path, dtype)
107
108 target_dataset = Dataset(target_path, voxel_size, exist_ok=True)
109 target_layer = target_dataset.get_or_add_layer(
110 layer_name,
111 COLOR_CATEGORY,
112 data_format=data_format,
113 dtype_per_channel=dtype,
114 )
115
116 with open_knossos(source_knossos_info) as source_knossos:
117 knossos_cubes = np.array(list(source_knossos.list_cubes()))
118 if len(knossos_cubes) == 0:
119 logging.error(
120 "No input KNOSSOS cubes found. Make sure to pass the path which points to a KNOSSOS magnification (e.g., testdata/knossos/color/1)."
121 )
122 exit(1)
123
124 min_xyz = knossos_cubes.min(axis=0) * CUBE_EDGE_LEN
125 max_xyz = (knossos_cubes.max(axis=0) + 1) * CUBE_EDGE_LEN
126 target_layer.bounding_box = BoundingBox(
127 Vec3Int(min_xyz), Vec3Int(max_xyz - min_xyz)
128 )
129
130 target_mag = target_layer.get_or_add_mag(
131 mag, chunk_shape=chunk_shape, chunks_per_shard=chunks_per_shard
132 )
133
134 with get_executor_for_args(args) as executor:
135 target_mag.for_each_chunk(
136 partial(convert_cube_job, source_knossos_info),
137 chunk_shape=chunk_shape * chunks_per_shard,
138 executor=executor,
139 progress_desc=f"Converting knossos layer {layer_name}",
140 )
141
142
143 def main(args: Namespace) -> None:
144 convert_knossos(
145 args.source_path,
146 args.target_path,
147 args.layer_name,
148 args.dtype,
149 args.voxel_size,
150 args.data_format,
151 args.chunk_shape,
152 args.chunks_per_shard,
153 args.mag,
154 args,
155 )
156
157
158 if __name__ == "__main__":
159 setup_warnings()
160 args = create_parser().parse_args()
161 setup_logging(args)
162
163 main(args)
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wkcuber/wkcuber/convert_knossos.py b/wkcuber/wkcuber/convert_knossos.py
--- a/wkcuber/wkcuber/convert_knossos.py
+++ b/wkcuber/wkcuber/convert_knossos.py
@@ -98,7 +98,7 @@
dtype: str,
voxel_size: Tuple[float, float, float],
data_format: DataFormat,
- chunk_shape: Vec3Int,
+ chunk_shape: Vec3Int, # in target-mag
chunks_per_shard: Vec3Int,
mag: int = 1,
args: Optional[Namespace] = None,
@@ -134,7 +134,7 @@
with get_executor_for_args(args) as executor:
target_mag.for_each_chunk(
partial(convert_cube_job, source_knossos_info),
- chunk_shape=chunk_shape * chunks_per_shard,
+ chunk_shape=chunk_shape * mag * chunks_per_shard,
executor=executor,
progress_desc=f"Converting knossos layer {layer_name}",
)
|
{"golden_diff": "diff --git a/wkcuber/wkcuber/convert_knossos.py b/wkcuber/wkcuber/convert_knossos.py\n--- a/wkcuber/wkcuber/convert_knossos.py\n+++ b/wkcuber/wkcuber/convert_knossos.py\n@@ -98,7 +98,7 @@\n dtype: str,\n voxel_size: Tuple[float, float, float],\n data_format: DataFormat,\n- chunk_shape: Vec3Int,\n+ chunk_shape: Vec3Int, # in target-mag\n chunks_per_shard: Vec3Int,\n mag: int = 1,\n args: Optional[Namespace] = None,\n@@ -134,7 +134,7 @@\n with get_executor_for_args(args) as executor:\n target_mag.for_each_chunk(\n partial(convert_cube_job, source_knossos_info),\n- chunk_shape=chunk_shape * chunks_per_shard,\n+ chunk_shape=chunk_shape * mag * chunks_per_shard,\n executor=executor,\n progress_desc=f\"Converting knossos layer {layer_name}\",\n )\n", "issue": "Converting Knossos dataset fails, chunk_shape must be multiple of mag*chunk_shape*chunks_per_shard of the view\n## Context\r\n- Affected library: wkcuber\r\n\r\n## Expected Behavior\r\nShould output wkw dataset\r\n\r\n## Current Behavior\r\n```\r\n2023-01-13 23:33:20,047 INFO Converting KNOSSOS dataset\r\n2023-01-13 23:33:20,048 INFO Assumed data type is uint8\r\n2023-01-13 23:33:21,125 INFO Using pool of 2 workers.\r\nConverting knossos layer my_layer \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 100% 0:01:17 | 0:00:002023-01-13 23:34:39,207 INFO Using pool of 2 workers.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/runpy.py\", line 197, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/local/lib/python3.9/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/__main__.py\", line 117, in <module>\r\n main()\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/__main__.py\", line 113, in main\r\n cube_with_args(args)\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/__main__.py\", line 81, in cube_with_args\r\n auto_detect_and_run_conversion(args)\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/converter.py\", line 567, in main\r\n should_write_metadata = matching_converters[0].convert_input(args)\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/converter.py\", line 258, in convert_input\r\n convert_knossos(args)\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/convert_knossos.py\", line 144, in main\r\n convert_knossos(\r\n File \"/usr/local/lib/python3.9/site-packages/wkcuber/convert_knossos.py\", line 135, in convert_knossos\r\n target_mag.for_each_chunk(\r\n File \"/usr/local/lib/python3.9/site-packages/webknossos/dataset/view.py\", line 742, in for_each_chunk\r\n self._check_chunk_shape(chunk_shape, read_only=self.read_only)\r\n File \"/usr/local/lib/python3.9/site-packages/webknossos/dataset/view.py\", line 944, in _check_chunk_shape\r\n assert chunk_shape % divisor == Vec3Int.zeros(), (\r\nAssertionError: The chunk_shape Vec3Int(1024,1024,1024) must be a multiple of mag*chunk_shape*chunks_per_shard of the view, which is Vec3Int(8192,8192,8192)).\r\n```\r\n\r\n## Steps to Reproduce the bug\r\nGot this for two knossos datasets recently, tested no other. 
Talk to me for sample data.\r\n\r\n## Your Environment for bug\r\nlinux, python 3.9, wkcuber 0.11.1\n", "before_files": [{"content": "import logging\nfrom argparse import ArgumentParser, Namespace\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Optional, Tuple, cast\n\nimport numpy as np\nfrom webknossos import COLOR_CATEGORY, BoundingBox, DataFormat, Dataset, Vec3Int, View\nfrom webknossos.utils import time_start, time_stop\n\nfrom ._internal.knossos import CUBE_EDGE_LEN\nfrom ._internal.utils import (\n KnossosDatasetInfo,\n add_data_format_flags,\n add_distribution_flags,\n add_verbose_flag,\n add_voxel_size_flag,\n get_executor_for_args,\n open_knossos,\n parse_path,\n setup_logging,\n setup_warnings,\n)\n\n\ndef create_parser() -> ArgumentParser:\n parser = ArgumentParser()\n\n parser.add_argument(\n \"source_path\",\n help=\"Directory containing the source KNOSSOS dataset.\",\n type=Path,\n )\n\n parser.add_argument(\n \"target_path\",\n help=\"Output directory for the generated WKW dataset.\",\n type=parse_path,\n )\n\n parser.add_argument(\n \"--layer_name\",\n \"-l\",\n help=\"Name of the cubed layer (color or segmentation)\",\n default=\"color\",\n )\n\n parser.add_argument(\n \"--dtype\",\n \"-d\",\n help=\"Target datatype (e.g. uint8, uint16, uint32)\",\n default=\"uint8\",\n )\n\n add_voxel_size_flag(parser)\n\n parser.add_argument(\"--mag\", \"-m\", help=\"Magnification level\", type=int, default=1)\n\n add_verbose_flag(parser)\n add_distribution_flags(parser)\n add_data_format_flags(parser)\n\n return parser\n\n\ndef convert_cube_job(\n source_knossos_info: KnossosDatasetInfo, args: Tuple[View, int]\n) -> None:\n target_view, _ = args\n\n time_start(f\"Converting of {target_view.bounding_box}\")\n cube_size = cast(Tuple[int, int, int], (CUBE_EDGE_LEN,) * 3)\n\n offset = target_view.bounding_box.in_mag(target_view.mag).topleft\n size = target_view.bounding_box.in_mag(target_view.mag).size\n buffer = np.zeros(size.to_tuple(), dtype=target_view.get_dtype())\n with open_knossos(source_knossos_info) as source_knossos:\n for x in range(0, size.x, CUBE_EDGE_LEN):\n for y in range(0, size.y, CUBE_EDGE_LEN):\n for z in range(0, size.z, CUBE_EDGE_LEN):\n cube_data = source_knossos.read(\n (offset + Vec3Int(x, y, z)).to_tuple(), cube_size\n )\n buffer[\n x : (x + CUBE_EDGE_LEN),\n y : (y + CUBE_EDGE_LEN),\n z : (z + CUBE_EDGE_LEN),\n ] = cube_data\n target_view.write(buffer)\n\n time_stop(f\"Converting of {target_view.bounding_box}\")\n\n\ndef convert_knossos(\n source_path: Path,\n target_path: Path,\n layer_name: str,\n dtype: str,\n voxel_size: Tuple[float, float, float],\n data_format: DataFormat,\n chunk_shape: Vec3Int,\n chunks_per_shard: Vec3Int,\n mag: int = 1,\n args: Optional[Namespace] = None,\n) -> None:\n source_knossos_info = KnossosDatasetInfo(source_path, dtype)\n\n target_dataset = Dataset(target_path, voxel_size, exist_ok=True)\n target_layer = target_dataset.get_or_add_layer(\n layer_name,\n COLOR_CATEGORY,\n data_format=data_format,\n dtype_per_channel=dtype,\n )\n\n with open_knossos(source_knossos_info) as source_knossos:\n knossos_cubes = np.array(list(source_knossos.list_cubes()))\n if len(knossos_cubes) == 0:\n logging.error(\n \"No input KNOSSOS cubes found. 
Make sure to pass the path which points to a KNOSSOS magnification (e.g., testdata/knossos/color/1).\"\n )\n exit(1)\n\n min_xyz = knossos_cubes.min(axis=0) * CUBE_EDGE_LEN\n max_xyz = (knossos_cubes.max(axis=0) + 1) * CUBE_EDGE_LEN\n target_layer.bounding_box = BoundingBox(\n Vec3Int(min_xyz), Vec3Int(max_xyz - min_xyz)\n )\n\n target_mag = target_layer.get_or_add_mag(\n mag, chunk_shape=chunk_shape, chunks_per_shard=chunks_per_shard\n )\n\n with get_executor_for_args(args) as executor:\n target_mag.for_each_chunk(\n partial(convert_cube_job, source_knossos_info),\n chunk_shape=chunk_shape * chunks_per_shard,\n executor=executor,\n progress_desc=f\"Converting knossos layer {layer_name}\",\n )\n\n\ndef main(args: Namespace) -> None:\n convert_knossos(\n args.source_path,\n args.target_path,\n args.layer_name,\n args.dtype,\n args.voxel_size,\n args.data_format,\n args.chunk_shape,\n args.chunks_per_shard,\n args.mag,\n args,\n )\n\n\nif __name__ == \"__main__\":\n setup_warnings()\n args = create_parser().parse_args()\n setup_logging(args)\n\n main(args)\n", "path": "wkcuber/wkcuber/convert_knossos.py"}], "after_files": [{"content": "import logging\nfrom argparse import ArgumentParser, Namespace\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Optional, Tuple, cast\n\nimport numpy as np\nfrom webknossos import COLOR_CATEGORY, BoundingBox, DataFormat, Dataset, Vec3Int, View\nfrom webknossos.utils import time_start, time_stop\n\nfrom ._internal.knossos import CUBE_EDGE_LEN\nfrom ._internal.utils import (\n KnossosDatasetInfo,\n add_data_format_flags,\n add_distribution_flags,\n add_verbose_flag,\n add_voxel_size_flag,\n get_executor_for_args,\n open_knossos,\n parse_path,\n setup_logging,\n setup_warnings,\n)\n\n\ndef create_parser() -> ArgumentParser:\n parser = ArgumentParser()\n\n parser.add_argument(\n \"source_path\",\n help=\"Directory containing the source KNOSSOS dataset.\",\n type=Path,\n )\n\n parser.add_argument(\n \"target_path\",\n help=\"Output directory for the generated WKW dataset.\",\n type=parse_path,\n )\n\n parser.add_argument(\n \"--layer_name\",\n \"-l\",\n help=\"Name of the cubed layer (color or segmentation)\",\n default=\"color\",\n )\n\n parser.add_argument(\n \"--dtype\",\n \"-d\",\n help=\"Target datatype (e.g. 
uint8, uint16, uint32)\",\n default=\"uint8\",\n )\n\n add_voxel_size_flag(parser)\n\n parser.add_argument(\"--mag\", \"-m\", help=\"Magnification level\", type=int, default=1)\n\n add_verbose_flag(parser)\n add_distribution_flags(parser)\n add_data_format_flags(parser)\n\n return parser\n\n\ndef convert_cube_job(\n source_knossos_info: KnossosDatasetInfo, args: Tuple[View, int]\n) -> None:\n target_view, _ = args\n\n time_start(f\"Converting of {target_view.bounding_box}\")\n cube_size = cast(Tuple[int, int, int], (CUBE_EDGE_LEN,) * 3)\n\n offset = target_view.bounding_box.in_mag(target_view.mag).topleft\n size = target_view.bounding_box.in_mag(target_view.mag).size\n buffer = np.zeros(size.to_tuple(), dtype=target_view.get_dtype())\n with open_knossos(source_knossos_info) as source_knossos:\n for x in range(0, size.x, CUBE_EDGE_LEN):\n for y in range(0, size.y, CUBE_EDGE_LEN):\n for z in range(0, size.z, CUBE_EDGE_LEN):\n cube_data = source_knossos.read(\n (offset + Vec3Int(x, y, z)).to_tuple(), cube_size\n )\n buffer[\n x : (x + CUBE_EDGE_LEN),\n y : (y + CUBE_EDGE_LEN),\n z : (z + CUBE_EDGE_LEN),\n ] = cube_data\n target_view.write(buffer)\n\n time_stop(f\"Converting of {target_view.bounding_box}\")\n\n\ndef convert_knossos(\n source_path: Path,\n target_path: Path,\n layer_name: str,\n dtype: str,\n voxel_size: Tuple[float, float, float],\n data_format: DataFormat,\n chunk_shape: Vec3Int, # in target-mag\n chunks_per_shard: Vec3Int,\n mag: int = 1,\n args: Optional[Namespace] = None,\n) -> None:\n source_knossos_info = KnossosDatasetInfo(source_path, dtype)\n\n target_dataset = Dataset(target_path, voxel_size, exist_ok=True)\n target_layer = target_dataset.get_or_add_layer(\n layer_name,\n COLOR_CATEGORY,\n data_format=data_format,\n dtype_per_channel=dtype,\n )\n\n with open_knossos(source_knossos_info) as source_knossos:\n knossos_cubes = np.array(list(source_knossos.list_cubes()))\n if len(knossos_cubes) == 0:\n logging.error(\n \"No input KNOSSOS cubes found. Make sure to pass the path which points to a KNOSSOS magnification (e.g., testdata/knossos/color/1).\"\n )\n exit(1)\n\n min_xyz = knossos_cubes.min(axis=0) * CUBE_EDGE_LEN\n max_xyz = (knossos_cubes.max(axis=0) + 1) * CUBE_EDGE_LEN\n target_layer.bounding_box = BoundingBox(\n Vec3Int(min_xyz), Vec3Int(max_xyz - min_xyz)\n )\n\n target_mag = target_layer.get_or_add_mag(\n mag, chunk_shape=chunk_shape, chunks_per_shard=chunks_per_shard\n )\n\n with get_executor_for_args(args) as executor:\n target_mag.for_each_chunk(\n partial(convert_cube_job, source_knossos_info),\n chunk_shape=chunk_shape * mag * chunks_per_shard,\n executor=executor,\n progress_desc=f\"Converting knossos layer {layer_name}\",\n )\n\n\ndef main(args: Namespace) -> None:\n convert_knossos(\n args.source_path,\n args.target_path,\n args.layer_name,\n args.dtype,\n args.voxel_size,\n args.data_format,\n args.chunk_shape,\n args.chunks_per_shard,\n args.mag,\n args,\n )\n\n\nif __name__ == \"__main__\":\n setup_warnings()\n args = create_parser().parse_args()\n setup_logging(args)\n\n main(args)\n", "path": "wkcuber/wkcuber/convert_knossos.py"}]}
| 2,630 | 247 |
gh_patches_debug_1646
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-2553
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't delete emails through REST API endpoints
The endpoint to remove emails is broken :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/authentication/api/views.py`
Content:
```
1 from django.contrib.auth.models import Group
2 from rest_framework import mixins, status, viewsets
3 from rest_framework.decorators import action
4 from rest_framework.permissions import AllowAny, IsAuthenticated
5 from rest_framework.response import Response
6
7 from apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup
8 from apps.authentication.models import OnlineUser as User
9 from apps.authentication.models import Position, SpecialPosition
10 from apps.authentication.serializers import (
11 AnonymizeUserSerializer,
12 EmailCreateSerializer,
13 EmailReadOnlySerializer,
14 EmailUpdateSerializer,
15 GroupMemberCreateSerializer,
16 GroupMemberReadOnlySerializer,
17 GroupMemberUpdateSerializer,
18 GroupReadOnlySerializer,
19 GroupRoleReadOnlySerializer,
20 OnlineGroupCreateOrUpdateSerializer,
21 OnlineGroupReadOnlySerializer,
22 PasswordUpdateSerializer,
23 PositionCreateAndUpdateSerializer,
24 PositionReadOnlySerializer,
25 SpecialPositionSerializer,
26 UserCreateSerializer,
27 UserReadOnlySerializer,
28 UserUpdateSerializer,
29 )
30 from apps.common.rest_framework.mixins import MultiSerializerMixin
31 from apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly
32
33 from .filters import UserFilter
34 from .permissions import IsSelfOrSuperUser
35 from .serializers.user_data import UserDataSerializer
36
37
38 class UserViewSet(
39 MultiSerializerMixin,
40 viewsets.GenericViewSet,
41 mixins.ListModelMixin,
42 mixins.RetrieveModelMixin,
43 mixins.CreateModelMixin,
44 mixins.UpdateModelMixin,
45 ):
46 """
47 Viewset for User serializer. Supports filtering on 'first_name', 'last_name', 'email'
48 """
49
50 permission_classes = (IsSelfOrSuperUser,)
51 filterset_class = UserFilter
52 queryset = User.objects.all()
53 serializer_classes = {
54 "create": UserCreateSerializer,
55 "update": UserUpdateSerializer,
56 "read": UserReadOnlySerializer,
57 "change_password": PasswordUpdateSerializer,
58 "anonymize_user": AnonymizeUserSerializer,
59 "dump_data": UserDataSerializer,
60 }
61
62 @action(detail=True, methods=["put"])
63 def change_password(self, request, pk=None):
64 user: User = self.get_object()
65 serializer = self.get_serializer(user, data=request.data)
66 serializer.is_valid(raise_exception=True)
67 serializer.save()
68
69 return Response(data=None, status=status.HTTP_204_NO_CONTENT)
70
71 @action(detail=True, methods=["put"])
72 def anonymize_user(self, request, pk=None):
73 user: User = self.get_object()
74 serializer = self.get_serializer(user, data=request.data)
75 serializer.is_valid(raise_exception=True)
76 serializer.save()
77
78 return Response(data=None, status=status.HTTP_204_NO_CONTENT)
79
80 @action(detail=True, methods=["get"], url_path="dump-data")
81 def dump_data(self, request, pk: int):
82 user: User = self.get_object()
83 serializer = self.get_serializer(user)
84 return Response(data=serializer.data, status=status.HTTP_200_OK)
85
86
87 class EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
88 permission_classes = (IsAuthenticated,)
89 serializer_classes = {
90 "create": EmailCreateSerializer,
91 "update": EmailUpdateSerializer,
92 "read": EmailReadOnlySerializer,
93 }
94
95 def get_queryset(self):
96 return Email.objects.filter(user=self.request.user)
97
98 def destroy(self, request, *args, **kwargs):
99 instance: Email = self.get_object()
100 if instance.primary:
101 return Response(
102 {
103 "message": "Du kan ikke slette en primær-epost. Du må først velge en annen epost som "
104 "primær for å kunne slette denne."
105 },
106 status=status.HTTP_400_BAD_REQUEST,
107 )
108
109
110 class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
111 permission_classes = (IsAuthenticated,)
112 serializer_classes = {
113 "read": PositionReadOnlySerializer,
114 "write": PositionCreateAndUpdateSerializer,
115 }
116
117 def get_queryset(self):
118 user = self.request.user
119 return Position.objects.filter(user=user)
120
121
122 class SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):
123 serializer_class = SpecialPositionSerializer
124 permission_classes = (IsAuthenticated,)
125
126 def get_queryset(self):
127 user = self.request.user
128 return SpecialPosition.objects.filter(user=user)
129
130
131 class GroupViewSet(viewsets.ReadOnlyModelViewSet):
132 permission_classes = (AllowAny,)
133 queryset = Group.objects.all()
134 serializer_class = GroupReadOnlySerializer
135 ordering = ("name",)
136
137
138 class OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
139 permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)
140 queryset = OnlineGroup.objects.all()
141 serializer_classes = {
142 "write": OnlineGroupCreateOrUpdateSerializer,
143 "read": OnlineGroupReadOnlySerializer,
144 }
145
146
147 class GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
148 permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)
149 queryset = GroupMember.objects.all()
150 serializer_classes = {
151 "create": GroupMemberCreateSerializer,
152 "update": GroupMemberUpdateSerializer,
153 "read": GroupMemberReadOnlySerializer,
154 }
155
156
157 class GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):
158 permission_classes = (AllowAny,)
159 serializer_class = GroupRoleReadOnlySerializer
160 queryset = GroupRole.objects.all()
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/authentication/api/views.py b/apps/authentication/api/views.py
--- a/apps/authentication/api/views.py
+++ b/apps/authentication/api/views.py
@@ -105,6 +105,7 @@
},
status=status.HTTP_400_BAD_REQUEST,
)
+ super().destroy(request, *args, **kwargs)
class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):
|
{"golden_diff": "diff --git a/apps/authentication/api/views.py b/apps/authentication/api/views.py\n--- a/apps/authentication/api/views.py\n+++ b/apps/authentication/api/views.py\n@@ -105,6 +105,7 @@\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n+ super().destroy(request, *args, **kwargs)\n \n \n class PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n", "issue": "Cant delete mails through REST API endpoints\nThe endpoint to remove mails are fucked :)\n", "before_files": [{"content": "from django.contrib.auth.models import Group\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.authentication.models import Position, SpecialPosition\nfrom apps.authentication.serializers import (\n AnonymizeUserSerializer,\n EmailCreateSerializer,\n EmailReadOnlySerializer,\n EmailUpdateSerializer,\n GroupMemberCreateSerializer,\n GroupMemberReadOnlySerializer,\n GroupMemberUpdateSerializer,\n GroupReadOnlySerializer,\n GroupRoleReadOnlySerializer,\n OnlineGroupCreateOrUpdateSerializer,\n OnlineGroupReadOnlySerializer,\n PasswordUpdateSerializer,\n PositionCreateAndUpdateSerializer,\n PositionReadOnlySerializer,\n SpecialPositionSerializer,\n UserCreateSerializer,\n UserReadOnlySerializer,\n UserUpdateSerializer,\n)\nfrom apps.common.rest_framework.mixins import MultiSerializerMixin\nfrom apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly\n\nfrom .filters import UserFilter\nfrom .permissions import IsSelfOrSuperUser\nfrom .serializers.user_data import UserDataSerializer\n\n\nclass UserViewSet(\n MultiSerializerMixin,\n viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"\n Viewset for User serializer. 
Supports filtering on 'first_name', 'last_name', 'email'\n \"\"\"\n\n permission_classes = (IsSelfOrSuperUser,)\n filterset_class = UserFilter\n queryset = User.objects.all()\n serializer_classes = {\n \"create\": UserCreateSerializer,\n \"update\": UserUpdateSerializer,\n \"read\": UserReadOnlySerializer,\n \"change_password\": PasswordUpdateSerializer,\n \"anonymize_user\": AnonymizeUserSerializer,\n \"dump_data\": UserDataSerializer,\n }\n\n @action(detail=True, methods=[\"put\"])\n def change_password(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"put\"])\n def anonymize_user(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"get\"], url_path=\"dump-data\")\n def dump_data(self, request, pk: int):\n user: User = self.get_object()\n serializer = self.get_serializer(user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"create\": EmailCreateSerializer,\n \"update\": EmailUpdateSerializer,\n \"read\": EmailReadOnlySerializer,\n }\n\n def get_queryset(self):\n return Email.objects.filter(user=self.request.user)\n\n def destroy(self, request, *args, **kwargs):\n instance: Email = self.get_object()\n if instance.primary:\n return Response(\n {\n \"message\": \"Du kan ikke slette en prim\u00e6r-epost. 
Du m\u00e5 f\u00f8rst velge en annen epost som \"\n \"prim\u00e6r for \u00e5 kunne slette denne.\"\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n\nclass PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"read\": PositionReadOnlySerializer,\n \"write\": PositionCreateAndUpdateSerializer,\n }\n\n def get_queryset(self):\n user = self.request.user\n return Position.objects.filter(user=user)\n\n\nclass SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SpecialPositionSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return SpecialPosition.objects.filter(user=user)\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n queryset = Group.objects.all()\n serializer_class = GroupReadOnlySerializer\n ordering = (\"name\",)\n\n\nclass OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = OnlineGroup.objects.all()\n serializer_classes = {\n \"write\": OnlineGroupCreateOrUpdateSerializer,\n \"read\": OnlineGroupReadOnlySerializer,\n }\n\n\nclass GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = GroupMember.objects.all()\n serializer_classes = {\n \"create\": GroupMemberCreateSerializer,\n \"update\": GroupMemberUpdateSerializer,\n \"read\": GroupMemberReadOnlySerializer,\n }\n\n\nclass GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n serializer_class = GroupRoleReadOnlySerializer\n queryset = GroupRole.objects.all()\n", "path": "apps/authentication/api/views.py"}], "after_files": [{"content": "from django.contrib.auth.models import Group\nfrom rest_framework import mixins, status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apps.authentication.models import Email, GroupMember, GroupRole, OnlineGroup\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.authentication.models import Position, SpecialPosition\nfrom apps.authentication.serializers import (\n AnonymizeUserSerializer,\n EmailCreateSerializer,\n EmailReadOnlySerializer,\n EmailUpdateSerializer,\n GroupMemberCreateSerializer,\n GroupMemberReadOnlySerializer,\n GroupMemberUpdateSerializer,\n GroupReadOnlySerializer,\n GroupRoleReadOnlySerializer,\n OnlineGroupCreateOrUpdateSerializer,\n OnlineGroupReadOnlySerializer,\n PasswordUpdateSerializer,\n PositionCreateAndUpdateSerializer,\n PositionReadOnlySerializer,\n SpecialPositionSerializer,\n UserCreateSerializer,\n UserReadOnlySerializer,\n UserUpdateSerializer,\n)\nfrom apps.common.rest_framework.mixins import MultiSerializerMixin\nfrom apps.permissions.drf_permissions import DjangoObjectPermissionOrAnonReadOnly\n\nfrom .filters import UserFilter\nfrom .permissions import IsSelfOrSuperUser\nfrom .serializers.user_data import UserDataSerializer\n\n\nclass UserViewSet(\n MultiSerializerMixin,\n viewsets.GenericViewSet,\n mixins.ListModelMixin,\n mixins.RetrieveModelMixin,\n mixins.CreateModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"\n Viewset for User serializer. 
Supports filtering on 'first_name', 'last_name', 'email'\n \"\"\"\n\n permission_classes = (IsSelfOrSuperUser,)\n filterset_class = UserFilter\n queryset = User.objects.all()\n serializer_classes = {\n \"create\": UserCreateSerializer,\n \"update\": UserUpdateSerializer,\n \"read\": UserReadOnlySerializer,\n \"change_password\": PasswordUpdateSerializer,\n \"anonymize_user\": AnonymizeUserSerializer,\n \"dump_data\": UserDataSerializer,\n }\n\n @action(detail=True, methods=[\"put\"])\n def change_password(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"put\"])\n def anonymize_user(self, request, pk=None):\n user: User = self.get_object()\n serializer = self.get_serializer(user, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(data=None, status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=True, methods=[\"get\"], url_path=\"dump-data\")\n def dump_data(self, request, pk: int):\n user: User = self.get_object()\n serializer = self.get_serializer(user)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n\nclass EmailViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"create\": EmailCreateSerializer,\n \"update\": EmailUpdateSerializer,\n \"read\": EmailReadOnlySerializer,\n }\n\n def get_queryset(self):\n return Email.objects.filter(user=self.request.user)\n\n def destroy(self, request, *args, **kwargs):\n instance: Email = self.get_object()\n if instance.primary:\n return Response(\n {\n \"message\": \"Du kan ikke slette en prim\u00e6r-epost. 
Du m\u00e5 f\u00f8rst velge en annen epost som \"\n \"prim\u00e6r for \u00e5 kunne slette denne.\"\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n super().destroy(request, *args, **kwargs)\n\n\nclass PositionViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (IsAuthenticated,)\n serializer_classes = {\n \"read\": PositionReadOnlySerializer,\n \"write\": PositionCreateAndUpdateSerializer,\n }\n\n def get_queryset(self):\n user = self.request.user\n return Position.objects.filter(user=user)\n\n\nclass SpecialPositionViewSet(viewsets.ReadOnlyModelViewSet):\n serializer_class = SpecialPositionSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n user = self.request.user\n return SpecialPosition.objects.filter(user=user)\n\n\nclass GroupViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n queryset = Group.objects.all()\n serializer_class = GroupReadOnlySerializer\n ordering = (\"name\",)\n\n\nclass OnlineGroupViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = OnlineGroup.objects.all()\n serializer_classes = {\n \"write\": OnlineGroupCreateOrUpdateSerializer,\n \"read\": OnlineGroupReadOnlySerializer,\n }\n\n\nclass GroupMemberViewSet(MultiSerializerMixin, viewsets.ModelViewSet):\n permission_classes = (DjangoObjectPermissionOrAnonReadOnly,)\n queryset = GroupMember.objects.all()\n serializer_classes = {\n \"create\": GroupMemberCreateSerializer,\n \"update\": GroupMemberUpdateSerializer,\n \"read\": GroupMemberReadOnlySerializer,\n }\n\n\nclass GroupRoleViewSet(viewsets.ReadOnlyModelViewSet):\n permission_classes = (AllowAny,)\n serializer_class = GroupRoleReadOnlySerializer\n queryset = GroupRole.objects.all()\n", "path": "apps/authentication/api/views.py"}]}
| 1,756 | 88 |
gh_patches_debug_30213
|
rasdani/github-patches
|
git_diff
|
lutris__lutris-528
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lutris 0.4.5 doesn’t launch (AssertionError)
I'm using Linux Mint 18.1 (my DE is MATE, if that's any help) and I received the update for Lutris 0.4.5. I tried to launch Lutris after that and nothing seemed to happen. I then tried to launch it from the terminal with the command "lutris" and saw the following:
```python
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/lutris/gui/application.py", line 152, in do_activate
self.window = LutrisWindow(application=self)
File "/usr/lib/python3/dist-packages/lutris/gui/lutriswindow.py", line 84, in init
desktopapps.sync_with_lutris()
File "/usr/lib/python3/dist-packages/lutris/util/desktopapps.py", line 87, in sync_with_lutris
mark_as_installed(appid, 'linux', game_info)
File "/usr/lib/python3/dist-packages/lutris/util/desktopapps.py", line 32, in mark_as_installed
assert game_info[key]
AssertionError
```
I do hope you fix it, as I run quite a few DOSBox/ScummVM games and a Wine game with it, so I kind of need it for gaming. Thanks for your understanding.
P.S. I had also posted this on the forum, but I overcame my reluctance to create yet another account online, as it's kind of important that this gets resolved.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/desktopapps.py`
Content:
```
1 """Get games installed as desktop applications."""
2
3 import os
4 import shutil
5 import shlex
6 import subprocess
7 import re
8
9 from gi.repository import Gio
10 from lutris import pga
11 from lutris.util.log import logger
12 from lutris.util.strings import slugify
13 from lutris.config import make_game_config_id, LutrisConfig
14
15 IGNORED_GAMES = (
16 "lutris", "mame", "dosbox", "playonlinux", "org.gnome.Games", "retroarch",
17 "steam", "steam-runtime", "steam-valve", "steam-native", "PlayOnLinux",
18 "fs-uae-arcade", "PCSX2", "ppsspp", "qchdman", "qmc2-sdlmame", "qmc2-arcade",
19 "sc-controller", "epsxe"
20 )
21 IGNORED_EXECUTABLES = (
22 "lutris", "steam"
23 )
24
25 IGNORED_CATEGORIES = (
26 "Emulator", "Development", "Utility"
27 )
28
29
30 def mark_as_installed(appid, runner_name, game_info):
31 for key in ['name', 'slug']:
32 assert game_info[key]
33 logger.info("Setting %s as installed" % game_info['name'])
34 config_id = (game_info.get('config_path') or make_game_config_id(game_info['slug']))
35 game_id = pga.add_or_update(
36 name=game_info['name'],
37 runner=runner_name,
38 slug=game_info['slug'],
39 installed=1,
40 configpath=config_id,
41 installer_slug=game_info['installer_slug']
42 )
43
44 game_config = LutrisConfig(
45 runner_slug=runner_name,
46 game_config_id=config_id,
47 )
48 game_config.raw_game_config.update({'appid': appid, 'exe': game_info['exe'], 'args': game_info['args']})
49 game_config.save()
50 return game_id
51
52
53 def mark_as_uninstalled(game_info):
54 assert 'id' in game_info
55 assert 'name' in game_info
56 logger.info('Setting %s as uninstalled' % game_info['name'])
57 game_id = pga.add_or_update(
58 id=game_info['id'],
59 runner='',
60 installed=0
61 )
62 return game_id
63
64
65 def sync_with_lutris():
66 apps = get_games()
67 desktop_games_in_lutris = pga.get_desktop_games()
68 slugs_in_lutris = set([str(game['slug']) for game in desktop_games_in_lutris])
69
70 seen_slugs = set()
71 for app in apps:
72 game_info = None
73 name = app[0]
74 slug = slugify(name)
75 appid = app[1]
76 seen_slugs.add(slug)
77
78 if slug not in slugs_in_lutris:
79 game_info = {
80 'name': name,
81 'slug': slug,
82 'config_path': slug + '-desktopapp',
83 'installer_slug': 'desktopapp',
84 'exe': app[2],
85 'args': app[3]
86 }
87 mark_as_installed(appid, 'linux', game_info)
88
89 unavailable_slugs = slugs_in_lutris.difference(seen_slugs)
90 for slug in unavailable_slugs:
91 for game in desktop_games_in_lutris:
92 if game['slug'] == slug:
93 mark_as_uninstalled(game)
94
95
96 def get_games():
97 """Return the list of games stored in the XDG menu."""
98 game_list = []
99
100 apps = Gio.AppInfo.get_all()
101 for app in apps:
102 if app.get_nodisplay() or app.get_is_hidden():
103 continue
104 appid = os.path.splitext(app.get_id())[0]
105 exe = None
106 args = []
107
108 # must be in Game category
109 categories = app.get_categories()
110 if not categories:
111 continue
112 categories = filter(None, categories.lower().split(';'))
113 if 'game' not in categories:
114 continue
115
116 # contains a blacklisted category
117 ok = True
118 for c in categories:
119 if c in map(str.lower, IGNORED_CATEGORIES):
120 ok = False
121 if not ok:
122 continue
123
124 # game is blacklisted
125 if appid.lower() in map(str.lower, IGNORED_GAMES):
126 continue
127
128 # executable is blacklisted
129 if app.get_executable().lower() in IGNORED_EXECUTABLES:
130 continue
131
132 cli = shlex.split(app.get_commandline())
133 exe = cli[0]
134 args = cli[1:]
135 # remove %U etc. and change %% to % in arguments
136 args = list(map(lambda arg: re.sub('%[^%]', '', arg).replace('%%', '%'), args))
137
138 args = subprocess.list2cmdline(args)
139
140 if not exe.startswith('/'):
141 exe = shutil.which(exe)
142 game_list.append((app.get_display_name(), appid, exe, args))
143 return game_list
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/util/desktopapps.py b/lutris/util/desktopapps.py
--- a/lutris/util/desktopapps.py
+++ b/lutris/util/desktopapps.py
@@ -18,6 +18,7 @@
"fs-uae-arcade", "PCSX2", "ppsspp", "qchdman", "qmc2-sdlmame", "qmc2-arcade",
"sc-controller", "epsxe"
)
+
IGNORED_EXECUTABLES = (
"lutris", "steam"
)
@@ -41,12 +42,19 @@
installer_slug=game_info['installer_slug']
)
- game_config = LutrisConfig(
+ config = LutrisConfig(
runner_slug=runner_name,
game_config_id=config_id,
)
- game_config.raw_game_config.update({'appid': appid, 'exe': game_info['exe'], 'args': game_info['args']})
- game_config.save()
+ config.raw_game_config.update({
+ 'appid': appid,
+ 'exe': game_info['exe'],
+ 'args': game_info['args']
+ })
+ config.raw_system_config.update({
+ 'disable_runtime': True
+ })
+ config.save()
return game_id
@@ -71,8 +79,23 @@
for app in apps:
game_info = None
name = app[0]
- slug = slugify(name)
appid = app[1]
+ slug = slugify(name)
+
+ # if it fails to get slug from the name
+ if not slug:
+ slug = slugify(appid)
+
+ if not name or not slug or not appid:
+ logger.error("Failed to load desktop game "
+ "\"" + str(name) + "\" "
+ "(app: " + str(appid) + ", slug: " + slug + ")")
+ continue
+ else:
+ logger.debug("Found desktop game "
+ "\"" + str(name) + "\" "
+ "(app: " + str(appid) + ", slug: " + slug + ")")
+
seen_slugs.add(slug)
if slug not in slugs_in_lutris:
|
{"golden_diff": "diff --git a/lutris/util/desktopapps.py b/lutris/util/desktopapps.py\n--- a/lutris/util/desktopapps.py\n+++ b/lutris/util/desktopapps.py\n@@ -18,6 +18,7 @@\n \"fs-uae-arcade\", \"PCSX2\", \"ppsspp\", \"qchdman\", \"qmc2-sdlmame\", \"qmc2-arcade\",\n \"sc-controller\", \"epsxe\"\n )\n+\n IGNORED_EXECUTABLES = (\n \"lutris\", \"steam\"\n )\n@@ -41,12 +42,19 @@\n installer_slug=game_info['installer_slug']\n )\n \n- game_config = LutrisConfig(\n+ config = LutrisConfig(\n runner_slug=runner_name,\n game_config_id=config_id,\n )\n- game_config.raw_game_config.update({'appid': appid, 'exe': game_info['exe'], 'args': game_info['args']})\n- game_config.save()\n+ config.raw_game_config.update({\n+ 'appid': appid,\n+ 'exe': game_info['exe'],\n+ 'args': game_info['args']\n+ })\n+ config.raw_system_config.update({\n+ 'disable_runtime': True\n+ })\n+ config.save()\n return game_id\n \n \n@@ -71,8 +79,23 @@\n for app in apps:\n game_info = None\n name = app[0]\n- slug = slugify(name)\n appid = app[1]\n+ slug = slugify(name)\n+\n+ # if it fails to get slug from the name\n+ if not slug:\n+ slug = slugify(appid)\n+\n+ if not name or not slug or not appid:\n+ logger.error(\"Failed to load desktop game \"\n+ \"\\\"\" + str(name) + \"\\\" \"\n+ \"(app: \" + str(appid) + \", slug: \" + slug + \")\")\n+ continue\n+ else:\n+ logger.debug(\"Found desktop game \"\n+ \"\\\"\" + str(name) + \"\\\" \"\n+ \"(app: \" + str(appid) + \", slug: \" + slug + \")\")\n+\n seen_slugs.add(slug)\n \n if slug not in slugs_in_lutris:\n", "issue": "Lutris 0.4.5 doesn\u2019t launch (AssertionError)\nI'm using Linux Mint 18.1 (my de is Mate if that's any help) and I received the update for Lutris 0.4.5. I tried to launch Lutris after that and nothing seemed to happen. I then tried to launch it from the terminal with the command \"lutris\" and saw the following:\r\n\r\n```python\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3/dist-packages/lutris/gui/application.py\", line 152, in do_activate\r\nself.window = LutrisWindow(application=self)\r\nFile \"/usr/lib/python3/dist-packages/lutris/gui/lutriswindow.py\", line 84, in init\r\ndesktopapps.sync_with_lutris()\r\nFile \"/usr/lib/python3/dist-packages/lutris/util/desktopapps.py\", line 87, in sync_with_lutris\r\nmark_as_installed(appid, 'linux', game_info)\r\nFile \"/usr/lib/python3/dist-packages/lutris/util/desktopapps.py\", line 32, in mark_as_installed\r\nassert game_info[key]\r\nAssertionError\r\n```\r\n\r\nI do hope you fix it, as I run quite a few dosbox/scummvm games, and a Wine game with it, so I kind of need it for gaming. Thanks for the understanding.\r\nP.S. 
I had also posted it on the forum, but overcame my reluctance to not creating many accounts online, as it's kind of important that this gets resolved.\n", "before_files": [{"content": "\"\"\"Get games installed as desktop applications.\"\"\"\n\nimport os\nimport shutil\nimport shlex\nimport subprocess\nimport re\n\nfrom gi.repository import Gio\nfrom lutris import pga\nfrom lutris.util.log import logger\nfrom lutris.util.strings import slugify\nfrom lutris.config import make_game_config_id, LutrisConfig\n\nIGNORED_GAMES = (\n \"lutris\", \"mame\", \"dosbox\", \"playonlinux\", \"org.gnome.Games\", \"retroarch\",\n \"steam\", \"steam-runtime\", \"steam-valve\", \"steam-native\", \"PlayOnLinux\",\n \"fs-uae-arcade\", \"PCSX2\", \"ppsspp\", \"qchdman\", \"qmc2-sdlmame\", \"qmc2-arcade\",\n \"sc-controller\", \"epsxe\"\n)\nIGNORED_EXECUTABLES = (\n \"lutris\", \"steam\"\n)\n\nIGNORED_CATEGORIES = (\n \"Emulator\", \"Development\", \"Utility\"\n)\n\n\ndef mark_as_installed(appid, runner_name, game_info):\n for key in ['name', 'slug']:\n assert game_info[key]\n logger.info(\"Setting %s as installed\" % game_info['name'])\n config_id = (game_info.get('config_path') or make_game_config_id(game_info['slug']))\n game_id = pga.add_or_update(\n name=game_info['name'],\n runner=runner_name,\n slug=game_info['slug'],\n installed=1,\n configpath=config_id,\n installer_slug=game_info['installer_slug']\n )\n\n game_config = LutrisConfig(\n runner_slug=runner_name,\n game_config_id=config_id,\n )\n game_config.raw_game_config.update({'appid': appid, 'exe': game_info['exe'], 'args': game_info['args']})\n game_config.save()\n return game_id\n\n\ndef mark_as_uninstalled(game_info):\n assert 'id' in game_info\n assert 'name' in game_info\n logger.info('Setting %s as uninstalled' % game_info['name'])\n game_id = pga.add_or_update(\n id=game_info['id'],\n runner='',\n installed=0\n )\n return game_id\n\n\ndef sync_with_lutris():\n apps = get_games()\n desktop_games_in_lutris = pga.get_desktop_games()\n slugs_in_lutris = set([str(game['slug']) for game in desktop_games_in_lutris])\n\n seen_slugs = set()\n for app in apps:\n game_info = None\n name = app[0]\n slug = slugify(name)\n appid = app[1]\n seen_slugs.add(slug)\n\n if slug not in slugs_in_lutris:\n game_info = {\n 'name': name,\n 'slug': slug,\n 'config_path': slug + '-desktopapp',\n 'installer_slug': 'desktopapp',\n 'exe': app[2],\n 'args': app[3]\n }\n mark_as_installed(appid, 'linux', game_info)\n\n unavailable_slugs = slugs_in_lutris.difference(seen_slugs)\n for slug in unavailable_slugs:\n for game in desktop_games_in_lutris:\n if game['slug'] == slug:\n mark_as_uninstalled(game)\n\n\ndef get_games():\n \"\"\"Return the list of games stored in the XDG menu.\"\"\"\n game_list = []\n\n apps = Gio.AppInfo.get_all()\n for app in apps:\n if app.get_nodisplay() or app.get_is_hidden():\n continue\n appid = os.path.splitext(app.get_id())[0]\n exe = None\n args = []\n\n # must be in Game category\n categories = app.get_categories()\n if not categories:\n continue\n categories = filter(None, categories.lower().split(';'))\n if 'game' not in categories:\n continue\n\n # contains a blacklisted category\n ok = True\n for c in categories:\n if c in map(str.lower, IGNORED_CATEGORIES):\n ok = False\n if not ok:\n continue\n\n # game is blacklisted\n if appid.lower() in map(str.lower, IGNORED_GAMES):\n continue\n\n # executable is blacklisted\n if app.get_executable().lower() in IGNORED_EXECUTABLES:\n continue\n\n cli = shlex.split(app.get_commandline())\n exe = 
cli[0]\n args = cli[1:]\n # remove %U etc. and change %% to % in arguments\n args = list(map(lambda arg: re.sub('%[^%]', '', arg).replace('%%', '%'), args))\n\n args = subprocess.list2cmdline(args)\n\n if not exe.startswith('/'):\n exe = shutil.which(exe)\n game_list.append((app.get_display_name(), appid, exe, args))\n return game_list\n", "path": "lutris/util/desktopapps.py"}], "after_files": [{"content": "\"\"\"Get games installed as desktop applications.\"\"\"\n\nimport os\nimport shutil\nimport shlex\nimport subprocess\nimport re\n\nfrom gi.repository import Gio\nfrom lutris import pga\nfrom lutris.util.log import logger\nfrom lutris.util.strings import slugify\nfrom lutris.config import make_game_config_id, LutrisConfig\n\nIGNORED_GAMES = (\n \"lutris\", \"mame\", \"dosbox\", \"playonlinux\", \"org.gnome.Games\", \"retroarch\",\n \"steam\", \"steam-runtime\", \"steam-valve\", \"steam-native\", \"PlayOnLinux\",\n \"fs-uae-arcade\", \"PCSX2\", \"ppsspp\", \"qchdman\", \"qmc2-sdlmame\", \"qmc2-arcade\",\n \"sc-controller\", \"epsxe\"\n)\n\nIGNORED_EXECUTABLES = (\n \"lutris\", \"steam\"\n)\n\nIGNORED_CATEGORIES = (\n \"Emulator\", \"Development\", \"Utility\"\n)\n\n\ndef mark_as_installed(appid, runner_name, game_info):\n for key in ['name', 'slug']:\n assert game_info[key]\n logger.info(\"Setting %s as installed\" % game_info['name'])\n config_id = (game_info.get('config_path') or make_game_config_id(game_info['slug']))\n game_id = pga.add_or_update(\n name=game_info['name'],\n runner=runner_name,\n slug=game_info['slug'],\n installed=1,\n configpath=config_id,\n installer_slug=game_info['installer_slug']\n )\n\n config = LutrisConfig(\n runner_slug=runner_name,\n game_config_id=config_id,\n )\n config.raw_game_config.update({\n 'appid': appid,\n 'exe': game_info['exe'],\n 'args': game_info['args']\n })\n config.raw_system_config.update({\n 'disable_runtime': True\n })\n config.save()\n return game_id\n\n\ndef mark_as_uninstalled(game_info):\n assert 'id' in game_info\n assert 'name' in game_info\n logger.info('Setting %s as uninstalled' % game_info['name'])\n game_id = pga.add_or_update(\n id=game_info['id'],\n runner='',\n installed=0\n )\n return game_id\n\n\ndef sync_with_lutris():\n apps = get_games()\n desktop_games_in_lutris = pga.get_desktop_games()\n slugs_in_lutris = set([str(game['slug']) for game in desktop_games_in_lutris])\n\n seen_slugs = set()\n for app in apps:\n game_info = None\n name = app[0]\n appid = app[1]\n slug = slugify(name)\n\n # if it fails to get slug from the name\n if not slug:\n slug = slugify(appid)\n\n if not name or not slug or not appid:\n logger.error(\"Failed to load desktop game \"\n \"\\\"\" + str(name) + \"\\\" \"\n \"(app: \" + str(appid) + \", slug: \" + slug + \")\")\n continue\n else:\n logger.debug(\"Found desktop game \"\n \"\\\"\" + str(name) + \"\\\" \"\n \"(app: \" + str(appid) + \", slug: \" + slug + \")\")\n\n seen_slugs.add(slug)\n\n if slug not in slugs_in_lutris:\n game_info = {\n 'name': name,\n 'slug': slug,\n 'config_path': slug + '-desktopapp',\n 'installer_slug': 'desktopapp',\n 'exe': app[2],\n 'args': app[3]\n }\n mark_as_installed(appid, 'linux', game_info)\n\n unavailable_slugs = slugs_in_lutris.difference(seen_slugs)\n for slug in unavailable_slugs:\n for game in desktop_games_in_lutris:\n if game['slug'] == slug:\n mark_as_uninstalled(game)\n\n\ndef get_games():\n \"\"\"Return the list of games stored in the XDG menu.\"\"\"\n game_list = []\n\n apps = Gio.AppInfo.get_all()\n for app in apps:\n if app.get_nodisplay() or 
app.get_is_hidden():\n continue\n appid = os.path.splitext(app.get_id())[0]\n exe = None\n args = []\n\n # must be in Game category\n categories = app.get_categories()\n if not categories:\n continue\n categories = filter(None, categories.lower().split(';'))\n if 'game' not in categories:\n continue\n\n # contains a blacklisted category\n ok = True\n for c in categories:\n if c in map(str.lower, IGNORED_CATEGORIES):\n ok = False\n if not ok:\n continue\n\n # game is blacklisted\n if appid.lower() in map(str.lower, IGNORED_GAMES):\n continue\n\n # executable is blacklisted\n if app.get_executable().lower() in IGNORED_EXECUTABLES:\n continue\n\n cli = shlex.split(app.get_commandline())\n exe = cli[0]\n args = cli[1:]\n # remove %U etc. and change %% to % in arguments\n args = list(map(lambda arg: re.sub('%[^%]', '', arg).replace('%%', '%'), args))\n\n args = subprocess.list2cmdline(args)\n\n if not exe.startswith('/'):\n exe = shutil.which(exe)\n game_list.append((app.get_display_name(), appid, exe, args))\n return game_list\n", "path": "lutris/util/desktopapps.py"}]}
| 1,974 | 500 |
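A minimal, self-contained sketch of the fallback-and-validate pattern that the patch in the record above introduces around `slugify` — `simple_slugify`, the logger setup, and the sample values are illustrative stand-ins, not code from the Lutris tree:

```python
import logging
import re

log = logging.getLogger(__name__)

def simple_slugify(value):
    # Illustrative stand-in for lutris.util.strings.slugify.
    value = re.sub(r"[^\w\s-]", "", str(value)).strip().lower()
    return re.sub(r"[-\s]+", "-", value)

def resolve_slug(name, appid):
    # Mirror the patched logic: fall back to the appid when the display
    # name slugifies to an empty string, and skip the entry entirely if
    # any of the three fields is still missing.
    slug = simple_slugify(name)
    if not slug:
        slug = simple_slugify(appid)
    if not name or not slug or not appid:
        log.error("Failed to load desktop game %r (app: %r, slug: %r)", name, appid, slug)
        return None
    return slug

# A desktop entry whose display name contains no slug-able characters:
print(resolve_slug("???", "org.example.puzzle"))  # -> "orgexamplepuzzle"
```

Any entry whose name slugifies to an empty string would previously reach the bare `assert game_info[key]` in `mark_as_installed`, which matches the traceback in the report.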
gh_patches_debug_5467
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1173
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TN: 109th Session, Legislators, Upper - Senator name extraction fails for district 19
Trying to run the scraper locally, I encountered the following exception for TN's [19th senate district in the 109th general assembly](http://www.capitol.tn.gov/senate/archives/109GA/members/s19.html) (Sen. Thelma Hale):
```
File "/usr/local/bin/billy-update", line 9, in <module>
load_entry_point('billy==1.8.4', 'console_scripts', 'billy-update')()
File "/opt/sunlightfoundation.com/billy/billy/bin/update.py", line 377, in main
run_record += _run_scraper(stype, args, metadata)
File "/opt/sunlightfoundation.com/billy/billy/bin/update.py", line 101, in _run_scraper
scraper.scrape(chamber, time)
File "/srv/openstates-web/openstates/tn/legislators.py", line 81, in scrape
name = member_page.xpath('//div[@id="membertitle"]/h2/text()')[0]
IndexError: list index out of range
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/tn/legislators.py`
Content:
```
1 import HTMLParser
2
3 from billy.scrape.legislators import LegislatorScraper, Legislator
4 import lxml.html
5 from scrapelib import HTTPError
6 from openstates.utils import LXMLMixin
7
8 class TNLegislatorScraper(LegislatorScraper, LXMLMixin):
9 jurisdiction = 'tn'
10
11 def scrape(self, chamber, term):
12 self.validate_term(term, latest_only=False)
13 root_url = 'http://www.capitol.tn.gov/'
14 parties = {'D': 'Democratic', 'R': 'Republican',
15 'CCR': 'Carter County Republican',
16 'I': 'Independent'}
17
18 #testing for chamber
19 if chamber == 'upper':
20 url_chamber_name = 'senate'
21 abbr = 's'
22 else:
23 url_chamber_name = 'house'
24 abbr = 'h'
25 if term != self.metadata["terms"][-1]["sessions"][0]:
26 chamber_url = root_url + url_chamber_name
27 chamber_url += '/archives/' + term + 'GA/Members/index.html'
28 else:
29 chamber_url = root_url + url_chamber_name + '/members/'
30
31 page = self.lxmlize(chamber_url)
32
33 for row in page.xpath("//tr"):
34
35 # Skip any a header row.
36 if set(child.tag for child in row) == set(['th']):
37 continue
38
39 vacancy_check = row.xpath('./td/text()')[1]
40 if 'Vacant' in vacancy_check:
41 self.logger.warning("Vacant Seat")
42 continue
43
44 partyInit = row.xpath('td[3]')[0].text.split()[0]
45 party = parties[partyInit]
46 district = row.xpath('td[5]/a')[0].text.split()[1]
47 address = row.xpath('td[6]')[0].text_content()
48 # 301 6th Avenue North Suite
49 address = address.replace('LP',
50 'Legislative Plaza\nNashville, TN 37243')
51 address = address.replace('WMB',
52 'War Memorial Building\nNashville, TN 37243')
53 address = '301 6th Avenue North\nSuite ' + address
54 phone = [
55 x.strip() for x in
56 row.xpath('td[7]//text()')
57 if x.strip()
58 ][0]
59
60 email = HTMLParser.HTMLParser().unescape(
61 row.xpath('td[1]/a/@href')[0][len("mailto:"): ])
62 member_url = (root_url + url_chamber_name + '/members/' + abbr +
63 district + '.html')
64 member_photo_url = (root_url + url_chamber_name +
65 '/members/images/' + abbr + district + '.jpg')
66
67 try:
68 member_page = self.get(member_url, allow_redirects=False).text
69 except (TypeError, HTTPError):
70 try:
71 member_url = row.xpath('td[2]/a/@href')[0]
72 member_page = self.get(member_url, allow_redirects=False).text
73 except (TypeError, HTTPError):
74 self.logger.warning("Valid member page does not exist.")
75 continue
76
77 member_page = lxml.html.fromstring(member_page)
78 try:
79 name = member_page.xpath('body/div/div/h1/text()')[0]
80 except IndexError:
81 name = member_page.xpath('//div[@id="membertitle"]/h2/text()')[0]
82
83 if 'Speaker' in name:
84 full_name = name[8:len(name)]
85 elif 'Lt.' in name:
86 full_name = name[13:len(name)]
87 elif abbr == 'h':
88 full_name = name[len("Representative "): len(name)]
89 else:
90 full_name = name[8:len(name)]
91
92 leg = Legislator(term, chamber, district, full_name.strip(),
93 party=party, url=member_url,
94 photo_url=member_photo_url)
95 leg.add_source(chamber_url)
96 leg.add_source(member_url)
97
98 # TODO: add district address from this page
99
100 leg.add_office('capitol', 'Nashville Address',
101 address=address, phone=phone, email=email)
102
103 self.save_legislator(leg)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/tn/legislators.py b/openstates/tn/legislators.py
--- a/openstates/tn/legislators.py
+++ b/openstates/tn/legislators.py
@@ -76,7 +76,7 @@
member_page = lxml.html.fromstring(member_page)
try:
- name = member_page.xpath('body/div/div/h1/text()')[0]
+ name = member_page.xpath('//div/div/h1/text()')[0]
except IndexError:
name = member_page.xpath('//div[@id="membertitle"]/h2/text()')[0]
|
{"golden_diff": "diff --git a/openstates/tn/legislators.py b/openstates/tn/legislators.py\n--- a/openstates/tn/legislators.py\n+++ b/openstates/tn/legislators.py\n@@ -76,7 +76,7 @@\n \n member_page = lxml.html.fromstring(member_page)\n try:\n- name = member_page.xpath('body/div/div/h1/text()')[0]\n+ name = member_page.xpath('//div/div/h1/text()')[0]\n except IndexError:\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\n", "issue": "TN: 109th Session, Legislators, Upper - Senator name extraction fails for district 19\nTrying to run the scraper locally, I encountered the following exception for TN's [19th senate district in the 109th general assembly](http://www.capitol.tn.gov/senate/archives/109GA/members/s19.html) (Sen. Thelma Hale):\r\n\r\n```\r\nFile \"/usr/local/bin/billy-update\", line 9, in <module>\r\n load_entry_point('billy==1.8.4', 'console_scripts', 'billy-update')()\r\n File \"/opt/sunlightfoundation.com/billy/billy/bin/update.py\", line 377, in main\r\n run_record += _run_scraper(stype, args, metadata)\r\n File \"/opt/sunlightfoundation.com/billy/billy/bin/update.py\", line 101, in _run_scraper\r\n scraper.scrape(chamber, time)\r\n File \"/srv/openstates-web/openstates/tn/legislators.py\", line 81, in scrape\r\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "import HTMLParser\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nimport lxml.html\nfrom scrapelib import HTTPError\nfrom openstates.utils import LXMLMixin\n\nclass TNLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'tn'\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=False)\n root_url = 'http://www.capitol.tn.gov/'\n parties = {'D': 'Democratic', 'R': 'Republican',\n 'CCR': 'Carter County Republican',\n 'I': 'Independent'}\n\n #testing for chamber\n if chamber == 'upper':\n url_chamber_name = 'senate'\n abbr = 's'\n else:\n url_chamber_name = 'house'\n abbr = 'h'\n if term != self.metadata[\"terms\"][-1][\"sessions\"][0]:\n chamber_url = root_url + url_chamber_name\n chamber_url += '/archives/' + term + 'GA/Members/index.html'\n else:\n chamber_url = root_url + url_chamber_name + '/members/'\n\n page = self.lxmlize(chamber_url)\n\n for row in page.xpath(\"//tr\"):\n\n # Skip any a header row.\n if set(child.tag for child in row) == set(['th']):\n continue\n\n vacancy_check = row.xpath('./td/text()')[1]\n if 'Vacant' in vacancy_check:\n self.logger.warning(\"Vacant Seat\")\n continue\n\n partyInit = row.xpath('td[3]')[0].text.split()[0]\n party = parties[partyInit]\n district = row.xpath('td[5]/a')[0].text.split()[1]\n address = row.xpath('td[6]')[0].text_content()\n # 301 6th Avenue North Suite\n address = address.replace('LP',\n 'Legislative Plaza\\nNashville, TN 37243')\n address = address.replace('WMB',\n 'War Memorial Building\\nNashville, TN 37243')\n address = '301 6th Avenue North\\nSuite ' + address\n phone = [\n x.strip() for x in\n row.xpath('td[7]//text()')\n if x.strip()\n ][0]\n\n email = HTMLParser.HTMLParser().unescape(\n row.xpath('td[1]/a/@href')[0][len(\"mailto:\"): ])\n member_url = (root_url + url_chamber_name + '/members/' + abbr +\n district + '.html')\n member_photo_url = (root_url + url_chamber_name +\n '/members/images/' + abbr + district + '.jpg')\n\n try:\n member_page = self.get(member_url, allow_redirects=False).text\n except (TypeError, HTTPError):\n try:\n member_url = row.xpath('td[2]/a/@href')[0]\n 
member_page = self.get(member_url, allow_redirects=False).text\n except (TypeError, HTTPError):\n self.logger.warning(\"Valid member page does not exist.\")\n continue\n\n member_page = lxml.html.fromstring(member_page)\n try:\n name = member_page.xpath('body/div/div/h1/text()')[0]\n except IndexError:\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\n \n if 'Speaker' in name:\n full_name = name[8:len(name)]\n elif 'Lt.' in name:\n full_name = name[13:len(name)]\n elif abbr == 'h':\n full_name = name[len(\"Representative \"): len(name)]\n else:\n full_name = name[8:len(name)]\n\n leg = Legislator(term, chamber, district, full_name.strip(),\n party=party, url=member_url,\n photo_url=member_photo_url)\n leg.add_source(chamber_url)\n leg.add_source(member_url)\n\n # TODO: add district address from this page\n\n leg.add_office('capitol', 'Nashville Address',\n address=address, phone=phone, email=email)\n\n self.save_legislator(leg)\n", "path": "openstates/tn/legislators.py"}], "after_files": [{"content": "import HTMLParser\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nimport lxml.html\nfrom scrapelib import HTTPError\nfrom openstates.utils import LXMLMixin\n\nclass TNLegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'tn'\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=False)\n root_url = 'http://www.capitol.tn.gov/'\n parties = {'D': 'Democratic', 'R': 'Republican',\n 'CCR': 'Carter County Republican',\n 'I': 'Independent'}\n\n #testing for chamber\n if chamber == 'upper':\n url_chamber_name = 'senate'\n abbr = 's'\n else:\n url_chamber_name = 'house'\n abbr = 'h'\n if term != self.metadata[\"terms\"][-1][\"sessions\"][0]:\n chamber_url = root_url + url_chamber_name\n chamber_url += '/archives/' + term + 'GA/Members/index.html'\n else:\n chamber_url = root_url + url_chamber_name + '/members/'\n\n page = self.lxmlize(chamber_url)\n\n for row in page.xpath(\"//tr\"):\n\n # Skip any a header row.\n if set(child.tag for child in row) == set(['th']):\n continue\n\n vacancy_check = row.xpath('./td/text()')[1]\n if 'Vacant' in vacancy_check:\n self.logger.warning(\"Vacant Seat\")\n continue\n\n partyInit = row.xpath('td[3]')[0].text.split()[0]\n party = parties[partyInit]\n district = row.xpath('td[5]/a')[0].text.split()[1]\n address = row.xpath('td[6]')[0].text_content()\n # 301 6th Avenue North Suite\n address = address.replace('LP',\n 'Legislative Plaza\\nNashville, TN 37243')\n address = address.replace('WMB',\n 'War Memorial Building\\nNashville, TN 37243')\n address = '301 6th Avenue North\\nSuite ' + address\n phone = [\n x.strip() for x in\n row.xpath('td[7]//text()')\n if x.strip()\n ][0]\n\n email = HTMLParser.HTMLParser().unescape(\n row.xpath('td[1]/a/@href')[0][len(\"mailto:\"): ])\n member_url = (root_url + url_chamber_name + '/members/' + abbr +\n district + '.html')\n member_photo_url = (root_url + url_chamber_name +\n '/members/images/' + abbr + district + '.jpg')\n\n try:\n member_page = self.get(member_url, allow_redirects=False).text\n except (TypeError, HTTPError):\n try:\n member_url = row.xpath('td[2]/a/@href')[0]\n member_page = self.get(member_url, allow_redirects=False).text\n except (TypeError, HTTPError):\n self.logger.warning(\"Valid member page does not exist.\")\n continue\n\n member_page = lxml.html.fromstring(member_page)\n try:\n name = member_page.xpath('//div/div/h1/text()')[0]\n except IndexError:\n name = member_page.xpath('//div[@id=\"membertitle\"]/h2/text()')[0]\n \n if 
'Speaker' in name:\n full_name = name[8:len(name)]\n elif 'Lt.' in name:\n full_name = name[13:len(name)]\n elif abbr == 'h':\n full_name = name[len(\"Representative \"): len(name)]\n else:\n full_name = name[8:len(name)]\n\n leg = Legislator(term, chamber, district, full_name.strip(),\n party=party, url=member_url,\n photo_url=member_photo_url)\n leg.add_source(chamber_url)\n leg.add_source(member_url)\n\n # TODO: add district address from this page\n\n leg.add_office('capitol', 'Nashville Address',\n address=address, phone=phone, email=email)\n\n self.save_legislator(leg)\n", "path": "openstates/tn/legislators.py"}]}
| 1,653 | 137 |
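A small lxml sketch of why the one-character XPath change in the record above matters — the markup here is hypothetical and only shaped to show the difference between the anchored and the unanchored expression:

```python
import lxml.html

html = """
<html><body>
  <div class="wrapper">
    <div class="content">
      <div><h1>Senator Jane Doe</h1></div>
    </div>
  </div>
</body></html>
"""

page = lxml.html.fromstring(html)
# Anchored path: expects the <h1> exactly at body/div/div/h1, so it misses
# the extra wrapper <div> and returns an empty list.
print(page.xpath('body/div/div/h1/text()'))   # []
# Unanchored path: matches any div/div/h1 nesting anywhere in the tree.
print(page.xpath('//div/div/h1/text()'))      # ['Senator Jane Doe']
```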
gh_patches_debug_30388
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-185
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
move hydra generated files into an output directory relative to the job dir
automatically generated files like config.yaml, overrides.yaml, and others can go here.
Also additional debug information about the hydra config composition order.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/plugins/common/utils.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 import copy
4 import logging
5 import os
6 import re
7 import sys
8 from time import strftime, localtime
9
10 import six
11 from omegaconf import OmegaConf, DictConfig, ListConfig
12
13 # pylint: disable=C0103
14 log = logging.getLogger(__name__)
15
16
17 def configure_log(log_config, verbose_config):
18 assert isinstance(verbose_config, (bool, str, ListConfig))
19 if log_config is not None:
20 conf = log_config.to_container(resolve=True)
21 logging.config.dictConfig(conf)
22 else:
23 # default logging to stdout
24 root = logging.getLogger()
25 root.setLevel(logging.INFO)
26 handler = logging.StreamHandler(sys.stdout)
27 formatter = logging.Formatter(
28 "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"
29 )
30 handler.setFormatter(formatter)
31 root.addHandler(handler)
32 if isinstance(verbose_config, bool):
33 if verbose_config:
34 logging.getLogger().setLevel(logging.DEBUG)
35 else:
36 if isinstance(verbose_config, str):
37 verbose_config = OmegaConf.create([verbose_config])
38 for logger in verbose_config:
39 logging.getLogger(logger).setLevel(logging.DEBUG)
40
41
42 def save_config(cfg, filename):
43 with open(os.path.join(filename), "w") as file:
44 file.write(cfg.pretty())
45
46
47 def get_overrides_dirname(lst, exclude_keys=[]):
48 lst = [x for x in lst if x not in exclude_keys]
49 lst.sort()
50 return re.sub(pattern="[=]", repl="=", string=",".join(lst))
51
52
53 def filter_overrides(overrides):
54 """
55 :param overrides: overrides list
56 :return: returning a new overrides list with all the keys starting with hydra. fitlered.
57 """
58 return [x for x in overrides if not x.startswith("hydra.")]
59
60
61 def run_job(config, task_function, job_dir_key, job_subdir_key):
62 old_cwd = os.getcwd()
63 working_dir = str(config.select(job_dir_key))
64 if job_subdir_key is not None:
65 # evaluate job_subdir_key lazily.
66 # this is running on the client side in sweep and contains things such as job:id which
67 # are only available there.
68 subdir = str(config.select(job_subdir_key))
69 working_dir = os.path.join(working_dir, subdir)
70 try:
71 ret = JobReturn()
72 ret.working_dir = working_dir
73 task_cfg = copy.deepcopy(config)
74 # TODO: update this after https://github.com/omry/omegaconf/issues/42 is resolved
75 hydra_cfg = OmegaConf.create({"hydra": task_cfg["hydra"]})
76 del task_cfg["hydra"]
77 ret.cfg = task_cfg
78 ret.hydra_cfg = copy.deepcopy(HydraConfig())
79 ret.overrides = config.hydra.overrides.task.to_container()
80 if not os.path.exists(working_dir):
81 os.makedirs(working_dir)
82 os.chdir(working_dir)
83 configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)
84
85 save_config(task_cfg, "config.yaml")
86 save_config(hydra_cfg, "hydra.yaml")
87 save_config(config.hydra.overrides.task, "overrides.yaml")
88 ret.return_value = task_function(task_cfg)
89 ret.task_name = JobRuntime().get("name")
90 return ret
91 finally:
92 os.chdir(old_cwd)
93
94
95 def get_valid_filename(s):
96 s = str(s).strip().replace(" ", "_")
97 return re.sub(r"(?u)[^-\w.]", "", s)
98
99
100 def setup_globals():
101 try:
102 OmegaConf.register_resolver(
103 "now", lambda pattern: strftime(pattern, localtime())
104 )
105
106 def job_error(x):
107 raise Exception(
108 "job:{} is no longer available. use hydra.job.{}".format(x, x)
109 )
110
111 OmegaConf.register_resolver("job", job_error)
112
113 except AssertionError:
114 # calling it again in no_workers mode will throw. safe to ignore.
115 pass
116
117
118 class JobReturn:
119 def __init__(self):
120 self.overrides = None
121 self.return_value = None
122 self.cfg = None
123 self.hydra_cfg = None
124 self.working_dir = None
125 self.task_name = None
126
127
128 class Singleton(type):
129 _instances = {}
130
131 def __call__(cls, *args, **kwargs):
132 if cls not in cls._instances:
133 cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
134 return cls._instances[cls]
135
136 @staticmethod
137 def get_state():
138 return Singleton._instances
139
140 @staticmethod
141 def set_state(instances):
142 Singleton._instances = instances
143
144
145 @six.add_metaclass(Singleton)
146 class JobRuntime:
147 def __init__(self):
148 self.conf = OmegaConf.create()
149 self.set("name", "UNKNOWN_NAME")
150
151 def get(self, key):
152 ret = self.conf.select(key)
153 if ret is None:
154 raise KeyError("Key not found in {}: {}".format(type(self).__name__, key))
155 return ret
156
157 def set(self, key, value):
158 log.debug("Setting {}:{}={}".format(type(self).__name__, key, value))
159 self.conf[key] = value
160
161
162 @six.add_metaclass(Singleton)
163 class HydraConfig(DictConfig):
164 def __init__(self):
165 super(HydraConfig, self).__init__(content={})
166 self.hydra = None
167
168 def set_config(self, cfg):
169 try:
170 OmegaConf.set_readonly(self, False)
171 self.hydra = copy.deepcopy(cfg.hydra)
172 finally:
173 OmegaConf.set_readonly(self, True)
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hydra/plugins/common/utils.py b/hydra/plugins/common/utils.py
--- a/hydra/plugins/common/utils.py
+++ b/hydra/plugins/common/utils.py
@@ -5,6 +5,7 @@
import os
import re
import sys
+from hydra._internal.pathlib import Path
from time import strftime, localtime
import six
@@ -39,8 +40,9 @@
logging.getLogger(logger).setLevel(logging.DEBUG)
-def save_config(cfg, filename):
- with open(os.path.join(filename), "w") as file:
+def _save_config(cfg, filename, output_dir):
+ Path(str(output_dir)).mkdir(parents=True, exist_ok=True)
+ with open(str(output_dir / filename), "w") as file:
file.write(cfg.pretty())
@@ -77,14 +79,15 @@
ret.cfg = task_cfg
ret.hydra_cfg = copy.deepcopy(HydraConfig())
ret.overrides = config.hydra.overrides.task.to_container()
- if not os.path.exists(working_dir):
- os.makedirs(working_dir)
+ # handle output directories here
+ Path(str(working_dir)).mkdir(parents=True, exist_ok=True)
os.chdir(working_dir)
- configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)
+ hydra_output = Path(hydra_cfg.hydra.output_subdir)
- save_config(task_cfg, "config.yaml")
- save_config(hydra_cfg, "hydra.yaml")
- save_config(config.hydra.overrides.task, "overrides.yaml")
+ configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)
+ _save_config(task_cfg, "config.yaml", hydra_output)
+ _save_config(hydra_cfg, "hydra.yaml", hydra_output)
+ _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output)
ret.return_value = task_function(task_cfg)
ret.task_name = JobRuntime().get("name")
return ret
|
{"golden_diff": "diff --git a/hydra/plugins/common/utils.py b/hydra/plugins/common/utils.py\n--- a/hydra/plugins/common/utils.py\n+++ b/hydra/plugins/common/utils.py\n@@ -5,6 +5,7 @@\n import os\n import re\n import sys\n+from hydra._internal.pathlib import Path\n from time import strftime, localtime\n \n import six\n@@ -39,8 +40,9 @@\n logging.getLogger(logger).setLevel(logging.DEBUG)\n \n \n-def save_config(cfg, filename):\n- with open(os.path.join(filename), \"w\") as file:\n+def _save_config(cfg, filename, output_dir):\n+ Path(str(output_dir)).mkdir(parents=True, exist_ok=True)\n+ with open(str(output_dir / filename), \"w\") as file:\n file.write(cfg.pretty())\n \n \n@@ -77,14 +79,15 @@\n ret.cfg = task_cfg\n ret.hydra_cfg = copy.deepcopy(HydraConfig())\n ret.overrides = config.hydra.overrides.task.to_container()\n- if not os.path.exists(working_dir):\n- os.makedirs(working_dir)\n+ # handle output directories here\n+ Path(str(working_dir)).mkdir(parents=True, exist_ok=True)\n os.chdir(working_dir)\n- configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)\n+ hydra_output = Path(hydra_cfg.hydra.output_subdir)\n \n- save_config(task_cfg, \"config.yaml\")\n- save_config(hydra_cfg, \"hydra.yaml\")\n- save_config(config.hydra.overrides.task, \"overrides.yaml\")\n+ configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)\n+ _save_config(task_cfg, \"config.yaml\", hydra_output)\n+ _save_config(hydra_cfg, \"hydra.yaml\", hydra_output)\n+ _save_config(config.hydra.overrides.task, \"overrides.yaml\", hydra_output)\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime().get(\"name\")\n return ret\n", "issue": "move hydra generated files into an output directory relative to the job dir\nautomatically generated files like config.yaml, overrides.yaml, and others can go here.\r\nAlso additional debug information about the hydra config composition order.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nfrom time import strftime, localtime\n\nimport six\nfrom omegaconf import OmegaConf, DictConfig, ListConfig\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef configure_log(log_config, verbose_config):\n assert isinstance(verbose_config, (bool, str, ListConfig))\n if log_config is not None:\n conf = log_config.to_container(resolve=True)\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n if isinstance(verbose_config, bool):\n if verbose_config:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n if isinstance(verbose_config, str):\n verbose_config = OmegaConf.create([verbose_config])\n for logger in verbose_config:\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef save_config(cfg, filename):\n with open(os.path.join(filename), \"w\") as file:\n file.write(cfg.pretty())\n\n\ndef get_overrides_dirname(lst, exclude_keys=[]):\n lst = [x for x in lst if x not in exclude_keys]\n lst.sort()\n return re.sub(pattern=\"[=]\", repl=\"=\", string=\",\".join(lst))\n\n\ndef filter_overrides(overrides):\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. 
fitlered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(config, task_function, job_dir_key, job_subdir_key):\n old_cwd = os.getcwd()\n working_dir = str(config.select(job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(config.select(job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n # TODO: update this after https://github.com/omry/omegaconf/issues/42 is resolved\n hydra_cfg = OmegaConf.create({\"hydra\": task_cfg[\"hydra\"]})\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = copy.deepcopy(HydraConfig())\n ret.overrides = config.hydra.overrides.task.to_container()\n if not os.path.exists(working_dir):\n os.makedirs(working_dir)\n os.chdir(working_dir)\n configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)\n\n save_config(task_cfg, \"config.yaml\")\n save_config(hydra_cfg, \"hydra.yaml\")\n save_config(config.hydra.overrides.task, \"overrides.yaml\")\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime().get(\"name\")\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s):\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals():\n try:\n OmegaConf.register_resolver(\n \"now\", lambda pattern: strftime(pattern, localtime())\n )\n\n def job_error(x):\n raise Exception(\n \"job:{} is no longer available. use hydra.job.{}\".format(x, x)\n )\n\n OmegaConf.register_resolver(\"job\", job_error)\n\n except AssertionError:\n # calling it again in no_workers mode will throw. safe to ignore.\n pass\n\n\nclass JobReturn:\n def __init__(self):\n self.overrides = None\n self.return_value = None\n self.cfg = None\n self.hydra_cfg = None\n self.working_dir = None\n self.task_name = None\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n @staticmethod\n def get_state():\n return Singleton._instances\n\n @staticmethod\n def set_state(instances):\n Singleton._instances = instances\n\n\[email protected]_metaclass(Singleton)\nclass JobRuntime:\n def __init__(self):\n self.conf = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key):\n ret = self.conf.select(key)\n if ret is None:\n raise KeyError(\"Key not found in {}: {}\".format(type(self).__name__, key))\n return ret\n\n def set(self, key, value):\n log.debug(\"Setting {}:{}={}\".format(type(self).__name__, key, value))\n self.conf[key] = value\n\n\[email protected]_metaclass(Singleton)\nclass HydraConfig(DictConfig):\n def __init__(self):\n super(HydraConfig, self).__init__(content={})\n self.hydra = None\n\n def set_config(self, cfg):\n try:\n OmegaConf.set_readonly(self, False)\n self.hydra = copy.deepcopy(cfg.hydra)\n finally:\n OmegaConf.set_readonly(self, True)\n", "path": "hydra/plugins/common/utils.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nfrom hydra._internal.pathlib import Path\nfrom time import strftime, localtime\n\nimport six\nfrom omegaconf import OmegaConf, DictConfig, ListConfig\n\n# pylint: disable=C0103\nlog = logging.getLogger(__name__)\n\n\ndef configure_log(log_config, verbose_config):\n assert isinstance(verbose_config, (bool, str, ListConfig))\n if log_config is not None:\n conf = log_config.to_container(resolve=True)\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n if isinstance(verbose_config, bool):\n if verbose_config:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n if isinstance(verbose_config, str):\n verbose_config = OmegaConf.create([verbose_config])\n for logger in verbose_config:\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef _save_config(cfg, filename, output_dir):\n Path(str(output_dir)).mkdir(parents=True, exist_ok=True)\n with open(str(output_dir / filename), \"w\") as file:\n file.write(cfg.pretty())\n\n\ndef get_overrides_dirname(lst, exclude_keys=[]):\n lst = [x for x in lst if x not in exclude_keys]\n lst.sort()\n return re.sub(pattern=\"[=]\", repl=\"=\", string=\",\".join(lst))\n\n\ndef filter_overrides(overrides):\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. fitlered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(config, task_function, job_dir_key, job_subdir_key):\n old_cwd = os.getcwd()\n working_dir = str(config.select(job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(config.select(job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n # TODO: update this after https://github.com/omry/omegaconf/issues/42 is resolved\n hydra_cfg = OmegaConf.create({\"hydra\": task_cfg[\"hydra\"]})\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = copy.deepcopy(HydraConfig())\n ret.overrides = config.hydra.overrides.task.to_container()\n # handle output directories here\n Path(str(working_dir)).mkdir(parents=True, exist_ok=True)\n os.chdir(working_dir)\n hydra_output = Path(hydra_cfg.hydra.output_subdir)\n\n configure_log(hydra_cfg.hydra.job_logging, hydra_cfg.hydra.verbose)\n _save_config(task_cfg, \"config.yaml\", hydra_output)\n _save_config(hydra_cfg, \"hydra.yaml\", hydra_output)\n _save_config(config.hydra.overrides.task, \"overrides.yaml\", hydra_output)\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime().get(\"name\")\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s):\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals():\n try:\n OmegaConf.register_resolver(\n \"now\", lambda pattern: strftime(pattern, localtime())\n )\n\n def job_error(x):\n raise Exception(\n \"job:{} is no longer available. 
use hydra.job.{}\".format(x, x)\n )\n\n OmegaConf.register_resolver(\"job\", job_error)\n\n except AssertionError:\n # calling it again in no_workers mode will throw. safe to ignore.\n pass\n\n\nclass JobReturn:\n def __init__(self):\n self.overrides = None\n self.return_value = None\n self.cfg = None\n self.hydra_cfg = None\n self.working_dir = None\n self.task_name = None\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n @staticmethod\n def get_state():\n return Singleton._instances\n\n @staticmethod\n def set_state(instances):\n Singleton._instances = instances\n\n\[email protected]_metaclass(Singleton)\nclass JobRuntime:\n def __init__(self):\n self.conf = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key):\n ret = self.conf.select(key)\n if ret is None:\n raise KeyError(\"Key not found in {}: {}\".format(type(self).__name__, key))\n return ret\n\n def set(self, key, value):\n log.debug(\"Setting {}:{}={}\".format(type(self).__name__, key, value))\n self.conf[key] = value\n\n\[email protected]_metaclass(Singleton)\nclass HydraConfig(DictConfig):\n def __init__(self):\n super(HydraConfig, self).__init__(content={})\n self.hydra = None\n\n def set_config(self, cfg):\n try:\n OmegaConf.set_readonly(self, False)\n self.hydra = copy.deepcopy(cfg.hydra)\n finally:\n OmegaConf.set_readonly(self, True)\n", "path": "hydra/plugins/common/utils.py"}]}
| 1,951 | 454 |
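The core of the change in the record above is the `mkdir(parents=True, exist_ok=True)` + write pattern; below is a standard-library-only sketch of that pattern (the patch itself imports `Path` from `hydra._internal.pathlib`, presumably a vendored copy for Python 2 support, and writes `cfg.pretty()` rather than plain text):

```python
from pathlib import Path

def save_text(content, filename, output_dir):
    # Create the output directory (including parents) before writing,
    # so generated files can live in a subdirectory such as ".hydra".
    out = Path(str(output_dir))
    out.mkdir(parents=True, exist_ok=True)
    (out / filename).write_text(content)

save_text("foo: bar\n", "config.yaml", ".hydra")
print(Path(".hydra/config.yaml").read_text())  # -> foo: bar
```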
gh_patches_debug_3144
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-2588
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stream in the context-manager form is not used in `ElementwiseKernel` or `ReductionKernel`
This is actually a bug reported back in #1695 that unfortunately went unnoticed.
In `examples/stream/map_reduce.py`, a list of streams was created for executing `cupy.matmul()` in parallel, which is backed by a `ReductionKernel` in this case: https://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/core.pyx#L2513-L2516
However, inspecting the implementation I found that `ReductionKernel` only accepts an explicit `stream` argument; it does not pick up any current stream: https://github.com/cupy/cupy/blob/32718607a7808ec6bc3a24cf9231a9351f8fc95e/cupy/core/reduction.pxi#L396
In other words, that example was misleading because those streams were not used at all and so all executions were serialized, as can be checked from nvprof + nvvp (see the circle in red):
<img width="972" alt="螢幕快照 2019-10-03 上午11 24 27" src="https://user-images.githubusercontent.com/5534781/66140715-978bf180-e5d0-11e9-8228-f613a3eba6fd.png">
The same bug also appears in `ElementwiseKernel`:
https://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/_kernel.pyx#L537
In my opinion, unlike `RawKernel` which is not used by any CuPy core functionalities, `ElementwiseKernel` and `ReductionKernel` should honor the current stream by checking the current stream pointer if no stream argument is explicitly given, since many CuPy functions like `cupy.matmul()` do not support passing in a stream. A similar approach is already adopted in the FFT module, see #2362.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/stream/map_reduce.py`
Content:
```
1 import cupy
2 import time
3
4 device = cupy.cuda.Device()
5 memory_pool = cupy.cuda.MemoryPool()
6 cupy.cuda.set_allocator(memory_pool.malloc)
7 rand = cupy.random.generator.RandomState(seed=1)
8
9 n = 10
10 zs = []
11 map_streams = []
12 stop_events = []
13 reduce_stream = cupy.cuda.stream.Stream()
14 for i in range(n):
15 map_streams.append(cupy.cuda.stream.Stream())
16
17 start_time = time.time()
18
19 # Map
20 for stream in map_streams:
21 with stream:
22 x = rand.normal(size=(1, 1024 * 256))
23 y = rand.normal(size=(1024 * 256, 1))
24 z = cupy.matmul(x, y)
25 zs.append(z)
26 stop_event = stream.record()
27 stop_events.append(stop_event)
28
29 # Block the `reduce_stream` until all events occur. This does not block host.
30 # This is not required when reduction is performed in the default (Stream.null)
31 # stream unless streams are created with `non_blocking=True` flag.
32 for i in range(n):
33 reduce_stream.wait_event(stop_events[i])
34
35 # Reduce
36 with reduce_stream:
37 z = sum(zs)
38
39 device.synchronize()
40 elapsed_time = time.time() - start_time
41 print('elapsed time', elapsed_time)
42 print('total bytes', memory_pool.total_bytes())
43
44 # Free all blocks in the memory pool of streams
45 for stream in map_streams:
46 memory_pool.free_all_blocks(stream=stream)
47 print('total bytes', memory_pool.total_bytes())
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/stream/map_reduce.py b/examples/stream/map_reduce.py
--- a/examples/stream/map_reduce.py
+++ b/examples/stream/map_reduce.py
@@ -19,8 +19,8 @@
# Map
for stream in map_streams:
with stream:
- x = rand.normal(size=(1, 1024 * 256))
- y = rand.normal(size=(1024 * 256, 1))
+ x = rand.normal(size=(1, 1024**2))
+ y = rand.normal(size=(1024**2, 1))
z = cupy.matmul(x, y)
zs.append(z)
stop_event = stream.record()
|
{"golden_diff": "diff --git a/examples/stream/map_reduce.py b/examples/stream/map_reduce.py\n--- a/examples/stream/map_reduce.py\n+++ b/examples/stream/map_reduce.py\n@@ -19,8 +19,8 @@\n # Map\n for stream in map_streams:\n with stream:\n- x = rand.normal(size=(1, 1024 * 256))\n- y = rand.normal(size=(1024 * 256, 1))\n+ x = rand.normal(size=(1, 1024**2))\n+ y = rand.normal(size=(1024**2, 1))\n z = cupy.matmul(x, y)\n zs.append(z)\n stop_event = stream.record()\n", "issue": "Stream in the context-manager form is not used in `ElementwiseKernel` or `ReductionKernel`\nThis is actually a bug reported back in #1695 that unfortunately went unnoticed. \r\n\r\nIn `examples/stream/map_reduce.py`, a list of streams was created for executing `cupy.matmul()` in parallel, which is backed by a `ReductionKernel` in this case: https://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/core.pyx#L2513-L2516\r\nHowever, inspecting the implementation I found that `ReductionKernel` only accepts an explicit `stream` argument; it does not pick up any current stream: https://github.com/cupy/cupy/blob/32718607a7808ec6bc3a24cf9231a9351f8fc95e/cupy/core/reduction.pxi#L396\r\nIn other words, that example was misleading because those streams were not used at all and so all executions were serialized, as can be checked from nvprof + nvvp (see the circle in red):\r\n<img width=\"972\" alt=\"\u87a2\u5e55\u5feb\u7167 2019-10-03 \u4e0a\u534811 24 27\" src=\"https://user-images.githubusercontent.com/5534781/66140715-978bf180-e5d0-11e9-8228-f613a3eba6fd.png\">\r\n\r\nThe same bug also appears in `ElementwiseKernel`:\r\nhttps://github.com/cupy/cupy/blob/1af22f57fda92ae35bde806d0c4d110faf4fed52/cupy/core/_kernel.pyx#L537\r\n\r\nIn my opinion, unlike `RawKernel` which is not used by any CuPy core functionalities, `ElementwiseKernel` and `ReductionKernel` should honor the current stream by checking the current stream pointer if no stream argument is explicitly given, since many CuPy functions like `cupy.matmul()` do not support passing in a stream. A similar approach is already adopted in the FFT module, see #2362.\n", "before_files": [{"content": "import cupy\nimport time\n\ndevice = cupy.cuda.Device()\nmemory_pool = cupy.cuda.MemoryPool()\ncupy.cuda.set_allocator(memory_pool.malloc)\nrand = cupy.random.generator.RandomState(seed=1)\n\nn = 10\nzs = []\nmap_streams = []\nstop_events = []\nreduce_stream = cupy.cuda.stream.Stream()\nfor i in range(n):\n map_streams.append(cupy.cuda.stream.Stream())\n\nstart_time = time.time()\n\n# Map\nfor stream in map_streams:\n with stream:\n x = rand.normal(size=(1, 1024 * 256))\n y = rand.normal(size=(1024 * 256, 1))\n z = cupy.matmul(x, y)\n zs.append(z)\n stop_event = stream.record()\n stop_events.append(stop_event)\n\n# Block the `reduce_stream` until all events occur. 
This does not block host.\n# This is not required when reduction is performed in the default (Stream.null)\n# stream unless streams are created with `non_blocking=True` flag.\nfor i in range(n):\n reduce_stream.wait_event(stop_events[i])\n\n# Reduce\nwith reduce_stream:\n z = sum(zs)\n\ndevice.synchronize()\nelapsed_time = time.time() - start_time\nprint('elapsed time', elapsed_time)\nprint('total bytes', memory_pool.total_bytes())\n\n# Free all blocks in the memory pool of streams\nfor stream in map_streams:\n memory_pool.free_all_blocks(stream=stream)\nprint('total bytes', memory_pool.total_bytes())\n", "path": "examples/stream/map_reduce.py"}], "after_files": [{"content": "import cupy\nimport time\n\ndevice = cupy.cuda.Device()\nmemory_pool = cupy.cuda.MemoryPool()\ncupy.cuda.set_allocator(memory_pool.malloc)\nrand = cupy.random.generator.RandomState(seed=1)\n\nn = 10\nzs = []\nmap_streams = []\nstop_events = []\nreduce_stream = cupy.cuda.stream.Stream()\nfor i in range(n):\n map_streams.append(cupy.cuda.stream.Stream())\n\nstart_time = time.time()\n\n# Map\nfor stream in map_streams:\n with stream:\n x = rand.normal(size=(1, 1024**2))\n y = rand.normal(size=(1024**2, 1))\n z = cupy.matmul(x, y)\n zs.append(z)\n stop_event = stream.record()\n stop_events.append(stop_event)\n\n# Block the `reduce_stream` until all events occur. This does not block host.\n# This is not required when reduction is performed in the default (Stream.null)\n# stream unless streams are created with `non_blocking=True` flag.\nfor i in range(n):\n reduce_stream.wait_event(stop_events[i])\n\n# Reduce\nwith reduce_stream:\n z = sum(zs)\n\ndevice.synchronize()\nelapsed_time = time.time() - start_time\nprint('elapsed time', elapsed_time)\nprint('total bytes', memory_pool.total_bytes())\n\n# Free all blocks in the memory pool of streams\nfor stream in map_streams:\n memory_pool.free_all_blocks(stream=stream)\nprint('total bytes', memory_pool.total_bytes())\n", "path": "examples/stream/map_reduce.py"}]}
| 1,192 | 157 |
gh_patches_debug_38311
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-3991
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] resiprocate/1.12.0: Error in autotools configuration
There are several errors in the recipe, especially in the autotools configuration step.
https://github.com/conan-io/conan-center-index/blob/c68b60af3de84cf460c7512efce75290f1b395cb/recipes/resiprocate/all/conanfile.py#L50-L58
First, in lines 54~57, the values passed to each option are the inverse of what the consumer actually gave. For example, if `with_ssl` is true, the configure option is passed as `--with-ssl=no`.
Second, the project's configure script doesn't understand value assignments for the following options: ssl, mysql, postgresql. In other words, `--with-ssl=yes` and `--with-ssl=no` make no difference in the configuration step, and the library ends up built with the ssl feature enabled in both cases. You can check this either in the resiprocate project's [`configure.ac` file](https://github.com/resiprocate/resiprocate/blob/6b2756ba8516726cfb04e2b2fa5f4e3e67598a31/configure.ac#L113-L119) or in the `configure` script autogenerated from `configure.ac`, as below.
```
# Check whether --with-ssl was given.
if test "${with_ssl+set}" = set; then :
withval=$with_ssl;
cat >>confdefs.h <<_ACEOF
#define USE_SSL /**/
_ACEOF
LIBSSL_LIBADD="-lssl -lcrypto"
if true; then
USE_SSL_TRUE=
USE_SSL_FALSE='#'
else
USE_SSL_TRUE='#'
USE_SSL_FALSE=
fi
else
LIBSSL_LIBADD=""
fi
```
And lastly, the project requires pthread, but the recipe doesn't declare it. When I turn off the ssl, mysql, and postgresql options, linking against the library fails with a missing-pthread error. I guess pthread happened to be a secondary dependency pulled in by the ssl, mysql, or postgresql libraries, which is why linking worked when those options were on even though the recipe never explicitly included pthread. I suggest we add pthread to `cpp_info.system_libs`.
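To make the three points concrete, here is a minimal sketch of what the two affected recipe methods could look like, keeping the Conan 1.x `AutoToolsBuildEnvironment` API already used above; the `("Linux", "FreeBSD")` guard for pthread is an assumption, since other platforms are rejected by the recipe anyway:
```
    def _configure_autotools(self):
        if self._autotools:
            return self._autotools
        self._autotools = AutoToolsBuildEnvironment(self)
        yes_no = lambda v: "yes" if v else "no"
        configure_args = [
            "--enable-shared={}".format(yes_no(self.options.shared)),
            "--enable-static={}".format(yes_no(not self.options.shared)),
            "--with-pic={}".format(yes_no(self.options.get_safe("fPIC", True))),
        ]
        # ssl/mysql/postgresql are presence-only switches in configure.ac:
        # append them bare, and only when the option is enabled.
        if self.options.with_ssl:
            configure_args.append("--with-ssl")
        if self.options.with_mysql:
            configure_args.append("--with-mysql")
        if self.options.with_postgresql:
            configure_args.append("--with-postgresql")
        self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)
        return self._autotools

    def package_info(self):
        self.cpp_info.libs = ["resip", "rutil", "dum", "resipares"]
        # The library itself needs pthread, independently of ssl/mysql/postgresql.
        if self.settings.os in ("Linux", "FreeBSD"):
            self.cpp_info.system_libs = ["pthread"]
```
Passing the switches bare, and only when the corresponding option is on, matches how the generated `configure` script actually interprets them.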
I'm really thankful to those who have worked on this package. I didn't expect it to ever be on Conan, but now it's so nice and easy to use. Happy holidays, guys!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/resiprocate/all/conanfile.py`
Content:
```
1 import os
2 from conans import ConanFile, AutoToolsBuildEnvironment, tools
3 from conans.errors import ConanInvalidConfiguration
4
5
6 required_conan_version = ">=1.29.1"
7
8 class ResiprocateConan(ConanFile):
9 name = "resiprocate"
10 description = "The project is dedicated to maintaining a complete, correct, and commercially usable implementation of SIP and a few related protocols. "
11 topics = ("sip", "voip", "communication", "signaling")
12 url = "https://github.com/conan-io/conan-center-index"
13 homepage = "http://www.resiprocate.org"
14 license = "VSL-1.0"
15 settings = "os", "compiler", "build_type", "arch"
16 options = {"fPIC": [True, False],
17 "shared": [True, False],
18 "with_ssl": [True, False],
19 "with_postgresql": [True, False],
20 "with_mysql": [True, False]}
21 default_options = {"fPIC": True,
22 "shared": False,
23 "with_ssl": True,
24 "with_postgresql": True,
25 "with_mysql": True}
26 _autotools = None
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def requirements(self):
33 if self.settings.os in ("Windows", "Macos"):
34 raise ConanInvalidConfiguration("reSIProcate is not support on {}.".format(self.settings.os))
35 if self.options.with_ssl:
36 self.requires("openssl/1.1.1h")
37 if self.options.with_postgresql:
38 self.requires("libpq/11.5")
39 if self.options.with_mysql:
40 self.requires("libmysqlclient/8.0.17")
41
42 def source(self):
43 tools.get(**self.conan_data["sources"][self.version])
44 os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
45
46 def _configure_autotools(self):
47 if self._autotools:
48 return self._autotools
49 self._autotools = AutoToolsBuildEnvironment(self)
50 yes_no = lambda v: "yes" if v else "no"
51 configure_args = [
52 "--enable-shared={}".format(yes_no(self.options.shared)),
53 "--enable-static={}".format(yes_no(not self.options.shared)),
54 "--with-ssl={}".format(yes_no(not self.options.with_ssl)),
55 "--with-mysql={}".format(yes_no(not self.options.with_mysql)),
56 "--with-postgresql={}".format(yes_no(not self.options.with_postgresql)),
57 "--with-pic={}".format(yes_no(not self.options.fPIC))
58 ]
59
60 self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)
61 return self._autotools
62
63 def build(self):
64 autotools = self._configure_autotools()
65 autotools.make()
66
67 def package(self):
68 self.copy("COPYING", src=self._source_subfolder, dst="licenses")
69 autotools = self._configure_autotools()
70 autotools.install()
71 tools.rmdir(os.path.join(os.path.join(self.package_folder, "share")))
72 tools.remove_files_by_mask(os.path.join(self.package_folder), "*.la")
73
74 def package_info(self):
75 self.cpp_info.libs = ["resip", "rutil", "dum", "resipares"]
76 bin_path = os.path.join(self.package_folder, "bin")
77 self.output.info("Appending PATH environment variable: {}".format(bin_path))
78 self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/resiprocate/all/conanfile.py b/recipes/resiprocate/all/conanfile.py
--- a/recipes/resiprocate/all/conanfile.py
+++ b/recipes/resiprocate/all/conanfile.py
@@ -29,13 +29,22 @@
def _source_subfolder(self):
return "source_subfolder"
- def requirements(self):
+ def config_options(self):
+ if self.settings.os == 'Windows':
+ del self.options.fPIC
+
+ def configure(self):
if self.settings.os in ("Windows", "Macos"):
- raise ConanInvalidConfiguration("reSIProcate is not support on {}.".format(self.settings.os))
+ # FIXME: Visual Studio project & Mac support seems available in resiprocate
+ raise ConanInvalidConfiguration("reSIProcate recipe does not currently support {}.".format(self.settings.os))
+ if self.options.shared:
+ del self.options.fPIC
+
+ def requirements(self):
if self.options.with_ssl:
- self.requires("openssl/1.1.1h")
+ self.requires("openssl/1.1.1i")
if self.options.with_postgresql:
- self.requires("libpq/11.5")
+ self.requires("libpq/11.9")
if self.options.with_mysql:
self.requires("libmysqlclient/8.0.17")
@@ -51,12 +60,17 @@
configure_args = [
"--enable-shared={}".format(yes_no(self.options.shared)),
"--enable-static={}".format(yes_no(not self.options.shared)),
- "--with-ssl={}".format(yes_no(not self.options.with_ssl)),
- "--with-mysql={}".format(yes_no(not self.options.with_mysql)),
- "--with-postgresql={}".format(yes_no(not self.options.with_postgresql)),
- "--with-pic={}".format(yes_no(not self.options.fPIC))
+ "--with-pic={}".format(yes_no(self.options.get_safe("fPIC", True)))
]
+ # These options do not support yes/no
+ if self.options.with_ssl:
+ configure_args.append("--with-ssl")
+ if self.options.with_mysql:
+ configure_args.append("--with-mysql")
+ if self.options.with_postgresql:
+ configure_args.append("--with-postgresql")
+
self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)
return self._autotools
@@ -73,6 +87,8 @@
def package_info(self):
self.cpp_info.libs = ["resip", "rutil", "dum", "resipares"]
+ if self.settings.os in ("Linux", "FreeBSD"):
+ self.cpp_info.system_libs = ["pthread"]
bin_path = os.path.join(self.package_folder, "bin")
self.output.info("Appending PATH environment variable: {}".format(bin_path))
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
|
{"golden_diff": "diff --git a/recipes/resiprocate/all/conanfile.py b/recipes/resiprocate/all/conanfile.py\n--- a/recipes/resiprocate/all/conanfile.py\n+++ b/recipes/resiprocate/all/conanfile.py\n@@ -29,13 +29,22 @@\n def _source_subfolder(self):\n return \"source_subfolder\"\n \n- def requirements(self):\n+ def config_options(self):\n+ if self.settings.os == 'Windows':\n+ del self.options.fPIC\n+\n+ def configure(self):\n if self.settings.os in (\"Windows\", \"Macos\"):\n- raise ConanInvalidConfiguration(\"reSIProcate is not support on {}.\".format(self.settings.os))\n+ # FIXME: Visual Studio project & Mac support seems available in resiprocate\n+ raise ConanInvalidConfiguration(\"reSIProcate recipe does not currently support {}.\".format(self.settings.os))\n+ if self.options.shared:\n+ del self.options.fPIC\n+\n+ def requirements(self):\n if self.options.with_ssl:\n- self.requires(\"openssl/1.1.1h\")\n+ self.requires(\"openssl/1.1.1i\")\n if self.options.with_postgresql:\n- self.requires(\"libpq/11.5\")\n+ self.requires(\"libpq/11.9\")\n if self.options.with_mysql:\n self.requires(\"libmysqlclient/8.0.17\")\n \n@@ -51,12 +60,17 @@\n configure_args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n- \"--with-ssl={}\".format(yes_no(not self.options.with_ssl)),\n- \"--with-mysql={}\".format(yes_no(not self.options.with_mysql)),\n- \"--with-postgresql={}\".format(yes_no(not self.options.with_postgresql)),\n- \"--with-pic={}\".format(yes_no(not self.options.fPIC))\n+ \"--with-pic={}\".format(yes_no(self.options.get_safe(\"fPIC\", True)))\n ]\n \n+ # These options do not support yes/no\n+ if self.options.with_ssl:\n+ configure_args.append(\"--with-ssl\")\n+ if self.options.with_mysql:\n+ configure_args.append(\"--with-mysql\")\n+ if self.options.with_postgresql:\n+ configure_args.append(\"--with-postgresql\")\n+ \n self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)\n return self._autotools\n \n@@ -73,6 +87,8 @@\n \n def package_info(self):\n self.cpp_info.libs = [\"resip\", \"rutil\", \"dum\", \"resipares\"]\n+ if self.settings.os in (\"Linux\", \"FreeBSD\"):\n+ self.cpp_info.system_libs = [\"pthread\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "issue": "[package] resiprocate/1.12.0: Error in autotools configuration\nThere are several errors in the recipe, especially in autotools configuration step.\r\n\r\nhttps://github.com/conan-io/conan-center-index/blob/c68b60af3de84cf460c7512efce75290f1b395cb/recipes/resiprocate/all/conanfile.py#L50-L58\r\n\r\nFirst, in the lines 54~57, values passed to each options are the inverse of what a consumer actually gave. For example, if `with_ssl` is true, then configuration option is passed as `--with-ssl=no`.\r\n\r\nSecond, the configure script of the project doesn't understand value assignments to the following options : ssl, mysql, postgresql. In other words, `--with-ssl=yes` and `--with-ssl=no` would make no difference in the configuration step and eventually the library is built with ssl feature on in both cases. You can check this out either from resiprocate project's [`configure.ac` file](https://github.com/resiprocate/resiprocate/blob/6b2756ba8516726cfb04e2b2fa5f4e3e67598a31/configure.ac#L113-L119), or from `configure` file autogenerated from `configure.ac` as below. 
\r\n\r\n```\r\n# Check whether --with-ssl was given.\r\nif test \"${with_ssl+set}\" = set; then :\r\n withval=$with_ssl;\r\ncat >>confdefs.h <<_ACEOF\r\n#define USE_SSL /**/\r\n_ACEOF\r\n\r\n LIBSSL_LIBADD=\"-lssl -lcrypto\"\r\n\r\n if true; then\r\n USE_SSL_TRUE=\r\n USE_SSL_FALSE='#'\r\nelse\r\n USE_SSL_TRUE='#'\r\n USE_SSL_FALSE=\r\nfi\r\n\r\nelse\r\n LIBSSL_LIBADD=\"\"\r\n\r\nfi\r\n```\r\n\r\nAnd lastly, the project requires pthread, and the recipe didn't include that. When I turn off ssl, mysql, postgresql options, linking the library causes pthread missing error. I guess pthread happened to be secondary dependency from ssl or mysql or postgresql library, and that's why it worked when the options were on even though the recipe didn't explicitly include pthread. I suggest we add pthread in `cpp_info.system_libs`\r\n\r\nI'm really thankful to those who had worked in this package. I didn't expect it to be on conan ever, but now it's so nice and easy to use. Happy holidays guys! \n", "before_files": [{"content": "import os\nfrom conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nrequired_conan_version = \">=1.29.1\"\n\nclass ResiprocateConan(ConanFile):\n name = \"resiprocate\"\n description = \"The project is dedicated to maintaining a complete, correct, and commercially usable implementation of SIP and a few related protocols. \"\n topics = (\"sip\", \"voip\", \"communication\", \"signaling\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.resiprocate.org\"\n license = \"VSL-1.0\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"fPIC\": [True, False],\n \"shared\": [True, False],\n \"with_ssl\": [True, False],\n \"with_postgresql\": [True, False],\n \"with_mysql\": [True, False]}\n default_options = {\"fPIC\": True,\n \"shared\": False,\n \"with_ssl\": True,\n \"with_postgresql\": True,\n \"with_mysql\": True}\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def requirements(self):\n if self.settings.os in (\"Windows\", \"Macos\"):\n raise ConanInvalidConfiguration(\"reSIProcate is not support on {}.\".format(self.settings.os))\n if self.options.with_ssl:\n self.requires(\"openssl/1.1.1h\")\n if self.options.with_postgresql:\n self.requires(\"libpq/11.5\")\n if self.options.with_mysql:\n self.requires(\"libmysqlclient/8.0.17\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n yes_no = lambda v: \"yes\" if v else \"no\"\n configure_args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n \"--with-ssl={}\".format(yes_no(not self.options.with_ssl)),\n \"--with-mysql={}\".format(yes_no(not self.options.with_mysql)),\n \"--with-postgresql={}\".format(yes_no(not self.options.with_postgresql)),\n \"--with-pic={}\".format(yes_no(not self.options.fPIC))\n ]\n\n self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)\n return self._autotools\n\n def build(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n autotools = self._configure_autotools()\n 
autotools.install()\n tools.rmdir(os.path.join(os.path.join(self.package_folder, \"share\")))\n tools.remove_files_by_mask(os.path.join(self.package_folder), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = [\"resip\", \"rutil\", \"dum\", \"resipares\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/resiprocate/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nrequired_conan_version = \">=1.29.1\"\n\nclass ResiprocateConan(ConanFile):\n name = \"resiprocate\"\n description = \"The project is dedicated to maintaining a complete, correct, and commercially usable implementation of SIP and a few related protocols. \"\n topics = (\"sip\", \"voip\", \"communication\", \"signaling\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://www.resiprocate.org\"\n license = \"VSL-1.0\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"fPIC\": [True, False],\n \"shared\": [True, False],\n \"with_ssl\": [True, False],\n \"with_postgresql\": [True, False],\n \"with_mysql\": [True, False]}\n default_options = {\"fPIC\": True,\n \"shared\": False,\n \"with_ssl\": True,\n \"with_postgresql\": True,\n \"with_mysql\": True}\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def configure(self):\n if self.settings.os in (\"Windows\", \"Macos\"):\n # FIXME: Visual Studio project & Mac support seems available in resiprocate\n raise ConanInvalidConfiguration(\"reSIProcate recipe does not currently support {}.\".format(self.settings.os))\n if self.options.shared:\n del self.options.fPIC\n\n def requirements(self):\n if self.options.with_ssl:\n self.requires(\"openssl/1.1.1i\")\n if self.options.with_postgresql:\n self.requires(\"libpq/11.9\")\n if self.options.with_mysql:\n self.requires(\"libmysqlclient/8.0.17\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"{}-{}\".format(self.name, self.version), self._source_subfolder)\n\n def _configure_autotools(self):\n if self._autotools:\n return self._autotools\n self._autotools = AutoToolsBuildEnvironment(self)\n yes_no = lambda v: \"yes\" if v else \"no\"\n configure_args = [\n \"--enable-shared={}\".format(yes_no(self.options.shared)),\n \"--enable-static={}\".format(yes_no(not self.options.shared)),\n \"--with-pic={}\".format(yes_no(self.options.get_safe(\"fPIC\", True)))\n ]\n\n # These options do not support yes/no\n if self.options.with_ssl:\n configure_args.append(\"--with-ssl\")\n if self.options.with_mysql:\n configure_args.append(\"--with-mysql\")\n if self.options.with_postgresql:\n configure_args.append(\"--with-postgresql\")\n \n self._autotools.configure(configure_dir=self._source_subfolder, args=configure_args)\n return self._autotools\n\n def build(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(os.path.join(self.package_folder, \"share\")))\n 
tools.remove_files_by_mask(os.path.join(self.package_folder), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = [\"resip\", \"rutil\", \"dum\", \"resipares\"]\n if self.settings.os in (\"Linux\", \"FreeBSD\"):\n self.cpp_info.system_libs = [\"pthread\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/resiprocate/all/conanfile.py"}]}
| 1,760 | 670 |
gh_patches_debug_20750
|
rasdani/github-patches
|
git_diff
|
streamlit__streamlit-4973
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Certain float values not allowed with select_slider
### Summary
Certain values are not accepted when defining a `select_slider` over float options, due to float precision errors.
### Steps to reproduce
Code snippet:
```
import numpy as np
import streamlit as st
st.select_slider(
label="Select slider of floats",
options=np.arange(0.0, 0.25, 0.05),
value=(0.1, 0.15),
)
```
**Expected behavior:**
A `select_slider` widget whose options are the floats `[0.0, 0.05, 0.1, 0.15, 0.2]`, and whose default values are the floats `[0.1, 0.15]`.
**Actual behavior:**
The following traceback:
```
2022-04-28 21:47:19.181 Traceback (most recent call last):
File "/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/scriptrunner/script_runner.py", line 443, in _run_script
exec(code, module.__dict__)
File "/source/streamlit/app-report-port-taranaki/app/test.py", line 5, in <module>
st.select_slider(
File "/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py", line 122, in select_slider
return self._select_slider(
File "/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py", line 180, in _select_slider
slider_value = as_index_list(value)
File "/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py", line 164, in as_index_list
slider_value = [index_(opt, val) for val in v]
File "/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py", line 164, in <listcomp>
slider_value = [index_(opt, val) for val in v]
File "/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/util.py", line 130, in index_
raise ValueError("{} is not in iterable".format(str(x)))
ValueError: 0.15 is not in iterable
```
Inspection of [util.py](https://github.com/streamlit/streamlit/blob/develop/lib/streamlit/util.py#L110-L129) shows that `x` does not match the values in `iterable` due to float precision:
```
> /home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/util.py(130)index_()
129 import ipdb; ipdb.set_trace()
--> 130 raise ValueError("{} is not in iterable".format(str(x)))
131
ipdb> p x
0.15
ipdb> p iterable
[0.0, 0.05, 0.1, 0.15000000000000002, 0.2]
```
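As a quick illustration of the root cause, the same mismatch is reproducible with plain NumPy, and a tolerance-based comparison (for example `math.isclose` or `np.isclose`) matches the value as intended; the rounding call at the end is just one possible caller-side workaround, not the only fix:
```
import math
import numpy as np

options = np.arange(0.0, 0.25, 0.05)
print(options[3])                        # 0.15000000000000002, not 0.15
print(0.15 in options)                   # False: exact equality fails
print(math.isclose(0.15, options[3]))    # True: tolerant comparison matches
print(np.isclose(options, 0.15).any())   # True

# Caller-side workaround: round the generated options so the defaults
# compare equal exactly.
rounded = np.round(np.arange(0.0, 0.25, 0.05), 2)
print(0.15 in rounded)                   # True
```
An epsilon-based comparison inside `index_` addresses the same problem on the library side.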
### Is this a regression?
Don't know
### Debug info
- Streamlit version: 1.8.1
- Python version: 3.8.10
- Using Python virtualenv
- OS version: Ubuntu 20.04
- Browser version: Google Chrome 100.0.4896.127
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/streamlit/util.py`
Content:
```
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """A bunch of useful utilities."""
16
17 import functools
18 import hashlib
19 import os
20 import subprocess
21
22 from typing import Any, Dict, List, Mapping, TypeVar
23 from typing_extensions import Final
24
25 from streamlit import env_util
26
27 # URL of Streamlit's help page.
28 HELP_DOC: Final = "https://docs.streamlit.io/"
29
30
31 def memoize(func):
32 """Decorator to memoize the result of a no-args func."""
33 result = [] # type: List[Any]
34
35 @functools.wraps(func)
36 def wrapped_func():
37 if not result:
38 result.append(func())
39 return result[0]
40
41 return wrapped_func
42
43
44 def open_browser(url):
45 """Open a web browser pointing to a given URL.
46
47 We use this function instead of Python's `webbrowser` module because this
48 way we can capture stdout/stderr to avoid polluting the terminal with the
49 browser's messages. For example, Chrome always prints things like "Created
50 new window in existing browser session", and those get on the user's way.
51
52 url : str
53 The URL. Must include the protocol.
54
55 """
56
57 # Treat Windows separately because:
58 # 1. /dev/null doesn't exist.
59 # 2. subprocess.Popen(['start', url]) doesn't actually pop up the
60 # browser even though 'start url' works from the command prompt.
61 # Fun!
62 # Also, use webbrowser if we are on Linux and xdg-open is not installed.
63 #
64 # We don't use the webbrowser module on Linux and Mac because some browsers
65 # (ahem... Chrome) always print "Opening in existing browser session" to
66 # the terminal, which is spammy and annoying. So instead we start the
67 # browser ourselves and send all its output to /dev/null.
68
69 if env_util.IS_WINDOWS:
70 _open_browser_with_webbrowser(url)
71 return
72 if env_util.IS_LINUX_OR_BSD:
73 if env_util.is_executable_in_path("xdg-open"):
74 _open_browser_with_command("xdg-open", url)
75 return
76 _open_browser_with_webbrowser(url)
77 return
78 if env_util.IS_DARWIN:
79 _open_browser_with_command("open", url)
80 return
81
82 import platform
83
84 raise Error('Cannot open browser in platform "%s"' % platform.system())
85
86
87 def _open_browser_with_webbrowser(url):
88 import webbrowser
89
90 webbrowser.open(url)
91
92
93 def _open_browser_with_command(command, url):
94 cmd_line = [command, url]
95 with open(os.devnull, "w") as devnull:
96 subprocess.Popen(cmd_line, stdout=devnull, stderr=subprocess.STDOUT)
97
98
99 def _maybe_tuple_to_list(item: Any) -> Any:
100 """Convert a tuple to a list. Leave as is if it's not a tuple."""
101 if isinstance(item, tuple):
102 return list(item)
103 return item
104
105
106 def repr_(cls) -> str:
107 classname = cls.__class__.__name__
108 args = ", ".join([f"{k}={repr(v)}" for (k, v) in cls.__dict__.items()])
109 return f"{classname}({args})"
110
111
112 def index_(iterable, x) -> int:
113 """Return zero-based index of the first item whose value is equal to x.
114 Raises a ValueError if there is no such item.
115
116 We need a custom implementation instead of the built-in list .index() to
117 be compatible with NumPy array and Pandas Series.
118
119 Parameters
120 ----------
121 iterable : list, tuple, numpy.ndarray, pandas.Series
122
123 Returns
124 -------
125 int
126 """
127
128 for i, value in enumerate(iterable):
129 if x == value:
130 return i
131 raise ValueError("{} is not in iterable".format(str(x)))
132
133
134 _Key = TypeVar("_Key", bound=str)
135 _Value = TypeVar("_Value")
136
137
138 def lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> Dict[str, _Value]:
139 return {k.lower().strip(): v for k, v in dict.items()}
140
141
142 # TODO: Move this into errors.py? Replace with StreamlitAPIException?
143 class Error(Exception):
144 pass
145
146
147 def calc_md5(s: str) -> str:
148 """Return the md5 hash of the given string."""
149 h = hashlib.new("md5")
150 h.update(s.encode("utf-8"))
151 return h.hexdigest()
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/streamlit/util.py b/lib/streamlit/util.py
--- a/lib/streamlit/util.py
+++ b/lib/streamlit/util.py
@@ -18,6 +18,7 @@
import hashlib
import os
import subprocess
+import numpy as np
from typing import Any, Dict, List, Mapping, TypeVar
from typing_extensions import Final
@@ -26,6 +27,7 @@
# URL of Streamlit's help page.
HELP_DOC: Final = "https://docs.streamlit.io/"
+FLOAT_EQUALITY_EPSILON: Final = 0.000000000005
def memoize(func):
@@ -126,6 +128,11 @@
"""
for i, value in enumerate(iterable):
+ # https://stackoverflow.com/questions/588004/is-floating-point-math-broken
+ # https://github.com/streamlit/streamlit/issues/4663
+ if isinstance(iterable, np.ndarray):
+ if abs(x - value) < FLOAT_EQUALITY_EPSILON:
+ return i
if x == value:
return i
raise ValueError("{} is not in iterable".format(str(x)))
|
{"golden_diff": "diff --git a/lib/streamlit/util.py b/lib/streamlit/util.py\n--- a/lib/streamlit/util.py\n+++ b/lib/streamlit/util.py\n@@ -18,6 +18,7 @@\n import hashlib\n import os\n import subprocess\n+import numpy as np\n \n from typing import Any, Dict, List, Mapping, TypeVar\n from typing_extensions import Final\n@@ -26,6 +27,7 @@\n \n # URL of Streamlit's help page.\n HELP_DOC: Final = \"https://docs.streamlit.io/\"\n+FLOAT_EQUALITY_EPSILON: Final = 0.000000000005\n \n \n def memoize(func):\n@@ -126,6 +128,11 @@\n \"\"\"\n \n for i, value in enumerate(iterable):\n+ # https://stackoverflow.com/questions/588004/is-floating-point-math-broken\n+ # https://github.com/streamlit/streamlit/issues/4663\n+ if isinstance(iterable, np.ndarray):\n+ if abs(x - value) < FLOAT_EQUALITY_EPSILON:\n+ return i\n if x == value:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))\n", "issue": "Certain float values not allowed with select_slider\n### Summary\r\n\r\nCertain values are not allowed when defining `select_slider` of floats due to float precision errors.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nimport numpy as np\r\nimport streamlit as st\r\n\r\n\r\nst.select_slider(\r\n label=\"Select slider of floats\",\r\n options=np.arange(0.0, 0.25, 0.05),\r\n value=(0.1, 0.15),\r\n)\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nA `select_slider` widget where options are float numbers: `[0.0, 0.05, 0.1, 0.15, 0.2]`, and default values are float numbers: `[0.1, 0.15]`.\r\n\r\n**Actual behavior:**\r\n\r\nThe following traceback:\r\n\r\n```\r\n2022-04-28 21:47:19.181 Traceback (most recent call last):\r\n File \"/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/scriptrunner/script_runner.py\", line 443, in _run_script\r\n exec(code, module.__dict__)\r\n File \"/source/streamlit/app-report-port-taranaki/app/test.py\", line 5, in <module>\r\n st.select_slider(\r\n File \"/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py\", line 122, in select_slider\r\n return self._select_slider(\r\n File \"/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py\", line 180, in _select_slider\r\n slider_value = as_index_list(value)\r\n File \"/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py\", line 164, in as_index_list\r\n slider_value = [index_(opt, val) for val in v]\r\n File \"/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/elements/select_slider.py\", line 164, in <listcomp>\r\n slider_value = [index_(opt, val) for val in v]\r\n File \"/home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/util.py\", line 130, in index_\r\n raise ValueError(\"{} is not in iterable\".format(str(x)))\r\nValueError: 0.15 is not in iterable\r\n```\r\n\r\nInspection from [utils.py](https://github.com/streamlit/streamlit/blob/develop/lib/streamlit/util.py#L110-L129) shows that `x` does not match values in `iterable` due to float precision:\r\n\r\n```\r\n> /home/rguedes/.virtualenvs/streamlit/lib/python3.8/site-packages/streamlit/util.py(130)index_()\r\n 129 import ipdb; ipdb.set_trace()\r\n--> 130 raise ValueError(\"{} is not in iterable\".format(str(x)))\r\n 131 \r\n\r\nipdb> p x\r\n0.15\r\nipdb> p iterable\r\n[0.0, 0.05, 0.1, 0.15000000000000002, 0.2]\r\n```\r\n### Is this a regression?\r\n\r\nDon't know\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.8.1\r\n- Python version: 
3.8.10\r\n- Using Python virtualenv\r\n- OS version: Ubuntu 20.04\r\n- Browser version: Google Chrome 100.0.4896.127\r\n\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A bunch of useful utilities.\"\"\"\n\nimport functools\nimport hashlib\nimport os\nimport subprocess\n\nfrom typing import Any, Dict, List, Mapping, TypeVar\nfrom typing_extensions import Final\n\nfrom streamlit import env_util\n\n# URL of Streamlit's help page.\nHELP_DOC: Final = \"https://docs.streamlit.io/\"\n\n\ndef memoize(func):\n \"\"\"Decorator to memoize the result of a no-args func.\"\"\"\n result = [] # type: List[Any]\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func\n\n\ndef open_browser(url):\n \"\"\"Open a web browser pointing to a given URL.\n\n We use this function instead of Python's `webbrowser` module because this\n way we can capture stdout/stderr to avoid polluting the terminal with the\n browser's messages. For example, Chrome always prints things like \"Created\n new window in existing browser session\", and those get on the user's way.\n\n url : str\n The URL. Must include the protocol.\n\n \"\"\"\n\n # Treat Windows separately because:\n # 1. /dev/null doesn't exist.\n # 2. subprocess.Popen(['start', url]) doesn't actually pop up the\n # browser even though 'start url' works from the command prompt.\n # Fun!\n # Also, use webbrowser if we are on Linux and xdg-open is not installed.\n #\n # We don't use the webbrowser module on Linux and Mac because some browsers\n # (ahem... Chrome) always print \"Opening in existing browser session\" to\n # the terminal, which is spammy and annoying. So instead we start the\n # browser ourselves and send all its output to /dev/null.\n\n if env_util.IS_WINDOWS:\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_LINUX_OR_BSD:\n if env_util.is_executable_in_path(\"xdg-open\"):\n _open_browser_with_command(\"xdg-open\", url)\n return\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_DARWIN:\n _open_browser_with_command(\"open\", url)\n return\n\n import platform\n\n raise Error('Cannot open browser in platform \"%s\"' % platform.system())\n\n\ndef _open_browser_with_webbrowser(url):\n import webbrowser\n\n webbrowser.open(url)\n\n\ndef _open_browser_with_command(command, url):\n cmd_line = [command, url]\n with open(os.devnull, \"w\") as devnull:\n subprocess.Popen(cmd_line, stdout=devnull, stderr=subprocess.STDOUT)\n\n\ndef _maybe_tuple_to_list(item: Any) -> Any:\n \"\"\"Convert a tuple to a list. 
Leave as is if it's not a tuple.\"\"\"\n if isinstance(item, tuple):\n return list(item)\n return item\n\n\ndef repr_(cls) -> str:\n classname = cls.__class__.__name__\n args = \", \".join([f\"{k}={repr(v)}\" for (k, v) in cls.__dict__.items()])\n return f\"{classname}({args})\"\n\n\ndef index_(iterable, x) -> int:\n \"\"\"Return zero-based index of the first item whose value is equal to x.\n Raises a ValueError if there is no such item.\n\n We need a custom implementation instead of the built-in list .index() to\n be compatible with NumPy array and Pandas Series.\n\n Parameters\n ----------\n iterable : list, tuple, numpy.ndarray, pandas.Series\n\n Returns\n -------\n int\n \"\"\"\n\n for i, value in enumerate(iterable):\n if x == value:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))\n\n\n_Key = TypeVar(\"_Key\", bound=str)\n_Value = TypeVar(\"_Value\")\n\n\ndef lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> Dict[str, _Value]:\n return {k.lower().strip(): v for k, v in dict.items()}\n\n\n# TODO: Move this into errors.py? Replace with StreamlitAPIException?\nclass Error(Exception):\n pass\n\n\ndef calc_md5(s: str) -> str:\n \"\"\"Return the md5 hash of the given string.\"\"\"\n h = hashlib.new(\"md5\")\n h.update(s.encode(\"utf-8\"))\n return h.hexdigest()\n", "path": "lib/streamlit/util.py"}], "after_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A bunch of useful utilities.\"\"\"\n\nimport functools\nimport hashlib\nimport os\nimport subprocess\nimport numpy as np\n\nfrom typing import Any, Dict, List, Mapping, TypeVar\nfrom typing_extensions import Final\n\nfrom streamlit import env_util\n\n# URL of Streamlit's help page.\nHELP_DOC: Final = \"https://docs.streamlit.io/\"\nFLOAT_EQUALITY_EPSILON: Final = 0.000000000005\n\n\ndef memoize(func):\n \"\"\"Decorator to memoize the result of a no-args func.\"\"\"\n result = [] # type: List[Any]\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func\n\n\ndef open_browser(url):\n \"\"\"Open a web browser pointing to a given URL.\n\n We use this function instead of Python's `webbrowser` module because this\n way we can capture stdout/stderr to avoid polluting the terminal with the\n browser's messages. For example, Chrome always prints things like \"Created\n new window in existing browser session\", and those get on the user's way.\n\n url : str\n The URL. Must include the protocol.\n\n \"\"\"\n\n # Treat Windows separately because:\n # 1. /dev/null doesn't exist.\n # 2. subprocess.Popen(['start', url]) doesn't actually pop up the\n # browser even though 'start url' works from the command prompt.\n # Fun!\n # Also, use webbrowser if we are on Linux and xdg-open is not installed.\n #\n # We don't use the webbrowser module on Linux and Mac because some browsers\n # (ahem... 
Chrome) always print \"Opening in existing browser session\" to\n # the terminal, which is spammy and annoying. So instead we start the\n # browser ourselves and send all its output to /dev/null.\n\n if env_util.IS_WINDOWS:\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_LINUX_OR_BSD:\n if env_util.is_executable_in_path(\"xdg-open\"):\n _open_browser_with_command(\"xdg-open\", url)\n return\n _open_browser_with_webbrowser(url)\n return\n if env_util.IS_DARWIN:\n _open_browser_with_command(\"open\", url)\n return\n\n import platform\n\n raise Error('Cannot open browser in platform \"%s\"' % platform.system())\n\n\ndef _open_browser_with_webbrowser(url):\n import webbrowser\n\n webbrowser.open(url)\n\n\ndef _open_browser_with_command(command, url):\n cmd_line = [command, url]\n with open(os.devnull, \"w\") as devnull:\n subprocess.Popen(cmd_line, stdout=devnull, stderr=subprocess.STDOUT)\n\n\ndef _maybe_tuple_to_list(item: Any) -> Any:\n \"\"\"Convert a tuple to a list. Leave as is if it's not a tuple.\"\"\"\n if isinstance(item, tuple):\n return list(item)\n return item\n\n\ndef repr_(cls) -> str:\n classname = cls.__class__.__name__\n args = \", \".join([f\"{k}={repr(v)}\" for (k, v) in cls.__dict__.items()])\n return f\"{classname}({args})\"\n\n\ndef index_(iterable, x) -> int:\n \"\"\"Return zero-based index of the first item whose value is equal to x.\n Raises a ValueError if there is no such item.\n\n We need a custom implementation instead of the built-in list .index() to\n be compatible with NumPy array and Pandas Series.\n\n Parameters\n ----------\n iterable : list, tuple, numpy.ndarray, pandas.Series\n\n Returns\n -------\n int\n \"\"\"\n\n for i, value in enumerate(iterable):\n # https://stackoverflow.com/questions/588004/is-floating-point-math-broken\n # https://github.com/streamlit/streamlit/issues/4663\n if isinstance(iterable, np.ndarray):\n if abs(x - value) < FLOAT_EQUALITY_EPSILON:\n return i\n if x == value:\n return i\n raise ValueError(\"{} is not in iterable\".format(str(x)))\n\n\n_Key = TypeVar(\"_Key\", bound=str)\n_Value = TypeVar(\"_Value\")\n\n\ndef lower_clean_dict_keys(dict: Mapping[_Key, _Value]) -> Dict[str, _Value]:\n return {k.lower().strip(): v for k, v in dict.items()}\n\n\n# TODO: Move this into errors.py? Replace with StreamlitAPIException?\nclass Error(Exception):\n pass\n\n\ndef calc_md5(s: str) -> str:\n \"\"\"Return the md5 hash of the given string.\"\"\"\n h = hashlib.new(\"md5\")\n h.update(s.encode(\"utf-8\"))\n return h.hexdigest()\n", "path": "lib/streamlit/util.py"}]}
| 2,534 | 267 |
gh_patches_debug_19580
|
rasdani/github-patches
|
git_diff
|
pyjanitor-devs__pyjanitor-445
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC] Adding context about method chaining
# Brief Description of Fix
<!-- Please describe the fix in terms of a "before" and "after". In other words, what's not so good about the current docs
page, and what you would like to see it become.
Example starter wording is provided. -->
Currently, the docs don't explain what method chaining is. A beginner would benefit from reading about it.
I would like to propose a change such that the docs include a link to a Medium post explaining the topic.
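For readers unfamiliar with the term, method chaining just means calling several DataFrame methods back to back in a single expression instead of reassigning intermediate variables; a generic pandas sketch (the column names here are made up for illustration) looks like this:
```
import pandas as pd

df = pd.DataFrame({"First Name": ["Ada", "Grace"], "Score": [95, None]})

# Without chaining: an intermediate assignment at every step.
renamed = df.rename(columns=lambda c: c.lower().replace(" ", "_"))
cleaned = renamed.dropna()
result = cleaned.assign(passed=lambda d: d.score >= 60)

# With chaining: one readable pipeline, top to bottom.
result = (
    df.rename(columns=lambda c: c.lower().replace(" ", "_"))
    .dropna()
    .assign(passed=lambda d: d.score >= 60)
)
```
pyjanitor's functions are designed to slot into exactly this kind of pipeline, which is why a short explanation (or a link to one) helps beginners get started.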
# Relevant Context
<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available
to get you started. -->
- [Link to documentation page](http://pyjanitor.readthedocs.io)
- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/README.rst)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 """Sphinx configuration."""
2
3 # -*- coding: utf-8 -*-
4 #
5 # Configuration file for the Sphinx documentation builder.
6 #
7 # This file does only contain a selection of the most common options. For a
8 # full list see the documentation:
9 # http://www.sphinx-doc.org/en/stable/config
10
11 # -- Path setup --------------------------------------------------------------
12
13 # If extensions (or modules to document with autodoc) are in another directory,
14 # add these directories to sys.path here. If the directory is relative to the
15 # documentation root, use os.path.abspath to make it absolute, like shown here.
16 #
17 import os
18 import sys
19 import datetime
20 from pathlib import Path
21
22 sys.path.insert(0, os.path.abspath("."))
23 sys.path.insert(0, os.path.abspath("../examples"))
24
25 # Make a symlink in our sphinx source directory to the top-level
26 # examples/notebooks directory so we can include notebooks in the doc
27 notebooks = Path("./notebooks")
28 if not notebooks.exists():
29 print("Making symlink to ../examples/notebooks")
30 notebooks.symlink_to("../examples/notebooks")
31
32
33 # -- Project information -----------------------------------------------------
34
35 project = "pyjanitor"
36
37
38 now = datetime.datetime.now()
39 CurrentYear = str(now.year)
40 copyright = CurrentYear+", PyJanitor devs"
41 author = "Eric J. Ma"
42
43 # The short X.Y version
44 version = "0.1.0"
45 # The full version, including alpha/beta/rc tags
46 release = ""
47
48
49 # -- General configuration ---------------------------------------------------
50
51 # If your documentation needs a minimal Sphinx version, state it here.
52 #
53 # needs_sphinx = '1.0'
54
55 # Add any Sphinx extension module names here, as strings. They can be
56 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
57 # ones.
58 extensions = [
59 "sphinx.ext.autodoc",
60 "sphinx.ext.doctest",
61 "sphinx.ext.intersphinx",
62 "sphinx.ext.todo",
63 "sphinx.ext.coverage",
64 "sphinx.ext.viewcode",
65 "sphinx.ext.githubpages",
66 "sphinxcontrib.fulltoc",
67 "nbsphinx",
68 "sphinx.ext.autosummary",
69 ]
70
71 # Add any paths that contain templates here, relative to this directory.
72 templates_path = ["_templates"]
73
74 # The suffix(es) of source filenames.
75 # You can specify multiple suffix as a list of string:
76 #
77 # source_suffix = ['.rst', '.md']
78 source_suffix = [".md", ".rst", ".ipynb"]
79
80 # The master toctree document.
81 master_doc = "index"
82
83 # The language for content autogenerated by Sphinx. Refer to documentation
84 # for a list of supported languages.
85 #
86 # This is also used if you do content translation via gettext catalogs.
87 # Usually you set "language" from the command line for these cases.
88 language = None
89
90 # List of patterns, relative to source directory, that match files and
91 # directories to ignore when looking for source files.
92 # This pattern also affects html_static_path and html_extra_path .
93 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
94
95 # The name of the Pygments (syntax highlighting) style to use.
96 pygments_style = "sphinx"
97
98
99 # -- Options for HTML output -------------------------------------------------
100
101 # The theme to use for HTML and HTML Help pages. See the documentation for
102 # a list of builtin themes.
103 #
104 html_theme = "alabaster"
105
106 # Theme options are theme-specific and customize the look and feel of a theme
107 # further. For a list of options available for each theme, see the
108 # documentation.
109 #
110 html_theme_options = {"logo": "logo_title.svg"}
111
112 # Add any paths that contain custom static files (such as style sheets) here,
113 # relative to this directory. They are copied after the builtin static files,
114 # so a file named "default.css" will overwrite the builtin "default.css".
115 html_static_path = ["_static"]
116
117 # Custom sidebar templates, must be a dictionary that maps document names
118 # to template names.
119 #
120 # The default sidebars (for documents that don't match any pattern) are
121 # defined by theme itself. Builtin themes are using these templates by
122 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
123 # 'searchbox.html']``.
124 #
125 html_sidebars = {
126 "**": ["about.html", "navigation.html", "relations.html", "searchbox.html"]
127 }
128
129
130 # -- Options for HTMLHelp output ---------------------------------------------
131
132 # Output file base name for HTML help builder.
133 htmlhelp_basename = "pyjanitordoc"
134
135
136 # -- Options for LaTeX output ------------------------------------------------
137
138 latex_elements = {
139 # The paper size ('letterpaper' or 'a4paper').
140 #
141 # 'papersize': 'letterpaper',
142 # The font size ('10pt', '11pt' or '12pt').
143 #
144 # 'pointsize': '10pt',
145 # Additional stuff for the LaTeX preamble.
146 #
147 # 'preamble': '',
148 # Latex figure (float) alignment
149 #
150 # 'figure_align': 'htbp',
151 }
152
153 # Grouping the document tree into LaTeX files. List of tuples
154 # (source start file, target name, title,
155 # author, documentclass [howto, manual, or own class]).
156 latex_documents = [
157 (
158 master_doc,
159 "pyjanitor.tex",
160 "pyjanitor Documentation",
161 "Eric J. Ma",
162 "manual",
163 )
164 ]
165
166
167 # -- Options for manual page output ------------------------------------------
168
169 # One entry per manual page. List of tuples
170 # (source start file, name, description, authors, manual section).
171 man_pages = [(master_doc, "pyjanitor", "pyjanitor Documentation", [author], 1)]
172
173
174 # -- Options for Texinfo output ----------------------------------------------
175
176 # Grouping the document tree into Texinfo files. List of tuples
177 # (source start file, target name, title, author,
178 # dir menu entry, description, category)
179 texinfo_documents = [
180 (
181 master_doc,
182 "pyjanitor",
183 "pyjanitor Documentation",
184 author,
185 "pyjanitor",
186 "One line description of project.",
187 "Miscellaneous",
188 )
189 ]
190
191
192 # -- Extension configuration -------------------------------------------------
193
194 # -- Options for intersphinx extension ---------------------------------------
195
196 # Example configuration for intersphinx: refer to the Python standard library.
197 intersphinx_mapping = {
198 "https://docs.python.org/": None,
199 "https://pandas.pydata.org/pandas-docs/stable": None,
200 }
201
202 # -- Options for todo extension ----------------------------------------------
203
204 # If true, `todo` and `todoList` produce output, else they produce nothing.
205 todo_include_todos = True
206
207
208 # -- Other options -----------------------------------------------------------
209
210 autosummary_generate = True # Make _autosummary files and include them
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -10,13 +10,14 @@
# -- Path setup --------------------------------------------------------------
+import datetime
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
-import datetime
from pathlib import Path
sys.path.insert(0, os.path.abspath("."))
@@ -37,7 +38,7 @@
now = datetime.datetime.now()
CurrentYear = str(now.year)
-copyright = CurrentYear+", PyJanitor devs"
+copyright = CurrentYear + ", PyJanitor devs"
author = "Eric J. Ma"
# The short X.Y version
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -10,13 +10,14 @@\n \n # -- Path setup --------------------------------------------------------------\n \n+import datetime\n+\n # If extensions (or modules to document with autodoc) are in another directory,\n # add these directories to sys.path here. If the directory is relative to the\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n #\n import os\n import sys\n-import datetime\n from pathlib import Path\n \n sys.path.insert(0, os.path.abspath(\".\"))\n@@ -37,7 +38,7 @@\n \n now = datetime.datetime.now()\n CurrentYear = str(now.year)\n-copyright = CurrentYear+\", PyJanitor devs\"\n+copyright = CurrentYear + \", PyJanitor devs\"\n author = \"Eric J. Ma\"\n \n # The short X.Y version\n", "issue": "[DOC] Adding context about method chaining \n# Brief Description of Fix\r\n\r\n<!-- Please describe the fix in terms of a \"before\" and \"after\". In other words, what's not so good about the current docs\r\npage, and what you would like to see it become. \r\n\r\nExample starter wording is provided. -->\r\n\r\nCurrently, the docs don't explain what method chaining is. A beginner would benefit from reading about it.\r\n\r\nI would like to propose a change, such that now the docs include a link to a medium post explaining the topic.\r\n\r\n# Relevant Context\r\n\r\n<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available\r\nto get you started. -->\r\n\r\n- [Link to documentation page](http://pyjanitor.readthedocs.io)\r\n- [Link to exact file to be edited](https://github.com/ericmjl/pyjanitor/README.rst)\r\n\n", "before_files": [{"content": "\"\"\"Sphinx configuration.\"\"\"\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport datetime\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\n\n\nnow = datetime.datetime.now()\nCurrentYear = str(now.year)\ncopyright = CurrentYear+\", PyJanitor devs\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Other options -----------------------------------------------------------\n\nautosummary_generate = True # Make _autosummary files and include them\n", "path": "docs/conf.py"}], "after_files": [{"content": "\"\"\"Sphinx configuration.\"\"\"\n\n# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport datetime\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, os.path.abspath(\".\"))\nsys.path.insert(0, os.path.abspath(\"../examples\"))\n\n# Make a symlink in our sphinx source directory to the top-level\n# examples/notebooks directory so we can include notebooks in the doc\nnotebooks = Path(\"./notebooks\")\nif not notebooks.exists():\n print(\"Making symlink to ../examples/notebooks\")\n notebooks.symlink_to(\"../examples/notebooks\")\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pyjanitor\"\n\n\nnow = datetime.datetime.now()\nCurrentYear = str(now.year)\ncopyright = CurrentYear + \", PyJanitor devs\"\nauthor = \"Eric J. Ma\"\n\n# The short X.Y version\nversion = \"0.1.0\"\n# The full version, including alpha/beta/rc tags\nrelease = \"\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinxcontrib.fulltoc\",\n \"nbsphinx\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".md\", \".rst\", \".ipynb\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\"logo\": \"logo_title.svg\"}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n \"**\": [\"about.html\", \"navigation.html\", \"relations.html\", \"searchbox.html\"]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"pyjanitordoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"pyjanitor.tex\",\n \"pyjanitor Documentation\",\n \"Eric J. Ma\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. 
List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyjanitor\", \"pyjanitor Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"pyjanitor\",\n \"pyjanitor Documentation\",\n author,\n \"pyjanitor\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"https://docs.python.org/\": None,\n \"https://pandas.pydata.org/pandas-docs/stable\": None,\n}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Other options -----------------------------------------------------------\n\nautosummary_generate = True # Make _autosummary files and include them\n", "path": "docs/conf.py"}]}
| 2,448 | 194 |
gh_patches_debug_19930 | rasdani/github-patches | git_diff | matrix-org__synapse-11757 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sample doc in #11561 is malformatted
see https://github.com/matrix-org/synapse/pull/11561#pullrequestreview-851817482
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/config/api.py`
Content:
```
1 # Copyright 2015-2021 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from typing import Iterable
17
18 from synapse.api.constants import EventTypes
19 from synapse.config._base import Config, ConfigError
20 from synapse.config._util import validate_config
21 from synapse.types import JsonDict
22
23 logger = logging.getLogger(__name__)
24
25
26 class ApiConfig(Config):
27 section = "api"
28
29 def read_config(self, config: JsonDict, **kwargs):
30 validate_config(_MAIN_SCHEMA, config, ())
31 self.room_prejoin_state = list(self._get_prejoin_state_types(config))
32 self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False)
33
34 def generate_config_section(cls, **kwargs) -> str:
35 formatted_default_state_types = "\n".join(
36 " # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES
37 )
38
39 return """\
40 ## API Configuration ##
41
42 # Controls for the state that is shared with users who receive an invite
43 # to a room
44 #
45 room_prejoin_state:
46 # By default, the following state event types are shared with users who
47 # receive invites to the room:
48 #
49 %(formatted_default_state_types)s
50 #
51 # Uncomment the following to disable these defaults (so that only the event
52 # types listed in 'additional_event_types' are shared). Defaults to 'false'.
53 #
54 #disable_default_event_types: true
55
56 # Additional state event types to share with users when they are invited
57 # to a room.
58 #
59 # By default, this list is empty (so only the default event types are shared).
60 #
61 #additional_event_types:
62 # - org.example.custom.event.type
63
64 # By default when puppeting another user, the user who has created the
65 # access token for puppeting is tracked. If this is enabled, both
66 # requests are tracked. Implicitly enables MAU tracking for puppeted users.
67 # Uncomment to also track puppeted user IP's.
68 #
69 #track_puppeted_user_ips: true
70 """ % {
71 "formatted_default_state_types": formatted_default_state_types
72 }
73
74 def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:
75 """Get the event types to include in the prejoin state
76
77 Parses the config and returns an iterable of the event types to be included.
78 """
79 room_prejoin_state_config = config.get("room_prejoin_state") or {}
80
81 # backwards-compatibility support for room_invite_state_types
82 if "room_invite_state_types" in config:
83 # if both "room_invite_state_types" and "room_prejoin_state" are set, then
84 # we don't really know what to do.
85 if room_prejoin_state_config:
86 raise ConfigError(
87 "Can't specify both 'room_invite_state_types' and 'room_prejoin_state' "
88 "in config"
89 )
90
91 logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)
92
93 yield from config["room_invite_state_types"]
94 return
95
96 if not room_prejoin_state_config.get("disable_default_event_types"):
97 yield from _DEFAULT_PREJOIN_STATE_TYPES
98
99 yield from room_prejoin_state_config.get("additional_event_types", [])
100
101
102 _ROOM_INVITE_STATE_TYPES_WARNING = """\
103 WARNING: The 'room_invite_state_types' configuration setting is now deprecated,
104 and replaced with 'room_prejoin_state'. New features may not work correctly
105 unless 'room_invite_state_types' is removed. See the sample configuration file for
106 details of 'room_prejoin_state'.
107 --------------------------------------------------------------------------------
108 """
109
110 _DEFAULT_PREJOIN_STATE_TYPES = [
111 EventTypes.JoinRules,
112 EventTypes.CanonicalAlias,
113 EventTypes.RoomAvatar,
114 EventTypes.RoomEncryption,
115 EventTypes.Name,
116 # Per MSC1772.
117 EventTypes.Create,
118 # Per MSC3173.
119 EventTypes.Topic,
120 ]
121
122
123 # room_prejoin_state can either be None (as it is in the default config), or
124 # an object containing other config settings
125 _ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {
126 "oneOf": [
127 {
128 "type": "object",
129 "properties": {
130 "disable_default_event_types": {"type": "boolean"},
131 "additional_event_types": {
132 "type": "array",
133 "items": {"type": "string"},
134 },
135 },
136 },
137 {"type": "null"},
138 ]
139 }
140
141 # the legacy room_invite_state_types setting
142 _ROOM_INVITE_STATE_TYPES_SCHEMA = {"type": "array", "items": {"type": "string"}}
143
144 _MAIN_SCHEMA = {
145 "type": "object",
146 "properties": {
147 "room_prejoin_state": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA,
148 "room_invite_state_types": _ROOM_INVITE_STATE_TYPES_SCHEMA,
149 "track_puppeted_user_ips": {
150 "type": "boolean",
151 },
152 },
153 }
154
```
--- END FILES ---
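
The prejoin-state handling in the file above is easy to exercise in isolation, since `_get_prejoin_state_types` only reads the config dict passed to it. The sketch below is illustrative only: the bare `ApiConfig.__new__` construction and the sample config dict are assumptions, not part of the repository.

```python
# Minimal sketch of how ApiConfig._get_prejoin_state_types interprets a config dict.
# The __new__ shortcut and the sample config are assumptions for illustration only.
from synapse.config.api import ApiConfig, _DEFAULT_PREJOIN_STATE_TYPES

cfg = ApiConfig.__new__(ApiConfig)  # the method does not touch instance state
config = {
    "room_prejoin_state": {
        "additional_event_types": ["org.example.custom.event.type"],
    }
}
types = list(cfg._get_prejoin_state_types(config))
# Defaults are yielded first, then the additional type from the config.
assert types == list(_DEFAULT_PREJOIN_STATE_TYPES) + ["org.example.custom.event.type"]
```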
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/synapse/config/api.py b/synapse/config/api.py
--- a/synapse/config/api.py
+++ b/synapse/config/api.py
@@ -61,10 +61,18 @@
#additional_event_types:
# - org.example.custom.event.type
- # By default when puppeting another user, the user who has created the
- # access token for puppeting is tracked. If this is enabled, both
- # requests are tracked. Implicitly enables MAU tracking for puppeted users.
- # Uncomment to also track puppeted user IP's.
+ # We record the IP address of clients used to access the API for various
+ # reasons, including displaying it to the user in the "Where you're signed in"
+ # dialog.
+ #
+ # By default, when puppeting another user via the admin API, the client IP
+ # address is recorded against the user who created the access token (ie, the
+ # admin user), and *not* the puppeted user.
+ #
+ # Uncomment the following to also record the IP address against the puppeted
+ # user. (This also means that the puppeted user will count as an "active" user
+ # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc
+ # above.)
#
#track_puppeted_user_ips: true
""" % {
|
{"golden_diff": "diff --git a/synapse/config/api.py b/synapse/config/api.py\n--- a/synapse/config/api.py\n+++ b/synapse/config/api.py\n@@ -61,10 +61,18 @@\n #additional_event_types:\n # - org.example.custom.event.type\n \n- # By default when puppeting another user, the user who has created the\n- # access token for puppeting is tracked. If this is enabled, both\n- # requests are tracked. Implicitly enables MAU tracking for puppeted users.\n- # Uncomment to also track puppeted user IP's.\n+ # We record the IP address of clients used to access the API for various\n+ # reasons, including displaying it to the user in the \"Where you're signed in\"\n+ # dialog.\n+ #\n+ # By default, when puppeting another user via the admin API, the client IP\n+ # address is recorded against the user who created the access token (ie, the\n+ # admin user), and *not* the puppeted user.\n+ #\n+ # Uncomment the following to also record the IP address against the puppeted\n+ # user. (This also means that the puppeted user will count as an \"active\" user\n+ # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc\n+ # above.)\n #\n #track_puppeted_user_ips: true\n \"\"\" % {\n", "issue": "sample doc in #11561 is malformatted\nsee https://github.com/matrix-org/synapse/pull/11561#pullrequestreview-851817482\n", "before_files": [{"content": "# Copyright 2015-2021 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import Iterable\n\nfrom synapse.api.constants import EventTypes\nfrom synapse.config._base import Config, ConfigError\nfrom synapse.config._util import validate_config\nfrom synapse.types import JsonDict\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApiConfig(Config):\n section = \"api\"\n\n def read_config(self, config: JsonDict, **kwargs):\n validate_config(_MAIN_SCHEMA, config, ())\n self.room_prejoin_state = list(self._get_prejoin_state_types(config))\n self.track_puppeted_user_ips = config.get(\"track_puppeted_user_ips\", False)\n\n def generate_config_section(cls, **kwargs) -> str:\n formatted_default_state_types = \"\\n\".join(\n \" # - %s\" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES\n )\n\n return \"\"\"\\\n ## API Configuration ##\n\n # Controls for the state that is shared with users who receive an invite\n # to a room\n #\n room_prejoin_state:\n # By default, the following state event types are shared with users who\n # receive invites to the room:\n #\n%(formatted_default_state_types)s\n #\n # Uncomment the following to disable these defaults (so that only the event\n # types listed in 'additional_event_types' are shared). 
Defaults to 'false'.\n #\n #disable_default_event_types: true\n\n # Additional state event types to share with users when they are invited\n # to a room.\n #\n # By default, this list is empty (so only the default event types are shared).\n #\n #additional_event_types:\n # - org.example.custom.event.type\n\n # By default when puppeting another user, the user who has created the\n # access token for puppeting is tracked. If this is enabled, both\n # requests are tracked. Implicitly enables MAU tracking for puppeted users.\n # Uncomment to also track puppeted user IP's.\n #\n #track_puppeted_user_ips: true\n \"\"\" % {\n \"formatted_default_state_types\": formatted_default_state_types\n }\n\n def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:\n \"\"\"Get the event types to include in the prejoin state\n\n Parses the config and returns an iterable of the event types to be included.\n \"\"\"\n room_prejoin_state_config = config.get(\"room_prejoin_state\") or {}\n\n # backwards-compatibility support for room_invite_state_types\n if \"room_invite_state_types\" in config:\n # if both \"room_invite_state_types\" and \"room_prejoin_state\" are set, then\n # we don't really know what to do.\n if room_prejoin_state_config:\n raise ConfigError(\n \"Can't specify both 'room_invite_state_types' and 'room_prejoin_state' \"\n \"in config\"\n )\n\n logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)\n\n yield from config[\"room_invite_state_types\"]\n return\n\n if not room_prejoin_state_config.get(\"disable_default_event_types\"):\n yield from _DEFAULT_PREJOIN_STATE_TYPES\n\n yield from room_prejoin_state_config.get(\"additional_event_types\", [])\n\n\n_ROOM_INVITE_STATE_TYPES_WARNING = \"\"\"\\\nWARNING: The 'room_invite_state_types' configuration setting is now deprecated,\nand replaced with 'room_prejoin_state'. New features may not work correctly\nunless 'room_invite_state_types' is removed. 
See the sample configuration file for\ndetails of 'room_prejoin_state'.\n--------------------------------------------------------------------------------\n\"\"\"\n\n_DEFAULT_PREJOIN_STATE_TYPES = [\n EventTypes.JoinRules,\n EventTypes.CanonicalAlias,\n EventTypes.RoomAvatar,\n EventTypes.RoomEncryption,\n EventTypes.Name,\n # Per MSC1772.\n EventTypes.Create,\n # Per MSC3173.\n EventTypes.Topic,\n]\n\n\n# room_prejoin_state can either be None (as it is in the default config), or\n# an object containing other config settings\n_ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {\n \"oneOf\": [\n {\n \"type\": \"object\",\n \"properties\": {\n \"disable_default_event_types\": {\"type\": \"boolean\"},\n \"additional_event_types\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n },\n },\n {\"type\": \"null\"},\n ]\n}\n\n# the legacy room_invite_state_types setting\n_ROOM_INVITE_STATE_TYPES_SCHEMA = {\"type\": \"array\", \"items\": {\"type\": \"string\"}}\n\n_MAIN_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"room_prejoin_state\": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA,\n \"room_invite_state_types\": _ROOM_INVITE_STATE_TYPES_SCHEMA,\n \"track_puppeted_user_ips\": {\n \"type\": \"boolean\",\n },\n },\n}\n", "path": "synapse/config/api.py"}], "after_files": [{"content": "# Copyright 2015-2021 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import Iterable\n\nfrom synapse.api.constants import EventTypes\nfrom synapse.config._base import Config, ConfigError\nfrom synapse.config._util import validate_config\nfrom synapse.types import JsonDict\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApiConfig(Config):\n section = \"api\"\n\n def read_config(self, config: JsonDict, **kwargs):\n validate_config(_MAIN_SCHEMA, config, ())\n self.room_prejoin_state = list(self._get_prejoin_state_types(config))\n self.track_puppeted_user_ips = config.get(\"track_puppeted_user_ips\", False)\n\n def generate_config_section(cls, **kwargs) -> str:\n formatted_default_state_types = \"\\n\".join(\n \" # - %s\" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES\n )\n\n return \"\"\"\\\n ## API Configuration ##\n\n # Controls for the state that is shared with users who receive an invite\n # to a room\n #\n room_prejoin_state:\n # By default, the following state event types are shared with users who\n # receive invites to the room:\n #\n%(formatted_default_state_types)s\n #\n # Uncomment the following to disable these defaults (so that only the event\n # types listed in 'additional_event_types' are shared). 
Defaults to 'false'.\n #\n #disable_default_event_types: true\n\n # Additional state event types to share with users when they are invited\n # to a room.\n #\n # By default, this list is empty (so only the default event types are shared).\n #\n #additional_event_types:\n # - org.example.custom.event.type\n\n # We record the IP address of clients used to access the API for various\n # reasons, including displaying it to the user in the \"Where you're signed in\"\n # dialog.\n #\n # By default, when puppeting another user via the admin API, the client IP\n # address is recorded against the user who created the access token (ie, the\n # admin user), and *not* the puppeted user.\n #\n # Uncomment the following to also record the IP address against the puppeted\n # user. (This also means that the puppeted user will count as an \"active\" user\n # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc\n # above.)\n #\n #track_puppeted_user_ips: true\n \"\"\" % {\n \"formatted_default_state_types\": formatted_default_state_types\n }\n\n def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]:\n \"\"\"Get the event types to include in the prejoin state\n\n Parses the config and returns an iterable of the event types to be included.\n \"\"\"\n room_prejoin_state_config = config.get(\"room_prejoin_state\") or {}\n\n # backwards-compatibility support for room_invite_state_types\n if \"room_invite_state_types\" in config:\n # if both \"room_invite_state_types\" and \"room_prejoin_state\" are set, then\n # we don't really know what to do.\n if room_prejoin_state_config:\n raise ConfigError(\n \"Can't specify both 'room_invite_state_types' and 'room_prejoin_state' \"\n \"in config\"\n )\n\n logger.warning(_ROOM_INVITE_STATE_TYPES_WARNING)\n\n yield from config[\"room_invite_state_types\"]\n return\n\n if not room_prejoin_state_config.get(\"disable_default_event_types\"):\n yield from _DEFAULT_PREJOIN_STATE_TYPES\n\n yield from room_prejoin_state_config.get(\"additional_event_types\", [])\n\n\n_ROOM_INVITE_STATE_TYPES_WARNING = \"\"\"\\\nWARNING: The 'room_invite_state_types' configuration setting is now deprecated,\nand replaced with 'room_prejoin_state'. New features may not work correctly\nunless 'room_invite_state_types' is removed. 
See the sample configuration file for\ndetails of 'room_prejoin_state'.\n--------------------------------------------------------------------------------\n\"\"\"\n\n_DEFAULT_PREJOIN_STATE_TYPES = [\n EventTypes.JoinRules,\n EventTypes.CanonicalAlias,\n EventTypes.RoomAvatar,\n EventTypes.RoomEncryption,\n EventTypes.Name,\n # Per MSC1772.\n EventTypes.Create,\n # Per MSC3173.\n EventTypes.Topic,\n]\n\n\n# room_prejoin_state can either be None (as it is in the default config), or\n# an object containing other config settings\n_ROOM_PREJOIN_STATE_CONFIG_SCHEMA = {\n \"oneOf\": [\n {\n \"type\": \"object\",\n \"properties\": {\n \"disable_default_event_types\": {\"type\": \"boolean\"},\n \"additional_event_types\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\"},\n },\n },\n },\n {\"type\": \"null\"},\n ]\n}\n\n# the legacy room_invite_state_types setting\n_ROOM_INVITE_STATE_TYPES_SCHEMA = {\"type\": \"array\", \"items\": {\"type\": \"string\"}}\n\n_MAIN_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"room_prejoin_state\": _ROOM_PREJOIN_STATE_CONFIG_SCHEMA,\n \"room_invite_state_types\": _ROOM_INVITE_STATE_TYPES_SCHEMA,\n \"track_puppeted_user_ips\": {\n \"type\": \"boolean\",\n },\n },\n}\n", "path": "synapse/config/api.py"}]}
| 1,848 | 320 |
gh_patches_debug_44351 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4725 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Cannot assemble flow with missing content" when exporting streamed flow in raw format
#### Problem Description
When a response (and possibly a request, I haven't checked that) is streamed and the user tries to run the command `:export.clip raw @focus`, there is an exception:
```
Traceback (most recent call last):
File "/Users/korran/Projects/mitmproxy/mitmproxy/master.py", line 54, in run_loop
loop()
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py", line 287, in run
self._run()
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py", line 385, in _run
self.event_loop.run()
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py", line 1494, in run
reraise(*exc_info)
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/compat.py", line 58, in reraise
raise value
File "/usr/local/Cellar/[email protected]/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/raw_display.py", line 416, in <lambda>
wrapper = lambda: self.parse_input(
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/raw_display.py", line 515, in parse_input
callback(processed, processed_codes)
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py", line 412, in _update
self.process_input(keys)
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py", line 513, in process_input
k = self._topmost_widget.keypress(self.screen_size, k)
File "/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/window.py", line 316, in keypress
k = super().keypress(size, k)
File "/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/container.py", line 1123, in keypress
return self.footer.keypress((maxcol,),key)
File "/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py", line 201, in keypress
return self.ab.keypress(*args, **kwargs)
File "/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py", line 149, in keypress
self.prompt_execute(text)
File "/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py", line 169, in prompt_execute
msg = p(txt)
File "/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py", line 115, in execute_command
execute(txt)
File "/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/commandexecutor.py", line 18, in __call__
ret = self.master.commands.execute(cmd)
File "/Users/korran/Projects/mitmproxy/mitmproxy/command.py", line 273, in execute
return self.call_strings(command_name, args)
File "/Users/korran/Projects/mitmproxy/mitmproxy/command.py", line 259, in call_strings
return self.commands[command_name].call(args)
File "/Users/korran/Projects/mitmproxy/mitmproxy/command.py", line 129, in call
ret = self.func(*bound_args.args, **bound_args.kwargs)
File "/Users/korran/Projects/mitmproxy/mitmproxy/command.py", line 303, in wrapper
return function(*args, **kwargs)
File "/Users/korran/Projects/mitmproxy/mitmproxy/addons/export.py", line 189, in clip
v = strutils.always_str(func(flow))
File "/Users/korran/Projects/mitmproxy/mitmproxy/addons/export.py", line 121, in raw
return b"".join([raw_request(f), separator, raw_response(f)])
File "/Users/korran/Projects/mitmproxy/mitmproxy/addons/export.py", line 109, in raw_response
return assemble.assemble_response(cleanup_response(f))
File "/Users/korran/Projects/mitmproxy/mitmproxy/net/http/http1/assemble.py", line 17, in assemble_response
raise ValueError("Cannot assemble flow with missing content")
ValueError: Cannot assemble flow with missing content
```
#### Steps to reproduce the behavior:
1. Run mitmproxy with `--set stream_large_bodies=10k`
2. `curl -x 127.0.0.1 www.google.com 1>/dev/null`
3. `:export.clip raw @focus`
#### System Information
```
Mitmproxy: 8.0.0.dev (+17, commit 13131e2)
Python: 3.9.6
OpenSSL: OpenSSL 1.1.1i 8 Dec 2020
Platform: macOS-11.5.1-x86_64-i386-64bit
```
This exception is explicitly raised when `flow.response.content` is `None`, but I think it's still valuable to export whatever is available in a flow. I was thinking about setting some artificial content (or `b''`) for streamed request/response, but it doesn't seem like a good idea.
--- END ISSUE ---
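
For context, the crash is straightforward to reproduce outside the console UI: a streamed message keeps `raw_content` as `None`, and `assemble_response` refuses to serialize it. The snippet below is a sketch only; the `tflow` test helper and the setup are assumptions used for illustration, not part of the reported flow.

```python
# Sketch of the failure mode: a streamed body leaves raw_content as None, and
# http1.assemble then raises. The tflow test helper is used only for illustration.
from mitmproxy.net.http.http1 import assemble
from mitmproxy.test import tflow

f = tflow.tflow(resp=True)
f.response.raw_content = None  # what a streamed response body looks like

try:
    assemble.assemble_response(f.response)
except ValueError as e:
    print(e)  # "Cannot assemble flow with missing content"

# An export-side guard can report a friendly error instead of crashing the UI:
if f.response.raw_content is None:
    print("Response content missing - cannot export the raw response.")
```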
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/export.py`
Content:
```
1 import shlex
2 import typing
3
4 import pyperclip
5
6 import mitmproxy.types
7 from mitmproxy import command
8 from mitmproxy import ctx, http
9 from mitmproxy import exceptions
10 from mitmproxy import flow
11 from mitmproxy.net.http.http1 import assemble
12 from mitmproxy.utils import strutils
13
14
15 def cleanup_request(f: flow.Flow) -> http.Request:
16 if not getattr(f, "request", None):
17 raise exceptions.CommandError("Can't export flow with no request.")
18 assert isinstance(f, http.HTTPFlow)
19 request = f.request.copy()
20 request.decode(strict=False)
21 return request
22
23
24 def pop_headers(request: http.Request) -> http.Request:
25 # Remove some headers that are redundant for curl/httpie export
26 request.headers.pop('content-length')
27 if request.headers.get("host", "") == request.host:
28 request.headers.pop("host")
29 if request.headers.get(":authority", "") == request.host:
30 request.headers.pop(":authority")
31 return request
32
33
34 def cleanup_response(f: flow.Flow) -> http.Response:
35 if not getattr(f, "response", None):
36 raise exceptions.CommandError("Can't export flow with no response.")
37 assert isinstance(f, http.HTTPFlow)
38 response = f.response.copy() # type: ignore
39 response.decode(strict=False)
40 return response
41
42
43 def request_content_for_console(request: http.Request) -> str:
44 try:
45 text = request.get_text(strict=True)
46 assert text
47 except ValueError:
48 # shlex.quote doesn't support a bytes object
49 # see https://github.com/python/cpython/pull/10871
50 raise exceptions.CommandError("Request content must be valid unicode")
51 escape_control_chars = {chr(i): f"\\x{i:02x}" for i in range(32)}
52 return "".join(
53 escape_control_chars.get(x, x)
54 for x in text
55 )
56
57
58 def curl_command(f: flow.Flow, preserve_ip: bool = False) -> str:
59 request = cleanup_request(f)
60 request = pop_headers(request)
61 args = ["curl"]
62
63 server_addr = f.server_conn.peername[0] if f.server_conn.peername else None
64
65 if preserve_ip and server_addr and request.pretty_host != server_addr:
66 resolve = "{}:{}:[{}]".format(request.pretty_host, request.port, server_addr)
67 args.append("--resolve")
68 args.append(resolve)
69
70 for k, v in request.headers.items(multi=True):
71 if k.lower() == "accept-encoding":
72 args.append("--compressed")
73 else:
74 args += ["-H", f"{k}: {v}"]
75
76 if request.method != "GET":
77 args += ["-X", request.method]
78
79 args.append(request.pretty_url)
80
81 if request.content:
82 args += ["-d", request_content_for_console(request)]
83 return ' '.join(shlex.quote(arg) for arg in args)
84
85
86 def httpie_command(f: flow.Flow) -> str:
87 request = cleanup_request(f)
88 request = pop_headers(request)
89
90 # TODO: Once https://github.com/httpie/httpie/issues/414 is implemented, we
91 # should ensure we always connect to the IP address specified in the flow,
92 # similar to how it's done in curl_command.
93 url = request.pretty_url
94
95 args = ["http", request.method, url]
96 for k, v in request.headers.items(multi=True):
97 args.append(f"{k}: {v}")
98 cmd = ' '.join(shlex.quote(arg) for arg in args)
99 if request.content:
100 cmd += " <<< " + shlex.quote(request_content_for_console(request))
101 return cmd
102
103
104 def raw_request(f: flow.Flow) -> bytes:
105 return assemble.assemble_request(cleanup_request(f))
106
107
108 def raw_response(f: flow.Flow) -> bytes:
109 return assemble.assemble_response(cleanup_response(f))
110
111
112 def raw(f: flow.Flow, separator=b"\r\n\r\n") -> bytes:
113 """Return either the request or response if only one exists, otherwise return both"""
114 request_present = hasattr(f, "request") and f.request # type: ignore
115 response_present = hasattr(f, "response") and f.response # type: ignore
116
117 if not (request_present or response_present):
118 raise exceptions.CommandError("Can't export flow with no request or response.")
119
120 if request_present and response_present:
121 return b"".join([raw_request(f), separator, raw_response(f)])
122 elif not request_present:
123 return raw_response(f)
124 else:
125 return raw_request(f)
126
127
128 formats = dict(
129 curl=curl_command,
130 httpie=httpie_command,
131 raw=raw,
132 raw_request=raw_request,
133 raw_response=raw_response,
134 )
135
136
137 class Export():
138 def load(self, loader):
139 loader.add_option(
140 "export_preserve_original_ip", bool, False,
141 """
142 When exporting a request as an external command, make an effort to
143 connect to the same IP as in the original request. This helps with
144 reproducibility in cases where the behaviour depends on the
145 particular host we are connecting to. Currently this only affects
146 curl exports.
147 """
148 )
149
150 @command.command("export.formats")
151 def formats(self) -> typing.Sequence[str]:
152 """
153 Return a list of the supported export formats.
154 """
155 return list(sorted(formats.keys()))
156
157 @command.command("export.file")
158 def file(self, format: str, flow: flow.Flow, path: mitmproxy.types.Path) -> None:
159 """
160 Export a flow to path.
161 """
162 if format not in formats:
163 raise exceptions.CommandError("No such export format: %s" % format)
164 func: typing.Any = formats[format]
165 if format == "curl":
166 v = func(flow, preserve_ip=ctx.options.export_preserve_original_ip)
167 else:
168 v = func(flow)
169 try:
170 with open(path, "wb") as fp:
171 if isinstance(v, bytes):
172 fp.write(v)
173 else:
174 fp.write(v.encode("utf-8"))
175 except OSError as e:
176 ctx.log.error(str(e))
177
178 @command.command("export.clip")
179 def clip(self, format: str, flow: flow.Flow) -> None:
180 """
181 Export a flow to the system clipboard.
182 """
183 if format not in formats:
184 raise exceptions.CommandError("No such export format: %s" % format)
185 func: typing.Any = formats[format]
186 if format == "curl":
187 v = strutils.always_str(func(flow, preserve_ip=ctx.options.export_preserve_original_ip))
188 else:
189 v = strutils.always_str(func(flow))
190 try:
191 pyperclip.copy(v)
192 except pyperclip.PyperclipException as e:
193 ctx.log.error(str(e))
194
```
--- END FILES ---
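
One detail in the file above that is easy to miss: before a request body is quoted for curl/httpie, control characters are escaped so the resulting shell command stays printable. The mapping is plain Python and can be shown standalone; the sample body below is an assumption for illustration.

```python
# Standalone illustration of the control-character escaping used by
# request_content_for_console; no mitmproxy imports are needed for this part.
escape_control_chars = {chr(i): f"\\x{i:02x}" for i in range(32)}
body = "a=1\r\nb=2"
print("".join(escape_control_chars.get(x, x) for x in body))  # -> a=1\x0d\x0ab=2
```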
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/addons/export.py b/mitmproxy/addons/export.py
--- a/mitmproxy/addons/export.py
+++ b/mitmproxy/addons/export.py
@@ -55,14 +55,14 @@
)
-def curl_command(f: flow.Flow, preserve_ip: bool = False) -> str:
+def curl_command(f: flow.Flow) -> str:
request = cleanup_request(f)
request = pop_headers(request)
args = ["curl"]
server_addr = f.server_conn.peername[0] if f.server_conn.peername else None
- if preserve_ip and server_addr and request.pretty_host != server_addr:
+ if ctx.options.export_preserve_original_ip and server_addr and request.pretty_host != server_addr:
resolve = "{}:{}:[{}]".format(request.pretty_host, request.port, server_addr)
args.append("--resolve")
args.append(resolve)
@@ -102,30 +102,35 @@
def raw_request(f: flow.Flow) -> bytes:
- return assemble.assemble_request(cleanup_request(f))
+ request = cleanup_request(f)
+ if request.raw_content is None:
+ raise exceptions.CommandError("Request content missing.")
+ return assemble.assemble_request(request)
def raw_response(f: flow.Flow) -> bytes:
- return assemble.assemble_response(cleanup_response(f))
+ response = cleanup_response(f)
+ if response.raw_content is None:
+ raise exceptions.CommandError("Response content missing.")
+ return assemble.assemble_response(response)
def raw(f: flow.Flow, separator=b"\r\n\r\n") -> bytes:
"""Return either the request or response if only one exists, otherwise return both"""
- request_present = hasattr(f, "request") and f.request # type: ignore
- response_present = hasattr(f, "response") and f.response # type: ignore
-
- if not (request_present or response_present):
- raise exceptions.CommandError("Can't export flow with no request or response.")
+ request_present = isinstance(f, http.HTTPFlow) and f.request and f.request.raw_content is not None
+ response_present = isinstance(f, http.HTTPFlow) and f.response and f.response.raw_content is not None
if request_present and response_present:
return b"".join([raw_request(f), separator, raw_response(f)])
- elif not request_present:
+ elif request_present:
+ return raw_request(f)
+ elif response_present:
return raw_response(f)
else:
- return raw_request(f)
+ raise exceptions.CommandError("Can't export flow with no request or response.")
-formats = dict(
+formats: typing.Dict[str, typing.Callable[[flow.Flow], typing.Union[str, bytes]]] = dict(
curl=curl_command,
httpie=httpie_command,
raw=raw,
@@ -134,7 +139,7 @@
)
-class Export():
+class Export:
def load(self, loader):
loader.add_option(
"export_preserve_original_ip", bool, False,
@@ -162,10 +167,7 @@
if format not in formats:
raise exceptions.CommandError("No such export format: %s" % format)
func: typing.Any = formats[format]
- if format == "curl":
- v = func(flow, preserve_ip=ctx.options.export_preserve_original_ip)
- else:
- v = func(flow)
+ v = func(flow)
try:
with open(path, "wb") as fp:
if isinstance(v, bytes):
@@ -176,18 +178,16 @@
ctx.log.error(str(e))
@command.command("export.clip")
- def clip(self, format: str, flow: flow.Flow) -> None:
+ def clip(self, format: str, f: flow.Flow) -> None:
"""
Export a flow to the system clipboard.
"""
if format not in formats:
raise exceptions.CommandError("No such export format: %s" % format)
- func: typing.Any = formats[format]
- if format == "curl":
- v = strutils.always_str(func(flow, preserve_ip=ctx.options.export_preserve_original_ip))
- else:
- v = strutils.always_str(func(flow))
+ func = formats[format]
+
+ val = strutils.always_str(func(f), "utf8", "backslashreplace")
try:
- pyperclip.copy(v)
+ pyperclip.copy(val)
except pyperclip.PyperclipException as e:
ctx.log.error(str(e))
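
Worth noting about the patched `clip` in the diff above: the raw exporters return bytes that may not be valid UTF-8 (binary bodies), which is why the decode now goes through `backslashreplace`. A tiny sketch, assuming mitmproxy's `strutils` is importable:

```python
# Why the patched clip() decodes with "backslashreplace": raw exports are bytes and
# may contain non-UTF-8 body data, which must not crash the clipboard path.
from mitmproxy.utils import strutils

raw_bytes = b"HTTP/1.1 200 OK\r\n\r\n\xff\xfe"  # body bytes that are not valid UTF-8
text = strutils.always_str(raw_bytes, "utf8", "backslashreplace")
print(text)  # never raises; the bad bytes come out as \xff\xfe escapes
```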
|
{"golden_diff": "diff --git a/mitmproxy/addons/export.py b/mitmproxy/addons/export.py\n--- a/mitmproxy/addons/export.py\n+++ b/mitmproxy/addons/export.py\n@@ -55,14 +55,14 @@\n )\n \n \n-def curl_command(f: flow.Flow, preserve_ip: bool = False) -> str:\n+def curl_command(f: flow.Flow) -> str:\n request = cleanup_request(f)\n request = pop_headers(request)\n args = [\"curl\"]\n \n server_addr = f.server_conn.peername[0] if f.server_conn.peername else None\n \n- if preserve_ip and server_addr and request.pretty_host != server_addr:\n+ if ctx.options.export_preserve_original_ip and server_addr and request.pretty_host != server_addr:\n resolve = \"{}:{}:[{}]\".format(request.pretty_host, request.port, server_addr)\n args.append(\"--resolve\")\n args.append(resolve)\n@@ -102,30 +102,35 @@\n \n \n def raw_request(f: flow.Flow) -> bytes:\n- return assemble.assemble_request(cleanup_request(f))\n+ request = cleanup_request(f)\n+ if request.raw_content is None:\n+ raise exceptions.CommandError(\"Request content missing.\")\n+ return assemble.assemble_request(request)\n \n \n def raw_response(f: flow.Flow) -> bytes:\n- return assemble.assemble_response(cleanup_response(f))\n+ response = cleanup_response(f)\n+ if response.raw_content is None:\n+ raise exceptions.CommandError(\"Response content missing.\")\n+ return assemble.assemble_response(response)\n \n \n def raw(f: flow.Flow, separator=b\"\\r\\n\\r\\n\") -> bytes:\n \"\"\"Return either the request or response if only one exists, otherwise return both\"\"\"\n- request_present = hasattr(f, \"request\") and f.request # type: ignore\n- response_present = hasattr(f, \"response\") and f.response # type: ignore\n-\n- if not (request_present or response_present):\n- raise exceptions.CommandError(\"Can't export flow with no request or response.\")\n+ request_present = isinstance(f, http.HTTPFlow) and f.request and f.request.raw_content is not None\n+ response_present = isinstance(f, http.HTTPFlow) and f.response and f.response.raw_content is not None\n \n if request_present and response_present:\n return b\"\".join([raw_request(f), separator, raw_response(f)])\n- elif not request_present:\n+ elif request_present:\n+ return raw_request(f)\n+ elif response_present:\n return raw_response(f)\n else:\n- return raw_request(f)\n+ raise exceptions.CommandError(\"Can't export flow with no request or response.\")\n \n \n-formats = dict(\n+formats: typing.Dict[str, typing.Callable[[flow.Flow], typing.Union[str, bytes]]] = dict(\n curl=curl_command,\n httpie=httpie_command,\n raw=raw,\n@@ -134,7 +139,7 @@\n )\n \n \n-class Export():\n+class Export:\n def load(self, loader):\n loader.add_option(\n \"export_preserve_original_ip\", bool, False,\n@@ -162,10 +167,7 @@\n if format not in formats:\n raise exceptions.CommandError(\"No such export format: %s\" % format)\n func: typing.Any = formats[format]\n- if format == \"curl\":\n- v = func(flow, preserve_ip=ctx.options.export_preserve_original_ip)\n- else:\n- v = func(flow)\n+ v = func(flow)\n try:\n with open(path, \"wb\") as fp:\n if isinstance(v, bytes):\n@@ -176,18 +178,16 @@\n ctx.log.error(str(e))\n \n @command.command(\"export.clip\")\n- def clip(self, format: str, flow: flow.Flow) -> None:\n+ def clip(self, format: str, f: flow.Flow) -> None:\n \"\"\"\n Export a flow to the system clipboard.\n \"\"\"\n if format not in formats:\n raise exceptions.CommandError(\"No such export format: %s\" % format)\n- func: typing.Any = formats[format]\n- if format == \"curl\":\n- v = strutils.always_str(func(flow, 
preserve_ip=ctx.options.export_preserve_original_ip))\n- else:\n- v = strutils.always_str(func(flow))\n+ func = formats[format]\n+\n+ val = strutils.always_str(func(f), \"utf8\", \"backslashreplace\")\n try:\n- pyperclip.copy(v)\n+ pyperclip.copy(val)\n except pyperclip.PyperclipException as e:\n ctx.log.error(str(e))\n", "issue": "\"Cannot assemble flow with missing content\" when exporting streamed flow in raw format\n#### Problem Description\r\nWhen a response (and possibly request, haven't checked that) is streamed and user tries to command `:export.clip raw @focus`, there is an exception:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/master.py\", line 54, in run_loop\r\n loop()\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py\", line 287, in run\r\n self._run()\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py\", line 385, in _run\r\n self.event_loop.run()\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py\", line 1494, in run\r\n reraise(*exc_info)\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/compat.py\", line 58, in reraise\r\n raise value\r\n File \"/usr/local/Cellar/[email protected]/3.9.6/Frameworks/Python.framework/Versions/3.9/lib/python3.9/asyncio/events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/raw_display.py\", line 416, in <lambda>\r\n wrapper = lambda: self.parse_input(\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/raw_display.py\", line 515, in parse_input\r\n callback(processed, processed_codes)\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py\", line 412, in _update\r\n self.process_input(keys)\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/main_loop.py\", line 513, in process_input\r\n k = self._topmost_widget.keypress(self.screen_size, k)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/window.py\", line 316, in keypress\r\n k = super().keypress(size, k)\r\n File \"/Users/korran/Projects/mitmproxy/venv/lib/python3.9/site-packages/urwid/container.py\", line 1123, in keypress\r\n return self.footer.keypress((maxcol,),key)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py\", line 201, in keypress\r\n return self.ab.keypress(*args, **kwargs)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py\", line 149, in keypress\r\n self.prompt_execute(text)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py\", line 169, in prompt_execute\r\n msg = p(txt)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/statusbar.py\", line 115, in execute_command\r\n execute(txt)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/tools/console/commandexecutor.py\", line 18, in __call__\r\n ret = self.master.commands.execute(cmd)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/command.py\", line 273, in execute\r\n return self.call_strings(command_name, args)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/command.py\", line 259, in call_strings\r\n return self.commands[command_name].call(args)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/command.py\", line 129, in call\r\n ret = 
self.func(*bound_args.args, **bound_args.kwargs)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/command.py\", line 303, in wrapper\r\n return function(*args, **kwargs)\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/addons/export.py\", line 189, in clip\r\n v = strutils.always_str(func(flow))\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/addons/export.py\", line 121, in raw\r\n return b\"\".join([raw_request(f), separator, raw_response(f)])\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/addons/export.py\", line 109, in raw_response\r\n return assemble.assemble_response(cleanup_response(f))\r\n File \"/Users/korran/Projects/mitmproxy/mitmproxy/net/http/http1/assemble.py\", line 17, in assemble_response\r\n raise ValueError(\"Cannot assemble flow with missing content\")\r\nValueError: Cannot assemble flow with missing content\r\n```\r\n#### Steps to reproduce the behavior:\r\n1. Run mitmproxy with `--set stream_large_bodies=10k`\r\n2. `curl -x 127.0.0.1 www.google.com 1>/dev/null`\r\n3. `:export.clip raw @focus`\r\n\r\n#### System Information\r\n```\r\nMitmproxy: 8.0.0.dev (+17, commit 13131e2)\r\nPython: 3.9.6\r\nOpenSSL: OpenSSL 1.1.1i 8 Dec 2020\r\nPlatform: macOS-11.5.1-x86_64-i386-64bit\r\n```\r\nThis exception is explicitly raised when `flow.response.content` is `None`, but I think it's still valuable to export whatever is available in a flow. I was thinking about setting some artificial content (or `b''`) for streamed request/response, but it doesn't seem like a good idea.\n", "before_files": [{"content": "import shlex\nimport typing\n\nimport pyperclip\n\nimport mitmproxy.types\nfrom mitmproxy import command\nfrom mitmproxy import ctx, http\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy.net.http.http1 import assemble\nfrom mitmproxy.utils import strutils\n\n\ndef cleanup_request(f: flow.Flow) -> http.Request:\n if not getattr(f, \"request\", None):\n raise exceptions.CommandError(\"Can't export flow with no request.\")\n assert isinstance(f, http.HTTPFlow)\n request = f.request.copy()\n request.decode(strict=False)\n return request\n\n\ndef pop_headers(request: http.Request) -> http.Request:\n # Remove some headers that are redundant for curl/httpie export\n request.headers.pop('content-length')\n if request.headers.get(\"host\", \"\") == request.host:\n request.headers.pop(\"host\")\n if request.headers.get(\":authority\", \"\") == request.host:\n request.headers.pop(\":authority\")\n return request\n\n\ndef cleanup_response(f: flow.Flow) -> http.Response:\n if not getattr(f, \"response\", None):\n raise exceptions.CommandError(\"Can't export flow with no response.\")\n assert isinstance(f, http.HTTPFlow)\n response = f.response.copy() # type: ignore\n response.decode(strict=False)\n return response\n\n\ndef request_content_for_console(request: http.Request) -> str:\n try:\n text = request.get_text(strict=True)\n assert text\n except ValueError:\n # shlex.quote doesn't support a bytes object\n # see https://github.com/python/cpython/pull/10871\n raise exceptions.CommandError(\"Request content must be valid unicode\")\n escape_control_chars = {chr(i): f\"\\\\x{i:02x}\" for i in range(32)}\n return \"\".join(\n escape_control_chars.get(x, x)\n for x in text\n )\n\n\ndef curl_command(f: flow.Flow, preserve_ip: bool = False) -> str:\n request = cleanup_request(f)\n request = pop_headers(request)\n args = [\"curl\"]\n\n server_addr = f.server_conn.peername[0] if f.server_conn.peername else None\n\n if preserve_ip and server_addr 
and request.pretty_host != server_addr:\n resolve = \"{}:{}:[{}]\".format(request.pretty_host, request.port, server_addr)\n args.append(\"--resolve\")\n args.append(resolve)\n\n for k, v in request.headers.items(multi=True):\n if k.lower() == \"accept-encoding\":\n args.append(\"--compressed\")\n else:\n args += [\"-H\", f\"{k}: {v}\"]\n\n if request.method != \"GET\":\n args += [\"-X\", request.method]\n\n args.append(request.pretty_url)\n\n if request.content:\n args += [\"-d\", request_content_for_console(request)]\n return ' '.join(shlex.quote(arg) for arg in args)\n\n\ndef httpie_command(f: flow.Flow) -> str:\n request = cleanup_request(f)\n request = pop_headers(request)\n\n # TODO: Once https://github.com/httpie/httpie/issues/414 is implemented, we\n # should ensure we always connect to the IP address specified in the flow,\n # similar to how it's done in curl_command.\n url = request.pretty_url\n\n args = [\"http\", request.method, url]\n for k, v in request.headers.items(multi=True):\n args.append(f\"{k}: {v}\")\n cmd = ' '.join(shlex.quote(arg) for arg in args)\n if request.content:\n cmd += \" <<< \" + shlex.quote(request_content_for_console(request))\n return cmd\n\n\ndef raw_request(f: flow.Flow) -> bytes:\n return assemble.assemble_request(cleanup_request(f))\n\n\ndef raw_response(f: flow.Flow) -> bytes:\n return assemble.assemble_response(cleanup_response(f))\n\n\ndef raw(f: flow.Flow, separator=b\"\\r\\n\\r\\n\") -> bytes:\n \"\"\"Return either the request or response if only one exists, otherwise return both\"\"\"\n request_present = hasattr(f, \"request\") and f.request # type: ignore\n response_present = hasattr(f, \"response\") and f.response # type: ignore\n\n if not (request_present or response_present):\n raise exceptions.CommandError(\"Can't export flow with no request or response.\")\n\n if request_present and response_present:\n return b\"\".join([raw_request(f), separator, raw_response(f)])\n elif not request_present:\n return raw_response(f)\n else:\n return raw_request(f)\n\n\nformats = dict(\n curl=curl_command,\n httpie=httpie_command,\n raw=raw,\n raw_request=raw_request,\n raw_response=raw_response,\n)\n\n\nclass Export():\n def load(self, loader):\n loader.add_option(\n \"export_preserve_original_ip\", bool, False,\n \"\"\"\n When exporting a request as an external command, make an effort to\n connect to the same IP as in the original request. This helps with\n reproducibility in cases where the behaviour depends on the\n particular host we are connecting to. 
Currently this only affects\n curl exports.\n \"\"\"\n )\n\n @command.command(\"export.formats\")\n def formats(self) -> typing.Sequence[str]:\n \"\"\"\n Return a list of the supported export formats.\n \"\"\"\n return list(sorted(formats.keys()))\n\n @command.command(\"export.file\")\n def file(self, format: str, flow: flow.Flow, path: mitmproxy.types.Path) -> None:\n \"\"\"\n Export a flow to path.\n \"\"\"\n if format not in formats:\n raise exceptions.CommandError(\"No such export format: %s\" % format)\n func: typing.Any = formats[format]\n if format == \"curl\":\n v = func(flow, preserve_ip=ctx.options.export_preserve_original_ip)\n else:\n v = func(flow)\n try:\n with open(path, \"wb\") as fp:\n if isinstance(v, bytes):\n fp.write(v)\n else:\n fp.write(v.encode(\"utf-8\"))\n except OSError as e:\n ctx.log.error(str(e))\n\n @command.command(\"export.clip\")\n def clip(self, format: str, flow: flow.Flow) -> None:\n \"\"\"\n Export a flow to the system clipboard.\n \"\"\"\n if format not in formats:\n raise exceptions.CommandError(\"No such export format: %s\" % format)\n func: typing.Any = formats[format]\n if format == \"curl\":\n v = strutils.always_str(func(flow, preserve_ip=ctx.options.export_preserve_original_ip))\n else:\n v = strutils.always_str(func(flow))\n try:\n pyperclip.copy(v)\n except pyperclip.PyperclipException as e:\n ctx.log.error(str(e))\n", "path": "mitmproxy/addons/export.py"}], "after_files": [{"content": "import shlex\nimport typing\n\nimport pyperclip\n\nimport mitmproxy.types\nfrom mitmproxy import command\nfrom mitmproxy import ctx, http\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy.net.http.http1 import assemble\nfrom mitmproxy.utils import strutils\n\n\ndef cleanup_request(f: flow.Flow) -> http.Request:\n if not getattr(f, \"request\", None):\n raise exceptions.CommandError(\"Can't export flow with no request.\")\n assert isinstance(f, http.HTTPFlow)\n request = f.request.copy()\n request.decode(strict=False)\n return request\n\n\ndef pop_headers(request: http.Request) -> http.Request:\n # Remove some headers that are redundant for curl/httpie export\n request.headers.pop('content-length')\n if request.headers.get(\"host\", \"\") == request.host:\n request.headers.pop(\"host\")\n if request.headers.get(\":authority\", \"\") == request.host:\n request.headers.pop(\":authority\")\n return request\n\n\ndef cleanup_response(f: flow.Flow) -> http.Response:\n if not getattr(f, \"response\", None):\n raise exceptions.CommandError(\"Can't export flow with no response.\")\n assert isinstance(f, http.HTTPFlow)\n response = f.response.copy() # type: ignore\n response.decode(strict=False)\n return response\n\n\ndef request_content_for_console(request: http.Request) -> str:\n try:\n text = request.get_text(strict=True)\n assert text\n except ValueError:\n # shlex.quote doesn't support a bytes object\n # see https://github.com/python/cpython/pull/10871\n raise exceptions.CommandError(\"Request content must be valid unicode\")\n escape_control_chars = {chr(i): f\"\\\\x{i:02x}\" for i in range(32)}\n return \"\".join(\n escape_control_chars.get(x, x)\n for x in text\n )\n\n\ndef curl_command(f: flow.Flow) -> str:\n request = cleanup_request(f)\n request = pop_headers(request)\n args = [\"curl\"]\n\n server_addr = f.server_conn.peername[0] if f.server_conn.peername else None\n\n if ctx.options.export_preserve_original_ip and server_addr and request.pretty_host != server_addr:\n resolve = \"{}:{}:[{}]\".format(request.pretty_host, 
request.port, server_addr)\n args.append(\"--resolve\")\n args.append(resolve)\n\n for k, v in request.headers.items(multi=True):\n if k.lower() == \"accept-encoding\":\n args.append(\"--compressed\")\n else:\n args += [\"-H\", f\"{k}: {v}\"]\n\n if request.method != \"GET\":\n args += [\"-X\", request.method]\n\n args.append(request.pretty_url)\n\n if request.content:\n args += [\"-d\", request_content_for_console(request)]\n return ' '.join(shlex.quote(arg) for arg in args)\n\n\ndef httpie_command(f: flow.Flow) -> str:\n request = cleanup_request(f)\n request = pop_headers(request)\n\n # TODO: Once https://github.com/httpie/httpie/issues/414 is implemented, we\n # should ensure we always connect to the IP address specified in the flow,\n # similar to how it's done in curl_command.\n url = request.pretty_url\n\n args = [\"http\", request.method, url]\n for k, v in request.headers.items(multi=True):\n args.append(f\"{k}: {v}\")\n cmd = ' '.join(shlex.quote(arg) for arg in args)\n if request.content:\n cmd += \" <<< \" + shlex.quote(request_content_for_console(request))\n return cmd\n\n\ndef raw_request(f: flow.Flow) -> bytes:\n request = cleanup_request(f)\n if request.raw_content is None:\n raise exceptions.CommandError(\"Request content missing.\")\n return assemble.assemble_request(request)\n\n\ndef raw_response(f: flow.Flow) -> bytes:\n response = cleanup_response(f)\n if response.raw_content is None:\n raise exceptions.CommandError(\"Response content missing.\")\n return assemble.assemble_response(response)\n\n\ndef raw(f: flow.Flow, separator=b\"\\r\\n\\r\\n\") -> bytes:\n \"\"\"Return either the request or response if only one exists, otherwise return both\"\"\"\n request_present = isinstance(f, http.HTTPFlow) and f.request and f.request.raw_content is not None\n response_present = isinstance(f, http.HTTPFlow) and f.response and f.response.raw_content is not None\n\n if request_present and response_present:\n return b\"\".join([raw_request(f), separator, raw_response(f)])\n elif request_present:\n return raw_request(f)\n elif response_present:\n return raw_response(f)\n else:\n raise exceptions.CommandError(\"Can't export flow with no request or response.\")\n\n\nformats: typing.Dict[str, typing.Callable[[flow.Flow], typing.Union[str, bytes]]] = dict(\n curl=curl_command,\n httpie=httpie_command,\n raw=raw,\n raw_request=raw_request,\n raw_response=raw_response,\n)\n\n\nclass Export:\n def load(self, loader):\n loader.add_option(\n \"export_preserve_original_ip\", bool, False,\n \"\"\"\n When exporting a request as an external command, make an effort to\n connect to the same IP as in the original request. This helps with\n reproducibility in cases where the behaviour depends on the\n particular host we are connecting to. 
Currently this only affects\n curl exports.\n \"\"\"\n )\n\n @command.command(\"export.formats\")\n def formats(self) -> typing.Sequence[str]:\n \"\"\"\n Return a list of the supported export formats.\n \"\"\"\n return list(sorted(formats.keys()))\n\n @command.command(\"export.file\")\n def file(self, format: str, flow: flow.Flow, path: mitmproxy.types.Path) -> None:\n \"\"\"\n Export a flow to path.\n \"\"\"\n if format not in formats:\n raise exceptions.CommandError(\"No such export format: %s\" % format)\n func: typing.Any = formats[format]\n v = func(flow)\n try:\n with open(path, \"wb\") as fp:\n if isinstance(v, bytes):\n fp.write(v)\n else:\n fp.write(v.encode(\"utf-8\"))\n except OSError as e:\n ctx.log.error(str(e))\n\n @command.command(\"export.clip\")\n def clip(self, format: str, f: flow.Flow) -> None:\n \"\"\"\n Export a flow to the system clipboard.\n \"\"\"\n if format not in formats:\n raise exceptions.CommandError(\"No such export format: %s\" % format)\n func = formats[format]\n\n val = strutils.always_str(func(f), \"utf8\", \"backslashreplace\")\n try:\n pyperclip.copy(val)\n except pyperclip.PyperclipException as e:\n ctx.log.error(str(e))\n", "path": "mitmproxy/addons/export.py"}]}
| 3,573 | 1,011 |
gh_patches_debug_10999
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-1021
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problematic ldap3 / pyasn1 version combinations
This is an issue to keep track of problematic combinations of ldap3 / pyasn1 versions. See #887, #911.
Functionality I check for each combination:
* STARTTLS
* Complete user info on /validate/check
* Size Limit < number of users
Everything tested on a62fd9b3d0555b43cf31be3c4092a549fd73f724
<table>
<tr>
<th></th><th>pyasn 0.1.9</th><th>pyasn 0.4.2</th>
<tr><td>ldap3 2.1.1</td><td>ok</td><td><ul><li>STARTTLS fails (#885)</li></ul></td></tr>
<tr><td>ldap3 2.3</td><td colspan="2">
<ul><li>Empty user list (#887)</li></ul></td></tr>
<tr><td>ldap3 2.4</td><td colspan="2">
<ul><li>Empty user list (#887)</li><li>Empty user info (#911)</li></ul></td></tr>
<tr><td>ldap3 2.4.1</td><td colspan="2">
<ul><li>Empty user list (#887)</li>
<li><b>Error on /validate/check and resolver error</b> in token list (#911)</li>
<li>cannot search for unicode users (#980)</li></ul></td></tr>
</table>
Combinations in current 2.21.4 packages:
* ``pip install privacyidea``:
```
ldap3==2.1.1
privacyIDEA==2.21.4
pyasn1==0.4.2
```
* CentOS:
```
ldap3==2.3
privacyIDEA==2.21.4
pyasn1==0.4.2
```
* Ubuntu (Launchpad):
```
ldap3==2.1.1
privacyIDEA==2.21.4
pyasn1==0.1.9
```
--- END ISSUE ---
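To reproduce a row of the table above it helps to know exactly which distribution versions an environment actually resolved to. A small check along these lines — a sketch using `pkg_resources` from setuptools, not anything privacyIDEA-specific — prints the installed combination:

```python
# Sketch (not part of the original report): print the installed
# ldap3 / pyasn1 / privacyIDEA combination so it can be matched against
# the table above.  Uses pkg_resources, which ships with setuptools.
import pkg_resources


def installed_versions(names=("ldap3", "pyasn1", "privacyIDEA")):
    """Return {distribution name: version or None} for the given names."""
    found = {}
    for name in names:
        try:
            found[name] = pkg_resources.get_distribution(name).version
        except pkg_resources.DistributionNotFound:
            found[name] = None
    return found


if __name__ == "__main__":
    for name, version in installed_versions().items():
        if version:
            print("{0}=={1}".format(name, version))
        else:
            print("{0}: not installed".format(name))
```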
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from setuptools import setup, find_packages
3 import os
4 import stat
5 import sys
6
7 #VERSION="2.1dev4"
8 VERSION="2.22"
9
10 # Taken from kennethreitz/requests/setup.py
11 package_directory = os.path.realpath(os.path.dirname(__file__))
12
13
14 def get_file_contents(file_path):
15 """Get the context of the file using full path name."""
16 content = ""
17 try:
18 full_path = os.path.join(package_directory, file_path)
19 content = open(full_path, 'r').read()
20 except:
21 print >> sys.stderr, "### could not open file {0!r}".format(file_path)
22 return content
23
24 def get_file_list(file_path):
25 full_path = os.path.join(package_directory, file_path)
26 file_list = os.listdir(full_path)
27 # now we need to add the path to the files
28 return [ file_path + f for f in file_list ]
29
30
31 install_requires = ["Flask>=0.10.1",
32 "Flask-Migrate>=1.2.0",
33 "Flask-SQLAlchemy>=2.0",
34 "Flask-Script>=2.0.5",
35 "Jinja2>=2.7.3",
36 "Mako>=0.9.1",
37 "MarkupSafe>=0.23",
38 "PyMySQL>=0.6.6",
39 "Pillow>=2.6.1",
40 "PyJWT>=1.3.0",
41 "PyYAML>=3.11",
42 "Pygments>=2.0.2",
43 "SQLAlchemy>=1.0.5",
44 "Werkzeug>=0.10.4",
45 "alembic>=0.6.7",
46 "argparse>=1.2.1",
47 "bcrypt>=1.1.0",
48 "beautifulsoup4>=4.3.2",
49 "cffi>=0.8.6",
50 "configobj>=5.0.6",
51 "docutils>=0.12",
52 "funcparserlib>=0.3.6",
53 "itsdangerous>=0.24",
54 "ldap3==2.1.1",
55 "netaddr>=0.7.12",
56 "passlib>=1.6.2",
57 "pyasn1>=0.1.7",
58 "pyOpenSSL>=0.15.1",
59 "pycparser>=2.10",
60 "pycrypto>=2.6.1",
61 "pyrad>=2.0",
62 "pyusb>=1.0.0b2",
63 "qrcode>=5.1",
64 "requests>=2.7.0",
65 "sqlsoup>=0.9.0",
66 "ecdsa>=0.13",
67 "lxml>=3.3",
68 "python-gnupg>=0.3.8",
69 "defusedxml>=0.4.1",
70 "flask-babel>=0.9"
71 ]
72
73 # For python 2.6 we need additional dependency importlib
74 try:
75 import importlib
76 except ImportError:
77 install_requires.append('importlib')
78
79
80 def get_man_pages(dir):
81 """
82 Get man pages in a directory.
83 :param dir:
84 :return: list of file names
85 """
86 files = os.listdir(dir)
87 r_files = []
88 for file in files:
89 if file.endswith(".1"):
90 r_files.append(dir + "/" + file)
91 return r_files
92
93
94 def get_scripts(dir):
95 """
96 Get files that are executable
97 :param dir:
98 :return: list of file names
99 """
100 files = os.listdir(dir)
101 r_files = []
102 for file in files:
103 if os.stat(dir + "/" + file)[stat.ST_MODE] & stat.S_IEXEC:
104 r_files.append(dir + "/" + file)
105 return r_files
106
107
108 setup(
109 name='privacyIDEA',
110 version=VERSION,
111 description='privacyIDEA: identity, multifactor authentication (OTP), '
112 'authorization, audit',
113 author='privacyidea.org',
114 license='AGPLv3',
115 author_email='[email protected]',
116 url='http://www.privacyidea.org',
117 keywords='OTP, two factor authentication, management, security',
118 packages=find_packages(),
119 scripts=["pi-manage"] + get_scripts("tools"),
120 extras_require={
121 'dev': ["Sphinx>=1.3.1",
122 "sphinxcontrib-httpdomain>=1.3.0"],
123 'test': ["coverage>=3.7.1",
124 "mock>=1.0.1",
125 "pyparsing>=2.0.3",
126 "nose>=1.3.4",
127 "responses>=0.4.0",
128 "six>=1.8.0"],
129 },
130 install_requires=install_requires,
131 include_package_data=True,
132 data_files=[('etc/privacyidea/',
133 ['deploy/apache/privacyideaapp.wsgi',
134 'deploy/privacyidea/dictionary',
135 'deploy/privacyidea/enckey',
136 'deploy/privacyidea/private.pem',
137 'deploy/privacyidea/public.pem']),
138 ('share/man/man1', get_man_pages("tools")),
139 ('lib/privacyidea/authmodules/FreeRADIUS',
140 ["authmodules/FreeRADIUS/LICENSE",
141 "authmodules/FreeRADIUS/privacyidea_radius.pm"]),
142 ('lib/privacyidea/authmodules/OTRS',
143 ["authmodules/OTRS/privacyIDEA.pm"]),
144 ('lib/privacyidea/migrations',
145 ["migrations/alembic.ini",
146 "migrations/env.py",
147 "migrations/README",
148 "migrations/script.py.mako"]),
149 ('lib/privacyidea/migrations/versions',
150 get_file_list("migrations/versions/"))
151 ],
152 classifiers=["Framework :: Flask",
153 "License :: OSI Approved :: "
154 "GNU Affero General Public License v3",
155 "Programming Language :: Python",
156 "Development Status :: 5 - Production/Stable",
157 "Topic :: Internet",
158 "Topic :: Security",
159 "Topic :: System ::"
160 " Systems Administration :: Authentication/Directory"
161 ],
162 #message_extractors={'privacyidea': [
163 # ('**.py', 'python', None),
164 # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},
165 zip_safe=False,
166 long_description=get_file_contents('README.rst')
167 )
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,10 +51,10 @@
"docutils>=0.12",
"funcparserlib>=0.3.6",
"itsdangerous>=0.24",
- "ldap3==2.1.1",
+ "ldap3>=2.5",
"netaddr>=0.7.12",
"passlib>=1.6.2",
- "pyasn1>=0.1.7",
+ "pyasn1>=0.4.2",
"pyOpenSSL>=0.15.1",
"pycparser>=2.10",
"pycrypto>=2.6.1",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,10 +51,10 @@\n \"docutils>=0.12\",\n \"funcparserlib>=0.3.6\",\n \"itsdangerous>=0.24\",\n- \"ldap3==2.1.1\",\n+ \"ldap3>=2.5\",\n \"netaddr>=0.7.12\",\n \"passlib>=1.6.2\",\n- \"pyasn1>=0.1.7\",\n+ \"pyasn1>=0.4.2\",\n \"pyOpenSSL>=0.15.1\",\n \"pycparser>=2.10\",\n \"pycrypto>=2.6.1\",\n", "issue": "Problematic ldap3 / pyasn1 version combinations\nThis is an issue to keep track of problematic combinations of ldap3 / pyasn1 versions. See #887, #911.\r\n\r\nFunctionality I check for each combination:\r\n* STARTTLS\r\n* Complete user info on /validate/check\r\n* Size Limit < number of users\r\n\r\nEverything tested on a62fd9b3d0555b43cf31be3c4092a549fd73f724\r\n\r\n<table>\r\n<tr>\r\n<th></th><th>pyasn 0.1.9</th><th>pyasn 0.4.2</th>\r\n<tr><td>ldap3 2.1.1</td><td>ok</td><td><ul><li>STARTTLS fails (#885)</li></ul></td></tr>\r\n<tr><td>ldap3 2.3</td><td colspan=\"2\">\r\n<ul><li>Empty user list (#887)</li></ul></td></tr>\r\n<tr><td>ldap3 2.4</td><td colspan=\"2\">\r\n<ul><li>Empty user list (#887)</li><li>Empty user info (#911)</li></ul></td></tr>\r\n<tr><td>ldap3 2.4.1</td><td colspan=2\">\r\n<ul><li>Empty user list (#887)</li>\r\n<li><b>Error on /validate/check and resolver error</b> in token list (#911)</li>\r\n<li>cannot search for unicode users (#980)</li></td></tr>\r\n</table>\r\n\r\nCombinations in current 2.21.4 packages:\r\n* ``pip install privacyidea``:\r\n```\r\nldap3==2.1.1\r\nprivacyIDEA==2.21.4\r\npyasn1==0.4.2\r\n```\r\n* CentOS:\r\n```\r\nldap3==2.3\r\nprivacyIDEA==2.21.4\r\npyasn1==0.4.2\r\n```\r\n* Ubuntu (Launchpad):\r\n```\r\nldap3==2.1.1\r\nprivacyIDEA==2.21.4\r\npyasn1==0.1.9\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION=\"2.1dev4\"\nVERSION=\"2.22\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print >> sys.stderr, \"### could not open file {0!r}\".format(file_path)\n return content\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [ file_path + f for f in file_list ]\n\n\ninstall_requires = [\"Flask>=0.10.1\",\n \"Flask-Migrate>=1.2.0\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Script>=2.0.5\",\n \"Jinja2>=2.7.3\",\n \"Mako>=0.9.1\",\n \"MarkupSafe>=0.23\",\n \"PyMySQL>=0.6.6\",\n \"Pillow>=2.6.1\",\n \"PyJWT>=1.3.0\",\n \"PyYAML>=3.11\",\n \"Pygments>=2.0.2\",\n \"SQLAlchemy>=1.0.5\",\n \"Werkzeug>=0.10.4\",\n \"alembic>=0.6.7\",\n \"argparse>=1.2.1\",\n \"bcrypt>=1.1.0\",\n \"beautifulsoup4>=4.3.2\",\n \"cffi>=0.8.6\",\n \"configobj>=5.0.6\",\n \"docutils>=0.12\",\n \"funcparserlib>=0.3.6\",\n \"itsdangerous>=0.24\",\n \"ldap3==2.1.1\",\n \"netaddr>=0.7.12\",\n \"passlib>=1.6.2\",\n \"pyasn1>=0.1.7\",\n \"pyOpenSSL>=0.15.1\",\n \"pycparser>=2.10\",\n \"pycrypto>=2.6.1\",\n \"pyrad>=2.0\",\n \"pyusb>=1.0.0b2\",\n \"qrcode>=5.1\",\n \"requests>=2.7.0\",\n \"sqlsoup>=0.9.0\",\n \"ecdsa>=0.13\",\n \"lxml>=3.3\",\n \"python-gnupg>=0.3.8\",\n \"defusedxml>=0.4.1\",\n \"flask-babel>=0.9\"\n ]\n\n# For python 2.6 we need additional dependency 
importlib\ntry:\n import importlib\nexcept ImportError:\n install_requires.append('importlib')\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'dev': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\"],\n 'test': [\"coverage>=3.7.1\",\n \"mock>=1.0.1\",\n \"pyparsing>=2.0.3\",\n \"nose>=1.3.4\",\n \"responses>=0.4.0\",\n \"six>=1.8.0\"],\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary',\n 'deploy/privacyidea/enckey',\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/authmodules/FreeRADIUS',\n [\"authmodules/FreeRADIUS/LICENSE\",\n \"authmodules/FreeRADIUS/privacyidea_radius.pm\"]),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\"))\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\"\n ],\n #message_extractors={'privacyidea': [\n # ('**.py', 'python', None),\n # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},\n zip_safe=False,\n long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nimport os\nimport stat\nimport sys\n\n#VERSION=\"2.1dev4\"\nVERSION=\"2.22\"\n\n# Taken from kennethreitz/requests/setup.py\npackage_directory = os.path.realpath(os.path.dirname(__file__))\n\n\ndef get_file_contents(file_path):\n \"\"\"Get the context of the file using full path name.\"\"\"\n content = \"\"\n try:\n full_path = os.path.join(package_directory, file_path)\n content = open(full_path, 'r').read()\n except:\n print >> sys.stderr, \"### could not open file {0!r}\".format(file_path)\n return content\n\ndef get_file_list(file_path):\n full_path = os.path.join(package_directory, file_path)\n file_list = os.listdir(full_path)\n # now we need to add the path to the files\n return [ file_path + f for f in file_list ]\n\n\ninstall_requires = [\"Flask>=0.10.1\",\n 
\"Flask-Migrate>=1.2.0\",\n \"Flask-SQLAlchemy>=2.0\",\n \"Flask-Script>=2.0.5\",\n \"Jinja2>=2.7.3\",\n \"Mako>=0.9.1\",\n \"MarkupSafe>=0.23\",\n \"PyMySQL>=0.6.6\",\n \"Pillow>=2.6.1\",\n \"PyJWT>=1.3.0\",\n \"PyYAML>=3.11\",\n \"Pygments>=2.0.2\",\n \"SQLAlchemy>=1.0.5\",\n \"Werkzeug>=0.10.4\",\n \"alembic>=0.6.7\",\n \"argparse>=1.2.1\",\n \"bcrypt>=1.1.0\",\n \"beautifulsoup4>=4.3.2\",\n \"cffi>=0.8.6\",\n \"configobj>=5.0.6\",\n \"docutils>=0.12\",\n \"funcparserlib>=0.3.6\",\n \"itsdangerous>=0.24\",\n \"ldap3>=2.5\",\n \"netaddr>=0.7.12\",\n \"passlib>=1.6.2\",\n \"pyasn1>=0.4.2\",\n \"pyOpenSSL>=0.15.1\",\n \"pycparser>=2.10\",\n \"pycrypto>=2.6.1\",\n \"pyrad>=2.0\",\n \"pyusb>=1.0.0b2\",\n \"qrcode>=5.1\",\n \"requests>=2.7.0\",\n \"sqlsoup>=0.9.0\",\n \"ecdsa>=0.13\",\n \"lxml>=3.3\",\n \"python-gnupg>=0.3.8\",\n \"defusedxml>=0.4.1\",\n \"flask-babel>=0.9\"\n ]\n\n# For python 2.6 we need additional dependency importlib\ntry:\n import importlib\nexcept ImportError:\n install_requires.append('importlib')\n\n\ndef get_man_pages(dir):\n \"\"\"\n Get man pages in a directory.\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if file.endswith(\".1\"):\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\ndef get_scripts(dir):\n \"\"\"\n Get files that are executable\n :param dir: \n :return: list of file names\n \"\"\"\n files = os.listdir(dir)\n r_files = []\n for file in files:\n if os.stat(dir + \"/\" + file)[stat.ST_MODE] & stat.S_IEXEC:\n r_files.append(dir + \"/\" + file)\n return r_files\n\n\nsetup(\n name='privacyIDEA',\n version=VERSION,\n description='privacyIDEA: identity, multifactor authentication (OTP), '\n 'authorization, audit',\n author='privacyidea.org',\n license='AGPLv3',\n author_email='[email protected]',\n url='http://www.privacyidea.org',\n keywords='OTP, two factor authentication, management, security',\n packages=find_packages(),\n scripts=[\"pi-manage\"] + get_scripts(\"tools\"),\n extras_require={\n 'dev': [\"Sphinx>=1.3.1\",\n \"sphinxcontrib-httpdomain>=1.3.0\"],\n 'test': [\"coverage>=3.7.1\",\n \"mock>=1.0.1\",\n \"pyparsing>=2.0.3\",\n \"nose>=1.3.4\",\n \"responses>=0.4.0\",\n \"six>=1.8.0\"],\n },\n install_requires=install_requires,\n include_package_data=True,\n data_files=[('etc/privacyidea/',\n ['deploy/apache/privacyideaapp.wsgi',\n 'deploy/privacyidea/dictionary',\n 'deploy/privacyidea/enckey',\n 'deploy/privacyidea/private.pem',\n 'deploy/privacyidea/public.pem']),\n ('share/man/man1', get_man_pages(\"tools\")),\n ('lib/privacyidea/authmodules/FreeRADIUS',\n [\"authmodules/FreeRADIUS/LICENSE\",\n \"authmodules/FreeRADIUS/privacyidea_radius.pm\"]),\n ('lib/privacyidea/authmodules/OTRS',\n [\"authmodules/OTRS/privacyIDEA.pm\"]),\n ('lib/privacyidea/migrations',\n [\"migrations/alembic.ini\",\n \"migrations/env.py\",\n \"migrations/README\",\n \"migrations/script.py.mako\"]),\n ('lib/privacyidea/migrations/versions',\n get_file_list(\"migrations/versions/\"))\n ],\n classifiers=[\"Framework :: Flask\",\n \"License :: OSI Approved :: \"\n \"GNU Affero General Public License v3\",\n \"Programming Language :: Python\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Internet\",\n \"Topic :: Security\",\n \"Topic :: System ::\"\n \" Systems Administration :: Authentication/Directory\"\n ],\n #message_extractors={'privacyidea': [\n # ('**.py', 'python', None),\n # ('static/**.html', 'html', {'input_encoding': 'utf-8'})]},\n zip_safe=False,\n 
long_description=get_file_contents('README.rst')\n)\n", "path": "setup.py"}]}
| 2,547 | 173 |
gh_patches_debug_4573
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-235
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some versions of git don't create .git/hooks directory
Noticed here: https://github.com/victorlin/bugbuzz-python/pull/1#issuecomment-104971132
--- END ISSUE ---
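The failure mode is that `install()` writes the hook with `io.open(hook_path, 'w')`, which raises if `.git/hooks` does not exist. A defensive pattern — a sketch of the general fix, not necessarily the project's exact patch — is to create the parent directory before writing:

```python
import os


def ensure_parent_dir(path):
    """Create the directory that will contain `path`, if it is missing."""
    parent = os.path.dirname(path)
    if parent and not os.path.exists(parent):
        os.makedirs(parent)  # os.makedirs(parent, exist_ok=True) on Python 3

# Illustrative usage with a hook path shaped like the one install() computes:
ensure_parent_dir(os.path.join(".git", "hooks", "pre-commit"))
```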
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/install_uninstall.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import io
5 import logging
6 import os
7 import os.path
8 import stat
9 import sys
10
11 from pre_commit.logging_handler import LoggingHandler
12 from pre_commit.util import resource_filename
13
14
15 logger = logging.getLogger('pre_commit')
16
17
18 # This is used to identify the hook file we install
19 PREVIOUS_IDENTIFYING_HASHES = (
20 '4d9958c90bc262f47553e2c073f14cfe',
21 'd8ee923c46731b42cd95cc869add4062',
22 '49fd668cb42069aa1b6048464be5d395',
23 )
24
25
26 IDENTIFYING_HASH = '79f09a650522a87b0da915d0d983b2de'
27
28
29 def is_our_pre_commit(filename):
30 return IDENTIFYING_HASH in io.open(filename).read()
31
32
33 def is_previous_pre_commit(filename):
34 contents = io.open(filename).read()
35 return any(hash in contents for hash in PREVIOUS_IDENTIFYING_HASHES)
36
37
38 def make_executable(filename):
39 original_mode = os.stat(filename).st_mode
40 os.chmod(
41 filename,
42 original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
43 )
44
45
46 def install(runner, overwrite=False, hooks=False, hook_type='pre-commit'):
47 """Install the pre-commit hooks."""
48 hook_path = runner.get_hook_path(hook_type)
49 legacy_path = hook_path + '.legacy'
50
51 # If we have an existing hook, move it to pre-commit.legacy
52 if (
53 os.path.exists(hook_path) and
54 not is_our_pre_commit(hook_path) and
55 not is_previous_pre_commit(hook_path)
56 ):
57 os.rename(hook_path, legacy_path)
58
59 # If we specify overwrite, we simply delete the legacy file
60 if overwrite and os.path.exists(legacy_path):
61 os.remove(legacy_path)
62 elif os.path.exists(legacy_path):
63 print(
64 'Running in migration mode with existing hooks at {0}\n'
65 'Use -f to use only pre-commit.'.format(
66 legacy_path,
67 )
68 )
69
70 with io.open(hook_path, 'w') as pre_commit_file_obj:
71 if hook_type == 'pre-push':
72 with io.open(resource_filename('pre-push-tmpl')) as fp:
73 pre_push_contents = fp.read()
74 else:
75 pre_push_contents = ''
76
77 contents = io.open(resource_filename('hook-tmpl')).read().format(
78 sys_executable=sys.executable,
79 hook_type=hook_type,
80 pre_push=pre_push_contents,
81 )
82 pre_commit_file_obj.write(contents)
83 make_executable(hook_path)
84
85 print('pre-commit installed at {0}'.format(hook_path))
86
87 # If they requested we install all of the hooks, do so.
88 if hooks:
89 # Set up our logging handler
90 logger.addHandler(LoggingHandler(False))
91 logger.setLevel(logging.INFO)
92 for repository in runner.repositories:
93 repository.require_installed()
94
95 return 0
96
97
98 def uninstall(runner, hook_type='pre-commit'):
99 """Uninstall the pre-commit hooks."""
100 hook_path = runner.get_hook_path(hook_type)
101 legacy_path = hook_path + '.legacy'
102 # If our file doesn't exist or it isn't ours, gtfo.
103 if (
104 not os.path.exists(hook_path) or (
105 not is_our_pre_commit(hook_path) and
106 not is_previous_pre_commit(hook_path)
107 )
108 ):
109 return 0
110
111 os.remove(hook_path)
112 print('{0} uninstalled'.format(hook_type))
113
114 if os.path.exists(legacy_path):
115 os.rename(legacy_path, hook_path)
116 print('Restored previous hooks to {0}'.format(hook_path))
117
118 return 0
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -48,6 +48,9 @@
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
+ if not os.path.exists(os.path.dirname(hook_path)):
+ os.makedirs(os.path.dirname(hook_path))
+
# If we have an existing hook, move it to pre-commit.legacy
if (
os.path.exists(hook_path) and
|
{"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -48,6 +48,9 @@\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n \n+ if not os.path.exists(os.path.dirname(hook_path)):\n+ os.makedirs(os.path.dirname(hook_path))\n+\n # If we have an existing hook, move it to pre-commit.legacy\n if (\n os.path.exists(hook_path) and\n", "issue": "Some versions of git don't create .git/hooks directory\nNoticed here: https://github.com/victorlin/bugbuzz-python/pull/1#issuecomment-104971132\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os\nimport os.path\nimport stat\nimport sys\n\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.util import resource_filename\n\n\nlogger = logging.getLogger('pre_commit')\n\n\n# This is used to identify the hook file we install\nPREVIOUS_IDENTIFYING_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n)\n\n\nIDENTIFYING_HASH = '79f09a650522a87b0da915d0d983b2de'\n\n\ndef is_our_pre_commit(filename):\n return IDENTIFYING_HASH in io.open(filename).read()\n\n\ndef is_previous_pre_commit(filename):\n contents = io.open(filename).read()\n return any(hash in contents for hash in PREVIOUS_IDENTIFYING_HASHES)\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\ndef install(runner, overwrite=False, hooks=False, hook_type='pre-commit'):\n \"\"\"Install the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n\n # If we have an existing hook, move it to pre-commit.legacy\n if (\n os.path.exists(hook_path) and\n not is_our_pre_commit(hook_path) and\n not is_previous_pre_commit(hook_path)\n ):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n print(\n 'Running in migration mode with existing hooks at {0}\\n'\n 'Use -f to use only pre-commit.'.format(\n legacy_path,\n )\n )\n\n with io.open(hook_path, 'w') as pre_commit_file_obj:\n if hook_type == 'pre-push':\n with io.open(resource_filename('pre-push-tmpl')) as fp:\n pre_push_contents = fp.read()\n else:\n pre_push_contents = ''\n\n contents = io.open(resource_filename('hook-tmpl')).read().format(\n sys_executable=sys.executable,\n hook_type=hook_type,\n pre_push=pre_push_contents,\n )\n pre_commit_file_obj.write(contents)\n make_executable(hook_path)\n\n print('pre-commit installed at {0}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n # Set up our logging handler\n logger.addHandler(LoggingHandler(False))\n logger.setLevel(logging.INFO)\n for repository in runner.repositories:\n repository.require_installed()\n\n return 0\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if (\n not os.path.exists(hook_path) or (\n not is_our_pre_commit(hook_path) and\n not 
is_previous_pre_commit(hook_path)\n )\n ):\n return 0\n\n os.remove(hook_path)\n print('{0} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n print('Restored previous hooks to {0}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport logging\nimport os\nimport os.path\nimport stat\nimport sys\n\nfrom pre_commit.logging_handler import LoggingHandler\nfrom pre_commit.util import resource_filename\n\n\nlogger = logging.getLogger('pre_commit')\n\n\n# This is used to identify the hook file we install\nPREVIOUS_IDENTIFYING_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n)\n\n\nIDENTIFYING_HASH = '79f09a650522a87b0da915d0d983b2de'\n\n\ndef is_our_pre_commit(filename):\n return IDENTIFYING_HASH in io.open(filename).read()\n\n\ndef is_previous_pre_commit(filename):\n contents = io.open(filename).read()\n return any(hash in contents for hash in PREVIOUS_IDENTIFYING_HASHES)\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename,\n original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\ndef install(runner, overwrite=False, hooks=False, hook_type='pre-commit'):\n \"\"\"Install the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n\n if not os.path.exists(os.path.dirname(hook_path)):\n os.makedirs(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if (\n os.path.exists(hook_path) and\n not is_our_pre_commit(hook_path) and\n not is_previous_pre_commit(hook_path)\n ):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif os.path.exists(legacy_path):\n print(\n 'Running in migration mode with existing hooks at {0}\\n'\n 'Use -f to use only pre-commit.'.format(\n legacy_path,\n )\n )\n\n with io.open(hook_path, 'w') as pre_commit_file_obj:\n if hook_type == 'pre-push':\n with io.open(resource_filename('pre-push-tmpl')) as fp:\n pre_push_contents = fp.read()\n else:\n pre_push_contents = ''\n\n contents = io.open(resource_filename('hook-tmpl')).read().format(\n sys_executable=sys.executable,\n hook_type=hook_type,\n pre_push=pre_push_contents,\n )\n pre_commit_file_obj.write(contents)\n make_executable(hook_path)\n\n print('pre-commit installed at {0}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n # Set up our logging handler\n logger.addHandler(LoggingHandler(False))\n logger.setLevel(logging.INFO)\n for repository in runner.repositories:\n repository.require_installed()\n\n return 0\n\n\ndef uninstall(runner, hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path = runner.get_hook_path(hook_type)\n legacy_path = hook_path + '.legacy'\n # If our file doesn't exist or it isn't ours, gtfo.\n if (\n not os.path.exists(hook_path) or (\n not is_our_pre_commit(hook_path) and\n not is_previous_pre_commit(hook_path)\n )\n ):\n return 0\n\n os.remove(hook_path)\n print('{0} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n print('Restored previous hooks to {0}'.format(hook_path))\n\n return 0\n", "path": 
"pre_commit/commands/install_uninstall.py"}]}
| 1,446 | 135 |
gh_patches_debug_9953
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-2447
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Server error on some book imports
**Describe the bug**
When importing some books the application crashes, with "Server Error Something went wrong! Sorry about that."
```
Internal Server Error: /resolve-book
Traceback (most recent call last):
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/contrib/auth/decorators.py", line 21, in _wrapped_view
return view_func(request, *args, **kwargs)
File "/opt/bookwyrm/venv/lib/python3.10/site-packages/django/views/decorators/http.py", line 40, in inner
return func(request, *args, **kwargs)
File "/opt/bookwyrm/bookwyrm/views/books/books.py", line 190, in resolve_book
return redirect("book", book.id)
AttributeError: 'NoneType' object has no attribute 'id'
```
--- END ISSUE ---
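The traceback shows `resolve_book` redirecting with `book.id` while `book` is `None`, i.e. the resolution step returned nothing for the edition. A view-level guard is the simplest way to turn that into a clean 404 instead of a 500 — the following is only a sketch with the connector lookup stubbed out, while the actual fix (shown in the golden diff further down) makes the connector return the edition it creates:

```python
from django.http import Http404
from django.shortcuts import redirect


def resolve_book_sketch(request):
    """Hypothetical variant of resolve_book that tolerates a missing edition."""
    remote_id = request.POST.get("remote_id")
    book = lookup_edition(remote_id)  # stand-in for the connector resolution step
    if book is None:
        raise Http404("Unable to resolve the requested book")
    return redirect("book", book.id)
```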
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/connectors/inventaire.py`
Content:
```
1 """ inventaire data connector """
2 import re
3
4 from bookwyrm import models
5 from bookwyrm.book_search import SearchResult
6 from .abstract_connector import AbstractConnector, Mapping
7 from .abstract_connector import get_data
8 from .connector_manager import ConnectorException, create_edition_task
9
10
11 class Connector(AbstractConnector):
12 """instantiate a connector for inventaire"""
13
14 generated_remote_link_field = "inventaire_id"
15
16 def __init__(self, identifier):
17 super().__init__(identifier)
18
19 get_first = lambda a: a[0]
20 shared_mappings = [
21 Mapping("id", remote_field="uri", formatter=self.get_remote_id),
22 Mapping("bnfId", remote_field="wdt:P268", formatter=get_first),
23 Mapping("openlibraryKey", remote_field="wdt:P648", formatter=get_first),
24 ]
25 self.book_mappings = [
26 Mapping("title", remote_field="wdt:P1476", formatter=get_first),
27 Mapping("title", remote_field="labels", formatter=get_language_code),
28 Mapping("subtitle", remote_field="wdt:P1680", formatter=get_first),
29 Mapping("inventaireId", remote_field="uri"),
30 Mapping(
31 "description", remote_field="sitelinks", formatter=self.get_description
32 ),
33 Mapping("cover", remote_field="image", formatter=self.get_cover_url),
34 Mapping("isbn13", remote_field="wdt:P212", formatter=get_first),
35 Mapping("isbn10", remote_field="wdt:P957", formatter=get_first),
36 Mapping("oclcNumber", remote_field="wdt:P5331", formatter=get_first),
37 Mapping("goodreadsKey", remote_field="wdt:P2969", formatter=get_first),
38 Mapping("librarythingKey", remote_field="wdt:P1085", formatter=get_first),
39 Mapping("languages", remote_field="wdt:P407", formatter=self.resolve_keys),
40 Mapping("publishers", remote_field="wdt:P123", formatter=self.resolve_keys),
41 Mapping("publishedDate", remote_field="wdt:P577", formatter=get_first),
42 Mapping("pages", remote_field="wdt:P1104", formatter=get_first),
43 Mapping(
44 "subjectPlaces", remote_field="wdt:P840", formatter=self.resolve_keys
45 ),
46 Mapping("subjects", remote_field="wdt:P921", formatter=self.resolve_keys),
47 Mapping("asin", remote_field="wdt:P5749", formatter=get_first),
48 ] + shared_mappings
49 # TODO: P136: genre, P674 characters, P950 bne
50
51 self.author_mappings = [
52 Mapping("id", remote_field="uri", formatter=self.get_remote_id),
53 Mapping("name", remote_field="labels", formatter=get_language_code),
54 Mapping("bio", remote_field="sitelinks", formatter=self.get_description),
55 Mapping("goodreadsKey", remote_field="wdt:P2963", formatter=get_first),
56 Mapping("isni", remote_field="wdt:P213", formatter=get_first),
57 Mapping("viafId", remote_field="wdt:P214", formatter=get_first),
58 Mapping("gutenberg_id", remote_field="wdt:P1938", formatter=get_first),
59 Mapping("born", remote_field="wdt:P569", formatter=get_first),
60 Mapping("died", remote_field="wdt:P570", formatter=get_first),
61 ] + shared_mappings
62
63 def get_remote_id(self, value):
64 """convert an id/uri into a url"""
65 return f"{self.books_url}?action=by-uris&uris={value}"
66
67 def get_book_data(self, remote_id):
68 data = get_data(remote_id)
69 extracted = list(data.get("entities").values())
70 try:
71 data = extracted[0]
72 except (KeyError, IndexError):
73 raise ConnectorException("Invalid book data")
74 # flatten the data so that images, uri, and claims are on the same level
75 return {
76 **data.get("claims", {}),
77 **{k: data.get(k) for k in ["uri", "image", "labels", "sitelinks", "type"]},
78 }
79
80 def parse_search_data(self, data, min_confidence):
81 for search_result in data.get("results", []):
82 images = search_result.get("image")
83 cover = f"{self.covers_url}/img/entities/{images[0]}" if images else None
84 # a deeply messy translation of inventaire's scores
85 confidence = float(search_result.get("_score", 0.1))
86 confidence = 0.1 if confidence < 150 else 0.999
87 if confidence < min_confidence:
88 continue
89 yield SearchResult(
90 title=search_result.get("label"),
91 key=self.get_remote_id(search_result.get("uri")),
92 author=search_result.get("description"),
93 view_link=f"{self.base_url}/entity/{search_result.get('uri')}",
94 cover=cover,
95 confidence=confidence,
96 connector=self,
97 )
98
99 def parse_isbn_search_data(self, data):
100 """got some daaaata"""
101 results = data.get("entities")
102 if not results:
103 return
104 for search_result in list(results.values()):
105 title = search_result.get("claims", {}).get("wdt:P1476", [])
106 if not title:
107 continue
108 yield SearchResult(
109 title=title[0],
110 key=self.get_remote_id(search_result.get("uri")),
111 author=search_result.get("description"),
112 view_link=f"{self.base_url}/entity/{search_result.get('uri')}",
113 cover=self.get_cover_url(search_result.get("image")),
114 connector=self,
115 )
116
117 def is_work_data(self, data):
118 return data.get("type") == "work"
119
120 def load_edition_data(self, work_uri):
121 """get a list of editions for a work"""
122 # pylint: disable=line-too-long
123 url = f"{self.books_url}?action=reverse-claims&property=wdt:P629&value={work_uri}&sort=true"
124 return get_data(url)
125
126 def get_edition_from_work_data(self, data):
127 data = self.load_edition_data(data.get("uri"))
128 try:
129 uri = data.get("uris", [])[0]
130 except IndexError:
131 raise ConnectorException("Invalid book data")
132 return self.get_book_data(self.get_remote_id(uri))
133
134 def get_work_from_edition_data(self, data):
135 uri = data.get("wdt:P629", [None])[0]
136 if not uri:
137 raise ConnectorException("Invalid book data")
138 return self.get_book_data(self.get_remote_id(uri))
139
140 def get_authors_from_data(self, data):
141 authors = data.get("wdt:P50", [])
142 for author in authors:
143 yield self.get_or_create_author(self.get_remote_id(author))
144
145 def expand_book_data(self, book):
146 work = book
147 # go from the edition to the work, if necessary
148 if isinstance(book, models.Edition):
149 work = book.parent_work
150
151 try:
152 edition_options = self.load_edition_data(work.inventaire_id)
153 except ConnectorException:
154 # who knows, man
155 return
156
157 for edition_uri in edition_options.get("uris"):
158 remote_id = self.get_remote_id(edition_uri)
159 create_edition_task.delay(self.connector.id, work.id, remote_id)
160
161 def create_edition_from_data(self, work, edition_data, instance=None):
162 """pass in the url as data and then call the version in abstract connector"""
163 if isinstance(edition_data, str):
164 try:
165 edition_data = self.get_book_data(edition_data)
166 except ConnectorException:
167 # who, indeed, knows
168 return
169 super().create_edition_from_data(work, edition_data, instance=instance)
170
171 def get_cover_url(self, cover_blob, *_):
172 """format the relative cover url into an absolute one:
173 {"url": "/img/entities/e794783f01b9d4f897a1ea9820b96e00d346994f"}
174 """
175 # covers may or may not be a list
176 if isinstance(cover_blob, list) and len(cover_blob) > 0:
177 cover_blob = cover_blob[0]
178 cover_id = cover_blob.get("url")
179 if not cover_id:
180 return None
181 # cover may or may not be an absolute url already
182 if re.match(r"^http", cover_id):
183 return cover_id
184 return f"{self.covers_url}{cover_id}"
185
186 def resolve_keys(self, keys):
187 """cool, it's "wd:Q3156592" now what the heck does that mean"""
188 results = []
189 for uri in keys:
190 try:
191 data = self.get_book_data(self.get_remote_id(uri))
192 except ConnectorException:
193 continue
194 results.append(get_language_code(data.get("labels")))
195 return results
196
197 def get_description(self, links):
198 """grab an extracted excerpt from wikipedia"""
199 link = links.get("enwiki")
200 if not link:
201 return ""
202 url = f"{self.base_url}/api/data?action=wp-extract&lang=en&title={link}"
203 try:
204 data = get_data(url)
205 except ConnectorException:
206 return ""
207 return data.get("extract")
208
209 def get_remote_id_from_model(self, obj):
210 """use get_remote_id to figure out the link from a model obj"""
211 remote_id_value = obj.inventaire_id
212 return self.get_remote_id(remote_id_value)
213
214
215 def get_language_code(options, code="en"):
216 """when there are a bunch of translation but we need a single field"""
217 result = options.get(code)
218 if result:
219 return result
220 values = list(options.values())
221 return values[0] if values else None
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/connectors/inventaire.py b/bookwyrm/connectors/inventaire.py
--- a/bookwyrm/connectors/inventaire.py
+++ b/bookwyrm/connectors/inventaire.py
@@ -165,8 +165,8 @@
edition_data = self.get_book_data(edition_data)
except ConnectorException:
# who, indeed, knows
- return
- super().create_edition_from_data(work, edition_data, instance=instance)
+ return None
+ return super().create_edition_from_data(work, edition_data, instance=instance)
def get_cover_url(self, cover_blob, *_):
"""format the relative cover url into an absolute one:
|
{"golden_diff": "diff --git a/bookwyrm/connectors/inventaire.py b/bookwyrm/connectors/inventaire.py\n--- a/bookwyrm/connectors/inventaire.py\n+++ b/bookwyrm/connectors/inventaire.py\n@@ -165,8 +165,8 @@\n edition_data = self.get_book_data(edition_data)\n except ConnectorException:\n # who, indeed, knows\n- return\n- super().create_edition_from_data(work, edition_data, instance=instance)\n+ return None\n+ return super().create_edition_from_data(work, edition_data, instance=instance)\n \n def get_cover_url(self, cover_blob, *_):\n \"\"\"format the relative cover url into an absolute one:\n", "issue": "Server error on some book imports\n**Describe the bug**\r\nWhen importing some books the application crashes, with \"Server Error Something went wrong! Sorry about that.\"\r\n```\r\nInternal Server Error: /resolve-book\r\nTraceback (most recent call last):\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/contrib/auth/decorators.py\", line 21, in _wrapped_view\r\n return view_func(request, *args, **kwargs)\r\n File \"/opt/bookwyrm/venv/lib/python3.10/site-packages/django/views/decorators/http.py\", line 40, in inner\r\n return func(request, *args, **kwargs)\r\n File \"/opt/bookwyrm/bookwyrm/views/books/books.py\", line 190, in resolve_book\r\n return redirect(\"book\", book.id)\r\nAttributeError: 'NoneType' object has no attribute 'id'\r\n```\r\n\n", "before_files": [{"content": "\"\"\" inventaire data connector \"\"\"\nimport re\n\nfrom bookwyrm import models\nfrom bookwyrm.book_search import SearchResult\nfrom .abstract_connector import AbstractConnector, Mapping\nfrom .abstract_connector import get_data\nfrom .connector_manager import ConnectorException, create_edition_task\n\n\nclass Connector(AbstractConnector):\n \"\"\"instantiate a connector for inventaire\"\"\"\n\n generated_remote_link_field = \"inventaire_id\"\n\n def __init__(self, identifier):\n super().__init__(identifier)\n\n get_first = lambda a: a[0]\n shared_mappings = [\n Mapping(\"id\", remote_field=\"uri\", formatter=self.get_remote_id),\n Mapping(\"bnfId\", remote_field=\"wdt:P268\", formatter=get_first),\n Mapping(\"openlibraryKey\", remote_field=\"wdt:P648\", formatter=get_first),\n ]\n self.book_mappings = [\n Mapping(\"title\", remote_field=\"wdt:P1476\", formatter=get_first),\n Mapping(\"title\", remote_field=\"labels\", formatter=get_language_code),\n Mapping(\"subtitle\", remote_field=\"wdt:P1680\", formatter=get_first),\n Mapping(\"inventaireId\", remote_field=\"uri\"),\n Mapping(\n \"description\", remote_field=\"sitelinks\", formatter=self.get_description\n ),\n Mapping(\"cover\", remote_field=\"image\", formatter=self.get_cover_url),\n Mapping(\"isbn13\", remote_field=\"wdt:P212\", formatter=get_first),\n Mapping(\"isbn10\", remote_field=\"wdt:P957\", formatter=get_first),\n Mapping(\"oclcNumber\", remote_field=\"wdt:P5331\", formatter=get_first),\n Mapping(\"goodreadsKey\", remote_field=\"wdt:P2969\", formatter=get_first),\n Mapping(\"librarythingKey\", remote_field=\"wdt:P1085\", formatter=get_first),\n Mapping(\"languages\", remote_field=\"wdt:P407\", formatter=self.resolve_keys),\n Mapping(\"publishers\", remote_field=\"wdt:P123\", 
formatter=self.resolve_keys),\n Mapping(\"publishedDate\", remote_field=\"wdt:P577\", formatter=get_first),\n Mapping(\"pages\", remote_field=\"wdt:P1104\", formatter=get_first),\n Mapping(\n \"subjectPlaces\", remote_field=\"wdt:P840\", formatter=self.resolve_keys\n ),\n Mapping(\"subjects\", remote_field=\"wdt:P921\", formatter=self.resolve_keys),\n Mapping(\"asin\", remote_field=\"wdt:P5749\", formatter=get_first),\n ] + shared_mappings\n # TODO: P136: genre, P674 characters, P950 bne\n\n self.author_mappings = [\n Mapping(\"id\", remote_field=\"uri\", formatter=self.get_remote_id),\n Mapping(\"name\", remote_field=\"labels\", formatter=get_language_code),\n Mapping(\"bio\", remote_field=\"sitelinks\", formatter=self.get_description),\n Mapping(\"goodreadsKey\", remote_field=\"wdt:P2963\", formatter=get_first),\n Mapping(\"isni\", remote_field=\"wdt:P213\", formatter=get_first),\n Mapping(\"viafId\", remote_field=\"wdt:P214\", formatter=get_first),\n Mapping(\"gutenberg_id\", remote_field=\"wdt:P1938\", formatter=get_first),\n Mapping(\"born\", remote_field=\"wdt:P569\", formatter=get_first),\n Mapping(\"died\", remote_field=\"wdt:P570\", formatter=get_first),\n ] + shared_mappings\n\n def get_remote_id(self, value):\n \"\"\"convert an id/uri into a url\"\"\"\n return f\"{self.books_url}?action=by-uris&uris={value}\"\n\n def get_book_data(self, remote_id):\n data = get_data(remote_id)\n extracted = list(data.get(\"entities\").values())\n try:\n data = extracted[0]\n except (KeyError, IndexError):\n raise ConnectorException(\"Invalid book data\")\n # flatten the data so that images, uri, and claims are on the same level\n return {\n **data.get(\"claims\", {}),\n **{k: data.get(k) for k in [\"uri\", \"image\", \"labels\", \"sitelinks\", \"type\"]},\n }\n\n def parse_search_data(self, data, min_confidence):\n for search_result in data.get(\"results\", []):\n images = search_result.get(\"image\")\n cover = f\"{self.covers_url}/img/entities/{images[0]}\" if images else None\n # a deeply messy translation of inventaire's scores\n confidence = float(search_result.get(\"_score\", 0.1))\n confidence = 0.1 if confidence < 150 else 0.999\n if confidence < min_confidence:\n continue\n yield SearchResult(\n title=search_result.get(\"label\"),\n key=self.get_remote_id(search_result.get(\"uri\")),\n author=search_result.get(\"description\"),\n view_link=f\"{self.base_url}/entity/{search_result.get('uri')}\",\n cover=cover,\n confidence=confidence,\n connector=self,\n )\n\n def parse_isbn_search_data(self, data):\n \"\"\"got some daaaata\"\"\"\n results = data.get(\"entities\")\n if not results:\n return\n for search_result in list(results.values()):\n title = search_result.get(\"claims\", {}).get(\"wdt:P1476\", [])\n if not title:\n continue\n yield SearchResult(\n title=title[0],\n key=self.get_remote_id(search_result.get(\"uri\")),\n author=search_result.get(\"description\"),\n view_link=f\"{self.base_url}/entity/{search_result.get('uri')}\",\n cover=self.get_cover_url(search_result.get(\"image\")),\n connector=self,\n )\n\n def is_work_data(self, data):\n return data.get(\"type\") == \"work\"\n\n def load_edition_data(self, work_uri):\n \"\"\"get a list of editions for a work\"\"\"\n # pylint: disable=line-too-long\n url = f\"{self.books_url}?action=reverse-claims&property=wdt:P629&value={work_uri}&sort=true\"\n return get_data(url)\n\n def get_edition_from_work_data(self, data):\n data = self.load_edition_data(data.get(\"uri\"))\n try:\n uri = data.get(\"uris\", [])[0]\n except IndexError:\n raise 
ConnectorException(\"Invalid book data\")\n return self.get_book_data(self.get_remote_id(uri))\n\n def get_work_from_edition_data(self, data):\n uri = data.get(\"wdt:P629\", [None])[0]\n if not uri:\n raise ConnectorException(\"Invalid book data\")\n return self.get_book_data(self.get_remote_id(uri))\n\n def get_authors_from_data(self, data):\n authors = data.get(\"wdt:P50\", [])\n for author in authors:\n yield self.get_or_create_author(self.get_remote_id(author))\n\n def expand_book_data(self, book):\n work = book\n # go from the edition to the work, if necessary\n if isinstance(book, models.Edition):\n work = book.parent_work\n\n try:\n edition_options = self.load_edition_data(work.inventaire_id)\n except ConnectorException:\n # who knows, man\n return\n\n for edition_uri in edition_options.get(\"uris\"):\n remote_id = self.get_remote_id(edition_uri)\n create_edition_task.delay(self.connector.id, work.id, remote_id)\n\n def create_edition_from_data(self, work, edition_data, instance=None):\n \"\"\"pass in the url as data and then call the version in abstract connector\"\"\"\n if isinstance(edition_data, str):\n try:\n edition_data = self.get_book_data(edition_data)\n except ConnectorException:\n # who, indeed, knows\n return\n super().create_edition_from_data(work, edition_data, instance=instance)\n\n def get_cover_url(self, cover_blob, *_):\n \"\"\"format the relative cover url into an absolute one:\n {\"url\": \"/img/entities/e794783f01b9d4f897a1ea9820b96e00d346994f\"}\n \"\"\"\n # covers may or may not be a list\n if isinstance(cover_blob, list) and len(cover_blob) > 0:\n cover_blob = cover_blob[0]\n cover_id = cover_blob.get(\"url\")\n if not cover_id:\n return None\n # cover may or may not be an absolute url already\n if re.match(r\"^http\", cover_id):\n return cover_id\n return f\"{self.covers_url}{cover_id}\"\n\n def resolve_keys(self, keys):\n \"\"\"cool, it's \"wd:Q3156592\" now what the heck does that mean\"\"\"\n results = []\n for uri in keys:\n try:\n data = self.get_book_data(self.get_remote_id(uri))\n except ConnectorException:\n continue\n results.append(get_language_code(data.get(\"labels\")))\n return results\n\n def get_description(self, links):\n \"\"\"grab an extracted excerpt from wikipedia\"\"\"\n link = links.get(\"enwiki\")\n if not link:\n return \"\"\n url = f\"{self.base_url}/api/data?action=wp-extract&lang=en&title={link}\"\n try:\n data = get_data(url)\n except ConnectorException:\n return \"\"\n return data.get(\"extract\")\n\n def get_remote_id_from_model(self, obj):\n \"\"\"use get_remote_id to figure out the link from a model obj\"\"\"\n remote_id_value = obj.inventaire_id\n return self.get_remote_id(remote_id_value)\n\n\ndef get_language_code(options, code=\"en\"):\n \"\"\"when there are a bunch of translation but we need a single field\"\"\"\n result = options.get(code)\n if result:\n return result\n values = list(options.values())\n return values[0] if values else None\n", "path": "bookwyrm/connectors/inventaire.py"}], "after_files": [{"content": "\"\"\" inventaire data connector \"\"\"\nimport re\n\nfrom bookwyrm import models\nfrom bookwyrm.book_search import SearchResult\nfrom .abstract_connector import AbstractConnector, Mapping\nfrom .abstract_connector import get_data\nfrom .connector_manager import ConnectorException, create_edition_task\n\n\nclass Connector(AbstractConnector):\n \"\"\"instantiate a connector for inventaire\"\"\"\n\n generated_remote_link_field = \"inventaire_id\"\n\n def __init__(self, identifier):\n 
super().__init__(identifier)\n\n get_first = lambda a: a[0]\n shared_mappings = [\n Mapping(\"id\", remote_field=\"uri\", formatter=self.get_remote_id),\n Mapping(\"bnfId\", remote_field=\"wdt:P268\", formatter=get_first),\n Mapping(\"openlibraryKey\", remote_field=\"wdt:P648\", formatter=get_first),\n ]\n self.book_mappings = [\n Mapping(\"title\", remote_field=\"wdt:P1476\", formatter=get_first),\n Mapping(\"title\", remote_field=\"labels\", formatter=get_language_code),\n Mapping(\"subtitle\", remote_field=\"wdt:P1680\", formatter=get_first),\n Mapping(\"inventaireId\", remote_field=\"uri\"),\n Mapping(\n \"description\", remote_field=\"sitelinks\", formatter=self.get_description\n ),\n Mapping(\"cover\", remote_field=\"image\", formatter=self.get_cover_url),\n Mapping(\"isbn13\", remote_field=\"wdt:P212\", formatter=get_first),\n Mapping(\"isbn10\", remote_field=\"wdt:P957\", formatter=get_first),\n Mapping(\"oclcNumber\", remote_field=\"wdt:P5331\", formatter=get_first),\n Mapping(\"goodreadsKey\", remote_field=\"wdt:P2969\", formatter=get_first),\n Mapping(\"librarythingKey\", remote_field=\"wdt:P1085\", formatter=get_first),\n Mapping(\"languages\", remote_field=\"wdt:P407\", formatter=self.resolve_keys),\n Mapping(\"publishers\", remote_field=\"wdt:P123\", formatter=self.resolve_keys),\n Mapping(\"publishedDate\", remote_field=\"wdt:P577\", formatter=get_first),\n Mapping(\"pages\", remote_field=\"wdt:P1104\", formatter=get_first),\n Mapping(\n \"subjectPlaces\", remote_field=\"wdt:P840\", formatter=self.resolve_keys\n ),\n Mapping(\"subjects\", remote_field=\"wdt:P921\", formatter=self.resolve_keys),\n Mapping(\"asin\", remote_field=\"wdt:P5749\", formatter=get_first),\n ] + shared_mappings\n # TODO: P136: genre, P674 characters, P950 bne\n\n self.author_mappings = [\n Mapping(\"id\", remote_field=\"uri\", formatter=self.get_remote_id),\n Mapping(\"name\", remote_field=\"labels\", formatter=get_language_code),\n Mapping(\"bio\", remote_field=\"sitelinks\", formatter=self.get_description),\n Mapping(\"goodreadsKey\", remote_field=\"wdt:P2963\", formatter=get_first),\n Mapping(\"isni\", remote_field=\"wdt:P213\", formatter=get_first),\n Mapping(\"viafId\", remote_field=\"wdt:P214\", formatter=get_first),\n Mapping(\"gutenberg_id\", remote_field=\"wdt:P1938\", formatter=get_first),\n Mapping(\"born\", remote_field=\"wdt:P569\", formatter=get_first),\n Mapping(\"died\", remote_field=\"wdt:P570\", formatter=get_first),\n ] + shared_mappings\n\n def get_remote_id(self, value):\n \"\"\"convert an id/uri into a url\"\"\"\n return f\"{self.books_url}?action=by-uris&uris={value}\"\n\n def get_book_data(self, remote_id):\n data = get_data(remote_id)\n extracted = list(data.get(\"entities\").values())\n try:\n data = extracted[0]\n except (KeyError, IndexError):\n raise ConnectorException(\"Invalid book data\")\n # flatten the data so that images, uri, and claims are on the same level\n return {\n **data.get(\"claims\", {}),\n **{k: data.get(k) for k in [\"uri\", \"image\", \"labels\", \"sitelinks\", \"type\"]},\n }\n\n def parse_search_data(self, data, min_confidence):\n for search_result in data.get(\"results\", []):\n images = search_result.get(\"image\")\n cover = f\"{self.covers_url}/img/entities/{images[0]}\" if images else None\n # a deeply messy translation of inventaire's scores\n confidence = float(search_result.get(\"_score\", 0.1))\n confidence = 0.1 if confidence < 150 else 0.999\n if confidence < min_confidence:\n continue\n yield SearchResult(\n 
title=search_result.get(\"label\"),\n key=self.get_remote_id(search_result.get(\"uri\")),\n author=search_result.get(\"description\"),\n view_link=f\"{self.base_url}/entity/{search_result.get('uri')}\",\n cover=cover,\n confidence=confidence,\n connector=self,\n )\n\n def parse_isbn_search_data(self, data):\n \"\"\"got some daaaata\"\"\"\n results = data.get(\"entities\")\n if not results:\n return\n for search_result in list(results.values()):\n title = search_result.get(\"claims\", {}).get(\"wdt:P1476\", [])\n if not title:\n continue\n yield SearchResult(\n title=title[0],\n key=self.get_remote_id(search_result.get(\"uri\")),\n author=search_result.get(\"description\"),\n view_link=f\"{self.base_url}/entity/{search_result.get('uri')}\",\n cover=self.get_cover_url(search_result.get(\"image\")),\n connector=self,\n )\n\n def is_work_data(self, data):\n return data.get(\"type\") == \"work\"\n\n def load_edition_data(self, work_uri):\n \"\"\"get a list of editions for a work\"\"\"\n # pylint: disable=line-too-long\n url = f\"{self.books_url}?action=reverse-claims&property=wdt:P629&value={work_uri}&sort=true\"\n return get_data(url)\n\n def get_edition_from_work_data(self, data):\n data = self.load_edition_data(data.get(\"uri\"))\n try:\n uri = data.get(\"uris\", [])[0]\n except IndexError:\n raise ConnectorException(\"Invalid book data\")\n return self.get_book_data(self.get_remote_id(uri))\n\n def get_work_from_edition_data(self, data):\n uri = data.get(\"wdt:P629\", [None])[0]\n if not uri:\n raise ConnectorException(\"Invalid book data\")\n return self.get_book_data(self.get_remote_id(uri))\n\n def get_authors_from_data(self, data):\n authors = data.get(\"wdt:P50\", [])\n for author in authors:\n yield self.get_or_create_author(self.get_remote_id(author))\n\n def expand_book_data(self, book):\n work = book\n # go from the edition to the work, if necessary\n if isinstance(book, models.Edition):\n work = book.parent_work\n\n try:\n edition_options = self.load_edition_data(work.inventaire_id)\n except ConnectorException:\n # who knows, man\n return\n\n for edition_uri in edition_options.get(\"uris\"):\n remote_id = self.get_remote_id(edition_uri)\n create_edition_task.delay(self.connector.id, work.id, remote_id)\n\n def create_edition_from_data(self, work, edition_data, instance=None):\n \"\"\"pass in the url as data and then call the version in abstract connector\"\"\"\n if isinstance(edition_data, str):\n try:\n edition_data = self.get_book_data(edition_data)\n except ConnectorException:\n # who, indeed, knows\n return None\n return super().create_edition_from_data(work, edition_data, instance=instance)\n\n def get_cover_url(self, cover_blob, *_):\n \"\"\"format the relative cover url into an absolute one:\n {\"url\": \"/img/entities/e794783f01b9d4f897a1ea9820b96e00d346994f\"}\n \"\"\"\n # covers may or may not be a list\n if isinstance(cover_blob, list) and len(cover_blob) > 0:\n cover_blob = cover_blob[0]\n cover_id = cover_blob.get(\"url\")\n if not cover_id:\n return None\n # cover may or may not be an absolute url already\n if re.match(r\"^http\", cover_id):\n return cover_id\n return f\"{self.covers_url}{cover_id}\"\n\n def resolve_keys(self, keys):\n \"\"\"cool, it's \"wd:Q3156592\" now what the heck does that mean\"\"\"\n results = []\n for uri in keys:\n try:\n data = self.get_book_data(self.get_remote_id(uri))\n except ConnectorException:\n continue\n results.append(get_language_code(data.get(\"labels\")))\n return results\n\n def get_description(self, links):\n \"\"\"grab an 
extracted excerpt from wikipedia\"\"\"\n link = links.get(\"enwiki\")\n if not link:\n return \"\"\n url = f\"{self.base_url}/api/data?action=wp-extract&lang=en&title={link}\"\n try:\n data = get_data(url)\n except ConnectorException:\n return \"\"\n return data.get(\"extract\")\n\n def get_remote_id_from_model(self, obj):\n \"\"\"use get_remote_id to figure out the link from a model obj\"\"\"\n remote_id_value = obj.inventaire_id\n return self.get_remote_id(remote_id_value)\n\n\ndef get_language_code(options, code=\"en\"):\n \"\"\"when there are a bunch of translation but we need a single field\"\"\"\n result = options.get(code)\n if result:\n return result\n values = list(options.values())\n return values[0] if values else None\n", "path": "bookwyrm/connectors/inventaire.py"}]}
| 3,280 | 158 |
gh_patches_debug_33227 | rasdani/github-patches | git_diff | rasterio__rasterio-1910 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should rio clip fill nodata at borders?
Hello lovely people :hugs:,
I'm using `rio clip` to quickly cut out z10 tiles from a raster image. The raster image is not evenly divisible by the z10 tiles, so I expect the border tiles to contain nodata. More importantly, I expect all the z10 tiles I cut out to have the same dimensions. This is not the case with rio clip right now.
Here is an example where I first convert my raster image to WGS84 and then cut it into z10 tiles where the file name is the z10 quad key id.
```
rio warp in.tif wgs84.tif --dst-crs EPSG:4326
rio bounds wgs84.tif | mercantile tiles 10 > z10.txt
while read tile; do
rio clip wgs84.tif $(mercantile quadkey "$tile").tif --bounds "$(mercantile shapes --extents "$tile")"
done < z10.txt
```
This results in
- many raster images of pixel size 2857x1744
- some raster images (at the borders) with smaller dimensions; the bounds are not filled with nodata
Should rio clip use boundless windows and fill with nodata, to respect the user's bounds?
(rio 1.1.3, tested in ubuntu 18.04 docker container with python 3.6)
--- END ISSUE ---
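For background, the behaviour the reporter is asking about corresponds to rasterio's boundless reads. The sketch below is editorial and illustrative only (the file path and tile bounds are placeholders, not taken from the issue); it shows how reading a window that overhangs the raster extent with `boundless=True` returns a constant-size array padded with the fill value:
```python
import rasterio
from rasterio.windows import from_bounds

# Placeholder inputs; any GeoTIFF and a tile boundary that overhangs its extent will do.
tile_bounds = (8.4375, 47.0402, 8.7891, 47.2792)  # west, south, east, north

with rasterio.open("wgs84.tif") as src:
    window = from_bounds(*tile_bounds, transform=src.transform)
    window = window.round_offsets().round_lengths()
    # boundless=True keeps the requested window size and pads whatever falls
    # outside the raster with fill_value instead of shrinking the output array.
    data = src.read(window=window, boundless=True, fill_value=src.nodata or 0)
    print(data.shape)  # edge tiles come back with the same shape as interior tiles
```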
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/clip.py`
Content:
```
1 """File translation command"""
2
3 import logging
4
5 import click
6 from cligj import format_opt
7
8 from .helpers import resolve_inout
9 from . import options
10 import rasterio
11 from rasterio.coords import disjoint_bounds
12 from rasterio.crs import CRS
13 from rasterio.windows import Window
14
15
16 logger = logging.getLogger(__name__)
17
18
19 # Geographic (default), projected, or Mercator switch.
20 projection_geographic_opt = click.option(
21 '--geographic',
22 'projection',
23 flag_value='geographic',
24 help="Bounds in geographic coordinates.")
25
26 projection_projected_opt = click.option(
27 '--projected',
28 'projection',
29 flag_value='projected',
30 default=True,
31 help="Bounds in input's own projected coordinates (the default).")
32
33
34 # Clip command
35 @click.command(short_help='Clip a raster to given bounds.')
36 @click.argument(
37 'files',
38 nargs=-1,
39 type=click.Path(),
40 required=True,
41 metavar="INPUT OUTPUT")
42 @options.output_opt
43 @options.bounds_opt
44 @click.option(
45 '--like',
46 type=click.Path(exists=True),
47 help='Raster dataset to use as a template for bounds')
48 @format_opt
49 @projection_geographic_opt
50 @projection_projected_opt
51 @options.overwrite_opt
52 @options.creation_options
53 @click.pass_context
54 def clip(ctx, files, output, bounds, like, driver, projection,
55 overwrite, creation_options):
56 """Clips a raster using projected or geographic bounds.
57
58 \b
59 $ rio clip input.tif output.tif --bounds xmin ymin xmax ymax
60 $ rio clip input.tif output.tif --like template.tif
61
62 The values of --bounds are presumed to be from the coordinate
63 reference system of the input dataset unless the --geographic option
64 is used, in which case the values may be longitude and latitude
65 bounds. Either JSON, for example "[west, south, east, north]", or
66 plain text "west south east north" representations of a bounding box
67 are acceptable.
68
69 If using --like, bounds will automatically be transformed to match the
70 coordinate reference system of the input.
71
72 It can also be combined to read bounds of a feature dataset using Fiona:
73
74 \b
75 $ rio clip input.tif output.tif --bounds $(fio info features.shp --bounds)
76
77 """
78 from rasterio.warp import transform_bounds
79
80 with ctx.obj['env']:
81
82 output, files = resolve_inout(files=files, output=output, overwrite=overwrite)
83 input = files[0]
84
85 with rasterio.open(input) as src:
86 if bounds:
87 if projection == 'geographic':
88 bounds = transform_bounds(CRS.from_epsg(4326), src.crs, *bounds)
89 if disjoint_bounds(bounds, src.bounds):
90 raise click.BadParameter('must overlap the extent of '
91 'the input raster',
92 param='--bounds',
93 param_hint='--bounds')
94 elif like:
95 with rasterio.open(like) as template_ds:
96 bounds = template_ds.bounds
97 if template_ds.crs != src.crs:
98 bounds = transform_bounds(template_ds.crs, src.crs,
99 *bounds)
100
101 if disjoint_bounds(bounds, src.bounds):
102 raise click.BadParameter('must overlap the extent of '
103 'the input raster',
104 param='--like',
105 param_hint='--like')
106
107 else:
108 raise click.UsageError('--bounds or --like required')
109
110 bounds_window = src.window(*bounds)
111 bounds_window = bounds_window.intersection(
112 Window(0, 0, src.width, src.height))
113
114 # Get the window with integer height
115 # and width that contains the bounds window.
116 out_window = bounds_window.round_lengths(op='ceil')
117
118 height = int(out_window.height)
119 width = int(out_window.width)
120
121 out_kwargs = src.profile
122 out_kwargs.update({
123 'driver': driver,
124 'height': height,
125 'width': width,
126 'transform': src.window_transform(out_window)})
127 out_kwargs.update(**creation_options)
128
129 if 'blockxsize' in out_kwargs and out_kwargs['blockxsize'] > width:
130 del out_kwargs['blockxsize']
131 logger.warning("Blockxsize removed from creation options to accomodate small output width")
132 if 'blockysize' in out_kwargs and out_kwargs['blockysize'] > height:
133 del out_kwargs['blockysize']
134 logger.warning("Blockysize removed from creation options to accomodate small output height")
135
136 with rasterio.open(output, 'w', **out_kwargs) as out:
137 out.write(src.read(window=out_window,
138 out_shape=(src.count, height, width)))
139
```
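As a pointer for localisation (this note is editorial, not part of the file above): the size loss at the borders comes from the `intersection` call around lines 110-112, which clips the requested window to the dataset extent. A tiny stand-alone illustration with made-up window sizes:
```python
from rasterio.windows import Window

# Made-up numbers: a 512x512 request that overhangs the left edge of a 400x400 raster.
requested = Window(col_off=-100, row_off=0, width=512, height=512)
dataset_extent = Window(col_off=0, row_off=0, width=400, height=400)

clipped = requested.intersection(dataset_extent)
print(clipped)  # the 100 overhanging columns are dropped, so edge tiles end up smaller
```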
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/rio/clip.py b/rasterio/rio/clip.py
--- a/rasterio/rio/clip.py
+++ b/rasterio/rio/clip.py
@@ -50,9 +50,24 @@
@projection_projected_opt
@options.overwrite_opt
@options.creation_options
[email protected](
+ "--with-complement/--without-complement",
+ default=False,
+ help="Include the relative complement of the raster in the given bounds (giving a larger result), else return results only from the intersection of the raster and the bounds (the default).",
+)
@click.pass_context
-def clip(ctx, files, output, bounds, like, driver, projection,
- overwrite, creation_options):
+def clip(
+ ctx,
+ files,
+ output,
+ bounds,
+ like,
+ driver,
+ projection,
+ overwrite,
+ creation_options,
+ with_complement,
+):
"""Clips a raster using projected or geographic bounds.
\b
@@ -108,8 +123,11 @@
raise click.UsageError('--bounds or --like required')
bounds_window = src.window(*bounds)
- bounds_window = bounds_window.intersection(
- Window(0, 0, src.width, src.height))
+
+ if not with_complement:
+ bounds_window = bounds_window.intersection(
+ Window(0, 0, src.width, src.height)
+ )
# Get the window with integer height
# and width that contains the bounds window.
@@ -133,6 +151,11 @@
del out_kwargs['blockysize']
logger.warning("Blockysize removed from creation options to accomodate small output height")
- with rasterio.open(output, 'w', **out_kwargs) as out:
- out.write(src.read(window=out_window,
- out_shape=(src.count, height, width)))
+ with rasterio.open(output, "w", **out_kwargs) as out:
+ out.write(
+ src.read(
+ window=out_window,
+ out_shape=(src.count, height, width),
+ boundless=True,
+ )
+ )
|
{"golden_diff": "diff --git a/rasterio/rio/clip.py b/rasterio/rio/clip.py\n--- a/rasterio/rio/clip.py\n+++ b/rasterio/rio/clip.py\n@@ -50,9 +50,24 @@\n @projection_projected_opt\n @options.overwrite_opt\n @options.creation_options\[email protected](\n+ \"--with-complement/--without-complement\",\n+ default=False,\n+ help=\"Include the relative complement of the raster in the given bounds (giving a larger result), else return results only from the intersection of the raster and the bounds (the default).\",\n+)\n @click.pass_context\n-def clip(ctx, files, output, bounds, like, driver, projection,\n- overwrite, creation_options):\n+def clip(\n+ ctx,\n+ files,\n+ output,\n+ bounds,\n+ like,\n+ driver,\n+ projection,\n+ overwrite,\n+ creation_options,\n+ with_complement,\n+):\n \"\"\"Clips a raster using projected or geographic bounds.\n \n \\b\n@@ -108,8 +123,11 @@\n raise click.UsageError('--bounds or --like required')\n \n bounds_window = src.window(*bounds)\n- bounds_window = bounds_window.intersection(\n- Window(0, 0, src.width, src.height))\n+\n+ if not with_complement:\n+ bounds_window = bounds_window.intersection(\n+ Window(0, 0, src.width, src.height)\n+ )\n \n # Get the window with integer height\n # and width that contains the bounds window.\n@@ -133,6 +151,11 @@\n del out_kwargs['blockysize']\n logger.warning(\"Blockysize removed from creation options to accomodate small output height\")\n \n- with rasterio.open(output, 'w', **out_kwargs) as out:\n- out.write(src.read(window=out_window,\n- out_shape=(src.count, height, width)))\n+ with rasterio.open(output, \"w\", **out_kwargs) as out:\n+ out.write(\n+ src.read(\n+ window=out_window,\n+ out_shape=(src.count, height, width),\n+ boundless=True,\n+ )\n+ )\n", "issue": "Should rio clip fill nodata at borders?\nHello lovely people :hugs:,\r\n\r\nI'm using `rio clip` to quickly cut out z10 tiles for raster image. The raster image is not divisible by the z10 tiles, so I expect the border tiles to contain nodata. But more importantly I expect all z10 tiles I cut out to be of the same dimension. This is not the case with rio clip right now.\r\n\r\nHere is an example where I first convert my raster image to WGS84 and then cut it into z10 tiles where the file name is the z10 quad key id.\r\n\r\n```\r\nrio warp in.tif wgs84.tif --dst-crs EPSG:4326\r\nrio bounds wgs84.tif | mercantile tiles 10 > z10.txt\r\n \r\nwhile read tile; do\r\n rio clip wgs84.tif $(mercantile quadkey \"$tile\").tif --bounds \"$(mercantile shapes --extents \"$tile\")\"\r\ndone < z10.txt\r\n```\r\n\r\nThis results in\r\n- many raster images of pixel size 2857x1744\r\n- some raster images (at the borders) with smaller dimensions; the bounds are not filled with nodata\r\n\r\nShould rio clip use boundless windows and fill with nodata, to respect the user's bounds?\r\n\r\n(rio 1.1.3, tested in ubuntu 18.04 docker container with python 3.6)\n", "before_files": [{"content": "\"\"\"File translation command\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import format_opt\n\nfrom .helpers import resolve_inout\nfrom . 
import options\nimport rasterio\nfrom rasterio.coords import disjoint_bounds\nfrom rasterio.crs import CRS\nfrom rasterio.windows import Window\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Geographic (default), projected, or Mercator switch.\nprojection_geographic_opt = click.option(\n '--geographic',\n 'projection',\n flag_value='geographic',\n help=\"Bounds in geographic coordinates.\")\n\nprojection_projected_opt = click.option(\n '--projected',\n 'projection',\n flag_value='projected',\n default=True,\n help=\"Bounds in input's own projected coordinates (the default).\")\n\n\n# Clip command\[email protected](short_help='Clip a raster to given bounds.')\[email protected](\n 'files',\n nargs=-1,\n type=click.Path(),\n required=True,\n metavar=\"INPUT OUTPUT\")\[email protected]_opt\[email protected]_opt\[email protected](\n '--like',\n type=click.Path(exists=True),\n help='Raster dataset to use as a template for bounds')\n@format_opt\n@projection_geographic_opt\n@projection_projected_opt\[email protected]_opt\[email protected]_options\[email protected]_context\ndef clip(ctx, files, output, bounds, like, driver, projection,\n overwrite, creation_options):\n \"\"\"Clips a raster using projected or geographic bounds.\n\n \\b\n $ rio clip input.tif output.tif --bounds xmin ymin xmax ymax\n $ rio clip input.tif output.tif --like template.tif\n\n The values of --bounds are presumed to be from the coordinate\n reference system of the input dataset unless the --geographic option\n is used, in which case the values may be longitude and latitude\n bounds. Either JSON, for example \"[west, south, east, north]\", or\n plain text \"west south east north\" representations of a bounding box\n are acceptable.\n\n If using --like, bounds will automatically be transformed to match the\n coordinate reference system of the input.\n\n It can also be combined to read bounds of a feature dataset using Fiona:\n\n \\b\n $ rio clip input.tif output.tif --bounds $(fio info features.shp --bounds)\n\n \"\"\"\n from rasterio.warp import transform_bounds\n\n with ctx.obj['env']:\n\n output, files = resolve_inout(files=files, output=output, overwrite=overwrite)\n input = files[0]\n\n with rasterio.open(input) as src:\n if bounds:\n if projection == 'geographic':\n bounds = transform_bounds(CRS.from_epsg(4326), src.crs, *bounds)\n if disjoint_bounds(bounds, src.bounds):\n raise click.BadParameter('must overlap the extent of '\n 'the input raster',\n param='--bounds',\n param_hint='--bounds')\n elif like:\n with rasterio.open(like) as template_ds:\n bounds = template_ds.bounds\n if template_ds.crs != src.crs:\n bounds = transform_bounds(template_ds.crs, src.crs,\n *bounds)\n\n if disjoint_bounds(bounds, src.bounds):\n raise click.BadParameter('must overlap the extent of '\n 'the input raster',\n param='--like',\n param_hint='--like')\n\n else:\n raise click.UsageError('--bounds or --like required')\n\n bounds_window = src.window(*bounds)\n bounds_window = bounds_window.intersection(\n Window(0, 0, src.width, src.height))\n\n # Get the window with integer height\n # and width that contains the bounds window.\n out_window = bounds_window.round_lengths(op='ceil')\n\n height = int(out_window.height)\n width = int(out_window.width)\n\n out_kwargs = src.profile\n out_kwargs.update({\n 'driver': driver,\n 'height': height,\n 'width': width,\n 'transform': src.window_transform(out_window)})\n out_kwargs.update(**creation_options)\n\n if 'blockxsize' in out_kwargs and out_kwargs['blockxsize'] > width:\n del 
out_kwargs['blockxsize']\n logger.warning(\"Blockxsize removed from creation options to accomodate small output width\")\n if 'blockysize' in out_kwargs and out_kwargs['blockysize'] > height:\n del out_kwargs['blockysize']\n logger.warning(\"Blockysize removed from creation options to accomodate small output height\")\n\n with rasterio.open(output, 'w', **out_kwargs) as out:\n out.write(src.read(window=out_window,\n out_shape=(src.count, height, width)))\n", "path": "rasterio/rio/clip.py"}], "after_files": [{"content": "\"\"\"File translation command\"\"\"\n\nimport logging\n\nimport click\nfrom cligj import format_opt\n\nfrom .helpers import resolve_inout\nfrom . import options\nimport rasterio\nfrom rasterio.coords import disjoint_bounds\nfrom rasterio.crs import CRS\nfrom rasterio.windows import Window\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Geographic (default), projected, or Mercator switch.\nprojection_geographic_opt = click.option(\n '--geographic',\n 'projection',\n flag_value='geographic',\n help=\"Bounds in geographic coordinates.\")\n\nprojection_projected_opt = click.option(\n '--projected',\n 'projection',\n flag_value='projected',\n default=True,\n help=\"Bounds in input's own projected coordinates (the default).\")\n\n\n# Clip command\[email protected](short_help='Clip a raster to given bounds.')\[email protected](\n 'files',\n nargs=-1,\n type=click.Path(),\n required=True,\n metavar=\"INPUT OUTPUT\")\[email protected]_opt\[email protected]_opt\[email protected](\n '--like',\n type=click.Path(exists=True),\n help='Raster dataset to use as a template for bounds')\n@format_opt\n@projection_geographic_opt\n@projection_projected_opt\[email protected]_opt\[email protected]_options\[email protected](\n \"--with-complement/--without-complement\",\n default=False,\n help=\"Include the relative complement of the raster in the given bounds (giving a larger result), else return results only from the intersection of the raster and the bounds (the default).\",\n)\[email protected]_context\ndef clip(\n ctx,\n files,\n output,\n bounds,\n like,\n driver,\n projection,\n overwrite,\n creation_options,\n with_complement,\n):\n \"\"\"Clips a raster using projected or geographic bounds.\n\n \\b\n $ rio clip input.tif output.tif --bounds xmin ymin xmax ymax\n $ rio clip input.tif output.tif --like template.tif\n\n The values of --bounds are presumed to be from the coordinate\n reference system of the input dataset unless the --geographic option\n is used, in which case the values may be longitude and latitude\n bounds. 
Either JSON, for example \"[west, south, east, north]\", or\n plain text \"west south east north\" representations of a bounding box\n are acceptable.\n\n If using --like, bounds will automatically be transformed to match the\n coordinate reference system of the input.\n\n It can also be combined to read bounds of a feature dataset using Fiona:\n\n \\b\n $ rio clip input.tif output.tif --bounds $(fio info features.shp --bounds)\n\n \"\"\"\n from rasterio.warp import transform_bounds\n\n with ctx.obj['env']:\n\n output, files = resolve_inout(files=files, output=output, overwrite=overwrite)\n input = files[0]\n\n with rasterio.open(input) as src:\n if bounds:\n if projection == 'geographic':\n bounds = transform_bounds(CRS.from_epsg(4326), src.crs, *bounds)\n if disjoint_bounds(bounds, src.bounds):\n raise click.BadParameter('must overlap the extent of '\n 'the input raster',\n param='--bounds',\n param_hint='--bounds')\n elif like:\n with rasterio.open(like) as template_ds:\n bounds = template_ds.bounds\n if template_ds.crs != src.crs:\n bounds = transform_bounds(template_ds.crs, src.crs,\n *bounds)\n\n if disjoint_bounds(bounds, src.bounds):\n raise click.BadParameter('must overlap the extent of '\n 'the input raster',\n param='--like',\n param_hint='--like')\n\n else:\n raise click.UsageError('--bounds or --like required')\n\n bounds_window = src.window(*bounds)\n\n if not with_complement:\n bounds_window = bounds_window.intersection(\n Window(0, 0, src.width, src.height)\n )\n\n # Get the window with integer height\n # and width that contains the bounds window.\n out_window = bounds_window.round_lengths(op='ceil')\n\n height = int(out_window.height)\n width = int(out_window.width)\n\n out_kwargs = src.profile\n out_kwargs.update({\n 'driver': driver,\n 'height': height,\n 'width': width,\n 'transform': src.window_transform(out_window)})\n out_kwargs.update(**creation_options)\n\n if 'blockxsize' in out_kwargs and out_kwargs['blockxsize'] > width:\n del out_kwargs['blockxsize']\n logger.warning(\"Blockxsize removed from creation options to accomodate small output width\")\n if 'blockysize' in out_kwargs and out_kwargs['blockysize'] > height:\n del out_kwargs['blockysize']\n logger.warning(\"Blockysize removed from creation options to accomodate small output height\")\n\n with rasterio.open(output, \"w\", **out_kwargs) as out:\n out.write(\n src.read(\n window=out_window,\n out_shape=(src.count, height, width),\n boundless=True,\n )\n )\n", "path": "rasterio/rio/clip.py"}]}
| 1,884 | 487 |
gh_patches_debug_25125 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-646 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add another optimizer (or 2) as option
**Is your feature request related to a problem? Please describe.**
Currently, GaNDLF only offers Adam and SGD as options for optimizers, which could sometimes be a bit limiting.
**Describe the solution you'd like**
Add ~[RMSProp](https://pytorch.org/docs/stable/generated/torch.optim.RMSprop.html#torch.optim.RMSprop), and optionally~ one or more of the following:
- [LBFGS](https://pytorch.org/docs/1.13/generated/torch.optim.LBFGS.html#torch.optim.LBFGS)
- [RAdam](https://pytorch.org/docs/1.13/generated/torch.optim.RAdam.html#torch.optim.RAdam)
- ~[Rprop](https://pytorch.org/docs/1.11/generated/torch.optim.Rprop.html#torch.optim.Rprop)~
- ~[ASGD](https://pytorch.org/docs/1.11/generated/torch.optim.ASGD.html#torch.optim.ASGD)~
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
EDIT [2023/03/20]: updated to the optimizers that are actually missing.
--- END ISSUE ---
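To make the request concrete, here is a rough sketch (editorial, not from the repository) of how one of the listed optimizers could be wrapped in the same parameter-dict style the project uses below; the default values are simply torch's own defaults:
```python
from torch.optim import LBFGS


def lbfgs(parameters):
    # Sketch only: mirrors the existing wrapper pattern in wrap_torch.py below.
    return LBFGS(
        parameters["model_parameters"],
        lr=parameters.get("learning_rate"),
        max_iter=parameters["optimizer"].get("max_iter", 20),
        history_size=parameters["optimizer"].get("history_size", 100),
        line_search_fn=parameters["optimizer"].get("line_search_fn", None),
    )
```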
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `GANDLF/optimizers/__init__.py`
Content:
```
1 from .wrap_torch import (
2 sgd,
3 asgd,
4 adam,
5 adamw,
6 adamax,
7 # sparseadam,
8 rprop,
9 adadelta,
10 adagrad,
11 rmsprop,
12 )
13
14 global_optimizer_dict = {
15 "sgd": sgd,
16 "asgd": asgd,
17 "adam": adam,
18 "adamw": adamw,
19 "adamax": adamax,
20 # "sparseadam": sparseadam,
21 "rprop": rprop,
22 "adadelta": adadelta,
23 "adagrad": adagrad,
24 "rmsprop": rmsprop,
25 }
26
27
28 def get_optimizer(params):
29 """
30 Returns an instance of the specified optimizer from the PyTorch `torch.optim` module.
31
32 Args:
33 params (dict): A dictionary containing the input parameters for the optimizer.
34
35 Returns:
36 optimizer (torch.optim.Optimizer): An instance of the specified optimizer.
37
38 """
39 # Retrieve the optimizer type from the input parameters
40 optimizer_type = params["optimizer"]["type"]
41
42 # Create the optimizer instance using the specified type and input parameters
43 if optimizer_type in global_optimizer_dict:
44 optimizer_function = global_optimizer_dict[optimizer_type]
45 return optimizer_function(params)
46 else:
47 raise ValueError("Optimizer type %s not found" % optimizer_type)
48
```
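For orientation (editorial note, not part of the listing), here is a minimal example of how the dispatch function above is meant to be driven by a parameter dictionary; the import path and dictionary keys are inferred from the code shown here:
```python
import torch
from GANDLF.optimizers import get_optimizer  # the module listed above

model = torch.nn.Linear(4, 2)
params = {
    "model_parameters": model.parameters(),
    "learning_rate": 1e-3,
    "optimizer": {"type": "adam"},
}
optimizer = get_optimizer(params)  # returns a configured torch.optim.Adam
```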
Path: `GANDLF/optimizers/wrap_torch.py`
Content:
```
1 from torch.optim import (
2 SGD,
3 ASGD,
4 Rprop,
5 Adam,
6 AdamW,
7 # SparseAdam,
8 Adamax,
9 Adadelta,
10 Adagrad,
11 RMSprop,
12 )
13
14
15 def sgd(parameters):
16 """
17 Creates a Stochastic Gradient Descent optimizer from the PyTorch `torch.optim` module using the input parameters.
18
19 Args:
20 parameters (dict): A dictionary containing the input parameters for the optimizer.
21
22 Returns:
23 optimizer (torch.optim.SGD): A Stochastic Gradient Descent optimizer.
24
25 """
26 # Create the optimizer using the input parameters
27 optimizer = SGD(
28 parameters["model_parameters"],
29 lr=parameters.get("learning_rate"),
30 momentum=parameters["optimizer"].get("momentum", 0.9),
31 weight_decay=parameters["optimizer"].get("weight_decay", 0),
32 dampening=parameters["optimizer"].get("dampening", 0),
33 nesterov=parameters["optimizer"].get("Nesterov", False),
34 )
35
36 return optimizer
37
38
39 def asgd(parameters):
40 """
41 Creates an Averaged Stochastic Gradient Descent optimizer from the PyTorch `torch.optim` module using the input parameters.
42
43 Args:
44 parameters (dict): A dictionary containing the input parameters for the optimizer.
45
46 Returns:
47 optimizer (torch.optim.ASGD): An Averaged Stochastic Gradient Descent optimizer.
48
49 """
50 # Create the optimizer using the input parameters
51 return ASGD(
52 parameters["model_parameters"],
53 lr=parameters.get("learning_rate"),
54 alpha=parameters["optimizer"].get("alpha", 0.75),
55 t0=parameters["optimizer"].get("t0", 1e6),
56 lambd=parameters["optimizer"].get("lambd", 1e-4),
57 weight_decay=parameters["optimizer"].get("weight_decay", 0),
58 )
59
60
61 def adam(parameters, opt_type="normal"):
62 """
63 Creates an Adam or AdamW optimizer from the PyTorch `torch.optim` module using the input parameters.
64
65 Args:
66 parameters (dict): A dictionary containing the input parameters for the optimizer.
67 opt_type (str): A string indicating the type of optimizer to create (either "normal" for Adam or "AdamW" for AdamW).
68
69 Returns:
70 optimizer (torch.optim.Adam or torch.optim.AdamW): An Adam or AdamW optimizer.
71
72 """
73 # Determine which optimizer to create based on opt_type
74 if opt_type == "normal":
75 optimizer_fn = Adam
76 elif opt_type == "AdamW":
77 optimizer_fn = AdamW
78 else:
79 raise ValueError(f"Invalid optimizer type: {opt_type}")
80
81 # Create the optimizer using the input parameters
82 return optimizer_fn(
83 parameters["model_parameters"],
84 lr=parameters.get("learning_rate"),
85 betas=parameters["optimizer"].get("betas", (0.9, 0.999)),
86 weight_decay=parameters["optimizer"].get("weight_decay", 0.00005),
87 eps=parameters["optimizer"].get("eps", 1e-8),
88 amsgrad=parameters["optimizer"].get("amsgrad", False),
89 )
90
91
92 def adamw(parameters):
93 """
94 Creates an AdamW optimizer from the PyTorch `torch.optim` module using the input parameters.
95
96 Args:
97 parameters (dict): A dictionary containing the input parameters for the optimizer.
98
99 Returns:
100 optimizer (torch.optim.AdamW): An AdamW optimizer.
101
102 """
103 return adam(parameters, opt_type="AdamW")
104
105 def adamax(parameters):
106 """
107 Creates an Adamax optimizer from the PyTorch `torch.optim` module using the input parameters.
108
109 Args:
110 parameters (dict): A dictionary containing the input parameters for the optimizer.
111
112 Returns:
113 optimizer (torch.optim.Adamax): An Adamax optimizer.
114
115 """
116 # Create the optimizer using the input parameters
117 return Adamax(
118 parameters["model_parameters"],
119 lr=parameters.get("learning_rate"),
120 betas=parameters["optimizer"].get("betas", (0.9, 0.999)),
121 weight_decay=parameters["optimizer"].get("weight_decay", 0.00005),
122 eps=parameters["optimizer"].get("eps", 1e-8),
123 )
124
125
126 # def sparseadam(parameters):
127 # # pick defaults
128 # if not ("betas" in parameters["optimizer"]):
129 # parameters["optimizer"]["betas"] = (0.9, 0.999)
130 # if not ("eps" in parameters["optimizer"]):
131 # parameters["optimizer"]["eps"] = 1e-8
132
133 # return SparseAdam(
134 # parameters["model_parameters"],
135 # lr=parameters["learning_rate"],
136 # betas=parameters["optimizer"]["betas"],
137 # eps=parameters["optimizer"]["eps"],
138 # )
139
140
141 def rprop(parameters):
142 """
143 Creates a Resilient Backpropagation optimizer from the PyTorch `torch.optim` module using the input parameters.
144
145 Args:
146 parameters (dict): A dictionary containing the input parameters for the optimizer.
147
148 Returns:
149 optimizer (torch.optim.Rprop): A Resilient Backpropagation optimizer.
150
151 """
152 # Create the optimizer using the input parameters
153 return Rprop(
154 parameters["model_parameters"],
155 lr=parameters.get("learning_rate"),
156 etas=parameters["optimizer"].get("etas", (0.5, 1.2)),
157 step_sizes=parameters["optimizer"].get("step_sizes", (1e-7, 50)),
158 )
159
160
161 def adadelta(parameters):
162 """
163 Creates an Adadelta optimizer from the PyTorch `torch.optim` module using the input parameters.
164
165 Args:
166 parameters (dict): A dictionary containing the input parameters for the optimizer.
167
168 Returns:
169 optimizer (torch.optim.Adadelta): An Adadelta optimizer.
170
171 """
172 # Create the optimizer using the input parameters
173 return Adadelta(
174 parameters["model_parameters"],
175 lr=parameters.get("learning_rate"),
176 rho=parameters["optimizer"].get("rho", 0.9),
177 eps=parameters["optimizer"].get("eps", 1e-6),
178 weight_decay=parameters["optimizer"].get("weight_decay", 0),
179 )
180
181
182 def adagrad(parameters):
183 """
184 Creates an Adagrad optimizer from the PyTorch `torch.optim` module using the input parameters.
185
186 Args:
187 parameters (dict): A dictionary containing the input parameters for the optimizer.
188
189 Returns:
190 optimizer (torch.optim.Adagrad): An Adagrad optimizer.
191
192 """
193
194 # Create the optimizer using the input parameters
195 return Adagrad(
196 parameters["model_parameters"],
197 lr=parameters.get("learning_rate"),
198 lr_decay=parameters["optimizer"].get("lr_decay", 0),
199 eps=parameters["optimizer"].get("eps", 1e-6),
200 weight_decay=parameters["optimizer"].get("weight_decay", 0),
201 )
202
203
204 def rmsprop(parameters):
205 """
206 Creates an RMSprop optimizer from the PyTorch `torch.optim` module using the input parameters.
207
208 Args:
209 parameters (dict): A dictionary containing the input parameters for the optimizer.
210
211 Returns:
212 optimizer (torch.optim.RMSprop): An RMSprop optimizer.
213
214 """
215 # Create the optimizer using the input parameters
216 return RMSprop(
217 parameters["model_parameters"],
218 lr=parameters.get("learning_rate"),
219 alpha=parameters["optimizer"].get("alpha", 0.99),
220 eps=parameters["optimizer"].get("eps", 1e-8),
221 centered=parameters["optimizer"].get("centered", False),
222 momentum=parameters["optimizer"].get("momentum", 0),
223 weight_decay=parameters["optimizer"].get("weight_decay", 0),
224 )
225
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/GANDLF/optimizers/__init__.py b/GANDLF/optimizers/__init__.py
--- a/GANDLF/optimizers/__init__.py
+++ b/GANDLF/optimizers/__init__.py
@@ -9,6 +9,7 @@
adadelta,
adagrad,
rmsprop,
+ radam,
)
global_optimizer_dict = {
@@ -22,6 +23,7 @@
"adadelta": adadelta,
"adagrad": adagrad,
"rmsprop": rmsprop,
+ "radam": radam,
}
diff --git a/GANDLF/optimizers/wrap_torch.py b/GANDLF/optimizers/wrap_torch.py
--- a/GANDLF/optimizers/wrap_torch.py
+++ b/GANDLF/optimizers/wrap_torch.py
@@ -9,6 +9,7 @@
Adadelta,
Adagrad,
RMSprop,
+ RAdam
)
@@ -223,3 +224,23 @@
weight_decay=parameters["optimizer"].get("weight_decay", 0),
)
+def radam(parameters):
+ """
+ Creates a RAdam optimizer from the PyTorch `torch.optim` module using the input parameters.
+
+ Args:
+ parameters (dict): A dictionary containing the input parameters for the optimizer.
+
+ Returns:
+ optimizer (torch.optim.RAdam): A RAdam optimizer.
+ """
+ # Create the optimizer using the input parameters
+ return RAdam(
+ parameters["model_parameters"],
+ lr=parameters.get("learning_rate"),
+ betas=parameters["optimizer"].get("betas", (0.9, 0.999)),
+ eps=parameters["optimizer"].get("eps", 1e-8),
+ weight_decay=parameters["optimizer"].get("weight_decay", 0),
+ foreach=parameters["optimizer"].get("foreach", None),
+ )
+
|
{"golden_diff": "diff --git a/GANDLF/optimizers/__init__.py b/GANDLF/optimizers/__init__.py\n--- a/GANDLF/optimizers/__init__.py\n+++ b/GANDLF/optimizers/__init__.py\n@@ -9,6 +9,7 @@\n adadelta,\n adagrad,\n rmsprop,\n+ radam,\n )\n \n global_optimizer_dict = {\n@@ -22,6 +23,7 @@\n \"adadelta\": adadelta,\n \"adagrad\": adagrad,\n \"rmsprop\": rmsprop,\n+ \"radam\": radam,\n }\n \n \ndiff --git a/GANDLF/optimizers/wrap_torch.py b/GANDLF/optimizers/wrap_torch.py\n--- a/GANDLF/optimizers/wrap_torch.py\n+++ b/GANDLF/optimizers/wrap_torch.py\n@@ -9,6 +9,7 @@\n Adadelta,\n Adagrad,\n RMSprop,\n+ RAdam\n )\n \n \n@@ -223,3 +224,23 @@\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n \n+def radam(parameters):\n+ \"\"\"\n+ Creates a RAdam optimizer from the PyTorch `torch.optim` module using the input parameters. \n+ \n+ Args: \n+ parameters (dict): A dictionary containing the input parameters for the optimizer.\n+ \n+ Returns: \n+ optimizer (torch.optim.RAdam): A RAdam optimizer.\n+ \"\"\"\n+ # Create the optimizer using the input parameters\n+ return RAdam(\n+ parameters[\"model_parameters\"],\n+ lr=parameters.get(\"learning_rate\"),\n+ betas=parameters[\"optimizer\"].get(\"betas\", (0.9, 0.999)),\n+ eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n+ weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n+ foreach=parameters[\"optimizer\"].get(\"foreach\", None),\n+ )\n+\n", "issue": "Add another optimizer (or 2) as option\n**Is your feature request related to a problem? Please describe.**\r\nCurrently, GaNDLF only offers Adam and SGD as options for optimizers, which could sometimes be a bit limiting.\r\n\r\n**Describe the solution you'd like**\r\nAdd ~[RMSProp](https://pytorch.org/docs/stable/generated/torch.optim.RMSprop.html#torch.optim.RMSprop), and optionally~ one or more of the following:\r\n- [LBFGS](https://pytorch.org/docs/1.13/generated/torch.optim.LBFGS.html#torch.optim.LBFGS) \r\n- [RAdam](https://pytorch.org/docs/1.13/generated/torch.optim.RAdam.html#torch.optim.RAdam)\r\n- ~[Rprop](https://pytorch.org/docs/1.11/generated/torch.optim.Rprop.html#torch.optim.Rprop)~\r\n- ~[ASGD](https://pytorch.org/docs/1.11/generated/torch.optim.ASGD.html#torch.optim.ASGD)~\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\r\n\r\nEDIT [2023/03/20]: updated to the optimizers that are actually missing.\n", "before_files": [{"content": "from .wrap_torch import (\n sgd,\n asgd,\n adam,\n adamw,\n adamax,\n # sparseadam,\n rprop,\n adadelta,\n adagrad,\n rmsprop,\n)\n\nglobal_optimizer_dict = {\n \"sgd\": sgd,\n \"asgd\": asgd,\n \"adam\": adam,\n \"adamw\": adamw,\n \"adamax\": adamax,\n # \"sparseadam\": sparseadam,\n \"rprop\": rprop,\n \"adadelta\": adadelta,\n \"adagrad\": adagrad,\n \"rmsprop\": rmsprop,\n}\n\n\ndef get_optimizer(params):\n \"\"\"\n Returns an instance of the specified optimizer from the PyTorch `torch.optim` module.\n\n Args:\n params (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Optimizer): An instance of the specified optimizer.\n\n \"\"\"\n # Retrieve the optimizer type from the input parameters\n optimizer_type = params[\"optimizer\"][\"type\"]\n\n # Create the optimizer instance using the specified type and input parameters\n if optimizer_type in global_optimizer_dict:\n optimizer_function = global_optimizer_dict[optimizer_type]\n return optimizer_function(params)\n else:\n raise ValueError(\"Optimizer type %s not found\" % 
optimizer_type)\n", "path": "GANDLF/optimizers/__init__.py"}, {"content": "from torch.optim import (\n SGD,\n ASGD,\n Rprop,\n Adam,\n AdamW,\n # SparseAdam,\n Adamax,\n Adadelta,\n Adagrad,\n RMSprop,\n)\n\n\ndef sgd(parameters):\n \"\"\"\n Creates a Stochastic Gradient Descent optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.SGD): A Stochastic Gradient Descent optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n optimizer = SGD(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n momentum=parameters[\"optimizer\"].get(\"momentum\", 0.9),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n dampening=parameters[\"optimizer\"].get(\"dampening\", 0),\n nesterov=parameters[\"optimizer\"].get(\"Nesterov\", False),\n )\n\n return optimizer\n\n\ndef asgd(parameters):\n \"\"\"\n Creates an Averaged Stochastic Gradient Descent optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.ASGD): An Averaged Stochastic Gradient Descent optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return ASGD(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n alpha=parameters[\"optimizer\"].get(\"alpha\", 0.75),\n t0=parameters[\"optimizer\"].get(\"t0\", 1e6),\n lambd=parameters[\"optimizer\"].get(\"lambd\", 1e-4),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n\ndef adam(parameters, opt_type=\"normal\"):\n \"\"\"\n Creates an Adam or AdamW optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n opt_type (str): A string indicating the type of optimizer to create (either \"normal\" for Adam or \"AdamW\" for AdamW).\n\n Returns:\n optimizer (torch.optim.Adam or torch.optim.AdamW): An Adam or AdamW optimizer.\n\n \"\"\"\n # Determine which optimizer to create based on opt_type\n if opt_type == \"normal\":\n optimizer_fn = Adam\n elif opt_type == \"AdamW\":\n optimizer_fn = AdamW\n else:\n raise ValueError(f\"Invalid optimizer type: {opt_type}\")\n\n # Create the optimizer using the input parameters\n return optimizer_fn(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n betas=parameters[\"optimizer\"].get(\"betas\", (0.9, 0.999)),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0.00005),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n amsgrad=parameters[\"optimizer\"].get(\"amsgrad\", False),\n )\n\n\ndef adamw(parameters):\n \"\"\"\n Creates an AdamW optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.AdamW): An AdamW optimizer.\n\n \"\"\"\n return adam(parameters, opt_type=\"AdamW\")\n\ndef adamax(parameters):\n \"\"\"\n Creates an Adamax optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Adamax): An Adamax optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return 
Adamax(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n betas=parameters[\"optimizer\"].get(\"betas\", (0.9, 0.999)),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0.00005),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n )\n\n\n# def sparseadam(parameters):\n# # pick defaults\n# if not (\"betas\" in parameters[\"optimizer\"]):\n# parameters[\"optimizer\"][\"betas\"] = (0.9, 0.999)\n# if not (\"eps\" in parameters[\"optimizer\"]):\n# parameters[\"optimizer\"][\"eps\"] = 1e-8\n\n# return SparseAdam(\n# parameters[\"model_parameters\"],\n# lr=parameters[\"learning_rate\"],\n# betas=parameters[\"optimizer\"][\"betas\"],\n# eps=parameters[\"optimizer\"][\"eps\"],\n# )\n\n\ndef rprop(parameters):\n \"\"\"\n Creates a Resilient Backpropagation optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Rprop): A Resilient Backpropagation optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return Rprop(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n etas=parameters[\"optimizer\"].get(\"etas\", (0.5, 1.2)),\n step_sizes=parameters[\"optimizer\"].get(\"step_sizes\", (1e-7, 50)),\n )\n\n\ndef adadelta(parameters):\n \"\"\"\n Creates an Adadelta optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Adadelta): An Adadelta optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return Adadelta(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n rho=parameters[\"optimizer\"].get(\"rho\", 0.9),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-6),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n\ndef adagrad(parameters):\n \"\"\"\n Creates an Adagrad optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Adagrad): An Adagrad optimizer.\n\n \"\"\"\n\n # Create the optimizer using the input parameters\n return Adagrad(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n lr_decay=parameters[\"optimizer\"].get(\"lr_decay\", 0),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-6),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n\ndef rmsprop(parameters):\n \"\"\"\n Creates an RMSprop optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.RMSprop): An RMSprop optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return RMSprop(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n alpha=parameters[\"optimizer\"].get(\"alpha\", 0.99),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n centered=parameters[\"optimizer\"].get(\"centered\", False),\n momentum=parameters[\"optimizer\"].get(\"momentum\", 0),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n", "path": "GANDLF/optimizers/wrap_torch.py"}], "after_files": [{"content": "from .wrap_torch import (\n sgd,\n asgd,\n adam,\n adamw,\n adamax,\n # sparseadam,\n 
rprop,\n adadelta,\n adagrad,\n rmsprop,\n radam,\n)\n\nglobal_optimizer_dict = {\n \"sgd\": sgd,\n \"asgd\": asgd,\n \"adam\": adam,\n \"adamw\": adamw,\n \"adamax\": adamax,\n # \"sparseadam\": sparseadam,\n \"rprop\": rprop,\n \"adadelta\": adadelta,\n \"adagrad\": adagrad,\n \"rmsprop\": rmsprop,\n \"radam\": radam,\n}\n\n\ndef get_optimizer(params):\n \"\"\"\n Returns an instance of the specified optimizer from the PyTorch `torch.optim` module.\n\n Args:\n params (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Optimizer): An instance of the specified optimizer.\n\n \"\"\"\n # Retrieve the optimizer type from the input parameters\n optimizer_type = params[\"optimizer\"][\"type\"]\n\n # Create the optimizer instance using the specified type and input parameters\n if optimizer_type in global_optimizer_dict:\n optimizer_function = global_optimizer_dict[optimizer_type]\n return optimizer_function(params)\n else:\n raise ValueError(\"Optimizer type %s not found\" % optimizer_type)\n", "path": "GANDLF/optimizers/__init__.py"}, {"content": "from torch.optim import (\n SGD,\n ASGD,\n Rprop,\n Adam,\n AdamW,\n # SparseAdam,\n Adamax,\n Adadelta,\n Adagrad,\n RMSprop,\n RAdam\n)\n\n\ndef sgd(parameters):\n \"\"\"\n Creates a Stochastic Gradient Descent optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.SGD): A Stochastic Gradient Descent optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n optimizer = SGD(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n momentum=parameters[\"optimizer\"].get(\"momentum\", 0.9),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n dampening=parameters[\"optimizer\"].get(\"dampening\", 0),\n nesterov=parameters[\"optimizer\"].get(\"Nesterov\", False),\n )\n\n return optimizer\n\n\ndef asgd(parameters):\n \"\"\"\n Creates an Averaged Stochastic Gradient Descent optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.ASGD): An Averaged Stochastic Gradient Descent optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return ASGD(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n alpha=parameters[\"optimizer\"].get(\"alpha\", 0.75),\n t0=parameters[\"optimizer\"].get(\"t0\", 1e6),\n lambd=parameters[\"optimizer\"].get(\"lambd\", 1e-4),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n\ndef adam(parameters, opt_type=\"normal\"):\n \"\"\"\n Creates an Adam or AdamW optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n opt_type (str): A string indicating the type of optimizer to create (either \"normal\" for Adam or \"AdamW\" for AdamW).\n\n Returns:\n optimizer (torch.optim.Adam or torch.optim.AdamW): An Adam or AdamW optimizer.\n\n \"\"\"\n # Determine which optimizer to create based on opt_type\n if opt_type == \"normal\":\n optimizer_fn = Adam\n elif opt_type == \"AdamW\":\n optimizer_fn = AdamW\n else:\n raise ValueError(f\"Invalid optimizer type: {opt_type}\")\n\n # Create the optimizer using the input parameters\n return optimizer_fn(\n 
parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n betas=parameters[\"optimizer\"].get(\"betas\", (0.9, 0.999)),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0.00005),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n amsgrad=parameters[\"optimizer\"].get(\"amsgrad\", False),\n )\n\n\ndef adamw(parameters):\n \"\"\"\n Creates an AdamW optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.AdamW): An AdamW optimizer.\n\n \"\"\"\n return adam(parameters, opt_type=\"AdamW\")\n\ndef adamax(parameters):\n \"\"\"\n Creates an Adamax optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Adamax): An Adamax optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return Adamax(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n betas=parameters[\"optimizer\"].get(\"betas\", (0.9, 0.999)),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0.00005),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n )\n\n\n# def sparseadam(parameters):\n# # pick defaults\n# if not (\"betas\" in parameters[\"optimizer\"]):\n# parameters[\"optimizer\"][\"betas\"] = (0.9, 0.999)\n# if not (\"eps\" in parameters[\"optimizer\"]):\n# parameters[\"optimizer\"][\"eps\"] = 1e-8\n\n# return SparseAdam(\n# parameters[\"model_parameters\"],\n# lr=parameters[\"learning_rate\"],\n# betas=parameters[\"optimizer\"][\"betas\"],\n# eps=parameters[\"optimizer\"][\"eps\"],\n# )\n\n\ndef rprop(parameters):\n \"\"\"\n Creates a Resilient Backpropagation optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Rprop): A Resilient Backpropagation optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return Rprop(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n etas=parameters[\"optimizer\"].get(\"etas\", (0.5, 1.2)),\n step_sizes=parameters[\"optimizer\"].get(\"step_sizes\", (1e-7, 50)),\n )\n\n\ndef adadelta(parameters):\n \"\"\"\n Creates an Adadelta optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Adadelta): An Adadelta optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return Adadelta(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n rho=parameters[\"optimizer\"].get(\"rho\", 0.9),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-6),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n\ndef adagrad(parameters):\n \"\"\"\n Creates an Adagrad optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.Adagrad): An Adagrad optimizer.\n\n \"\"\"\n\n # Create the optimizer using the input parameters\n return Adagrad(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n lr_decay=parameters[\"optimizer\"].get(\"lr_decay\", 0),\n 
eps=parameters[\"optimizer\"].get(\"eps\", 1e-6),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\n\ndef rmsprop(parameters):\n \"\"\"\n Creates an RMSprop optimizer from the PyTorch `torch.optim` module using the input parameters.\n\n Args:\n parameters (dict): A dictionary containing the input parameters for the optimizer.\n\n Returns:\n optimizer (torch.optim.RMSprop): An RMSprop optimizer.\n\n \"\"\"\n # Create the optimizer using the input parameters\n return RMSprop(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n alpha=parameters[\"optimizer\"].get(\"alpha\", 0.99),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n centered=parameters[\"optimizer\"].get(\"centered\", False),\n momentum=parameters[\"optimizer\"].get(\"momentum\", 0),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n )\n\ndef radam(parameters):\n \"\"\"\n Creates a RAdam optimizer from the PyTorch `torch.optim` module using the input parameters. \n \n Args: \n parameters (dict): A dictionary containing the input parameters for the optimizer.\n \n Returns: \n optimizer (torch.optim.RAdam): A RAdam optimizer.\n \"\"\"\n # Create the optimizer using the input parameters\n return RAdam(\n parameters[\"model_parameters\"],\n lr=parameters.get(\"learning_rate\"),\n betas=parameters[\"optimizer\"].get(\"betas\", (0.9, 0.999)),\n eps=parameters[\"optimizer\"].get(\"eps\", 1e-8),\n weight_decay=parameters[\"optimizer\"].get(\"weight_decay\", 0),\n foreach=parameters[\"optimizer\"].get(\"foreach\", None),\n )\n\n", "path": "GANDLF/optimizers/wrap_torch.py"}]}
| 3,211 | 452 |
gh_patches_debug_34072 | rasdani/github-patches | git_diff | conan-io__conan-center-index-20842 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[request] geographiclib/2.1.1
### Package Details
* Package Name/Version: **geographiclib/2.1.1**
* Changelog: **https://geographiclib.sourceforge.io/C++/doc/changes.html**
* Source-Link: https://github.com/geographiclib/geographiclib/tree/v2.1.1
The above-mentioned version has just been released by the upstream project and is not yet available as a recipe. Please add this version.
--- END ISSUE ---
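For illustration only (editorial, not part of the request): once the recipe gains the new version, a consumer package would pick it up with a reference like the one below; the class name and generators are arbitrary:
```python
from conan import ConanFile


class ConsumerExample(ConanFile):
    # Hypothetical consumer pulling in the requested release.
    settings = "os", "arch", "compiler", "build_type"
    requires = "geographiclib/2.1.1"
    generators = "CMakeToolchain", "CMakeDeps"
```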
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/geographiclib/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.build import check_min_cppstd
4 from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
5 from conan.tools.files import (
6 apply_conandata_patches, collect_libs, copy, export_conandata_patches, get,
7 replace_in_file, rm, rmdir
8 )
9 from conan.tools.scm import Version
10 import os
11
12 required_conan_version = ">=1.53.0"
13
14
15 class GeographiclibConan(ConanFile):
16 name = "geographiclib"
17 description = "Convert geographic units and solve geodesic problems"
18 topics = ("geographiclib", "geodesic")
19 url = "https://github.com/conan-io/conan-center-index"
20 homepage = "https://geographiclib.sourceforge.io"
21 license = "MIT"
22
23 settings = "os", "arch", "compiler", "build_type"
24 options = {
25 "shared": [True, False],
26 "fPIC": [True, False],
27 "precision": ["float", "double", "extended", "quadruple", "variable"],
28 "tools": [True, False],
29 }
30 default_options = {
31 "shared": False,
32 "fPIC": True,
33 "precision": "double",
34 "tools": True,
35 }
36
37 def export_sources(self):
38 export_conandata_patches(self)
39
40 def config_options(self):
41 if self.settings.os == "Windows":
42 del self.options.fPIC
43
44 def configure(self):
45 if self.options.shared:
46 self.options.rm_safe("fPIC")
47
48 def layout(self):
49 cmake_layout(self, src_folder="src")
50
51 @property
52 def _compilers_minimum_version(self):
53 # Minimum compiler version having C++11 math functions
54 return {
55 "apple-clang": "3.3",
56 "gcc": "4.9",
57 "clang": "6",
58 "Visual Studio": "14", # guess
59 "msvc": "190",
60 }
61
62 def validate(self):
63 if Version(self.version) >= "1.51":
64 if self.settings.compiler.get_safe("cppstd"):
65 check_min_cppstd(self, 11)
66
67 def loose_lt_semver(v1, v2):
68 lv1 = [int(v) for v in v1.split(".")]
69 lv2 = [int(v) for v in v2.split(".")]
70 min_length = min(len(lv1), len(lv2))
71 return lv1[:min_length] < lv2[:min_length]
72
73 minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
74 if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):
75 raise ConanInvalidConfiguration(
76 f"{self.ref} requires C++11 math functions, which your compiler does not support."
77 )
78
79 if self.options.precision not in ["float", "double"]:
80 # FIXME: add support for extended, quadruple and variable precisions
81 # (may require external libs: boost multiprecision for quadruple, mpfr for variable)
82 raise ConanInvalidConfiguration("extended, quadruple and variable precisions not yet supported in this recipe")
83
84 def source(self):
85 get(self, **self.conan_data["sources"][self.version], strip_root=True)
86
87 @property
88 def _cmake_option_precision(self):
89 return {
90 "float": 1,
91 "double": 2,
92 "extended": 3,
93 "quadruple": 4,
94 "variable": 5,
95 }.get(str(self.options.precision))
96
97 def generate(self):
98 tc = CMakeToolchain(self)
99 tc.variables["GEOGRAPHICLIB_LIB_TYPE"] = "SHARED" if self.options.shared else "STATIC"
100 tc.variables["GEOGRAPHICLIB_PRECISION"] = self._cmake_option_precision
101 tc.generate()
102
103 def _patch_sources(self):
104 apply_conandata_patches(self)
105 cmakelists = os.path.join(self.source_folder, "CMakeLists.txt")
106 # it does not work on Windows but is not needed
107 replace_in_file(self, cmakelists, "add_subdirectory (js)", "")
108 # Don't install system libs
109 replace_in_file(self, cmakelists, "include (InstallRequiredSystemLibraries)", "")
110 # Don't build tools if asked
111 if not self.options.tools:
112 replace_in_file(self, cmakelists, "add_subdirectory (tools)", "")
113 replace_in_file(self, os.path.join(self.source_folder, "cmake", "CMakeLists.txt"),
114 "${TOOLS}", "")
115
116 def build(self):
117 self._patch_sources()
118 cmake = CMake(self)
119 cmake.configure()
120 cmake.build()
121
122 def package(self):
123 copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
124 cmake = CMake(self)
125 cmake.install()
126 for folder in [
127 "share", "sbin", "python", "matlab", "doc", "cmake",
128 os.path.join("lib", "python"),
129 os.path.join("lib", "pkgconfig"),
130 os.path.join("lib", "cmake"),
131 ]:
132 rmdir(self, os.path.join(os.path.join(self.package_folder, folder)))
133 rm(self, "*.pdb", os.path.join(self.package_folder, "bin"))
134
135 def package_info(self):
136 self.cpp_info.set_property("cmake_file_name", "geographiclib")
137 self.cpp_info.set_property("cmake_target_name", "GeographicLib::GeographicLib")
138 self.cpp_info.set_property("pkg_config_name", "geographiclib")
139 self.cpp_info.libs = collect_libs(self)
140 self.cpp_info.defines.append("GEOGRAPHICLIB_SHARED_LIB={}".format("1" if self.options.shared else "0"))
141
142 # TODO: to remove in conan v2 once cmake_find_package_* generators removed
143 self.cpp_info.filenames["cmake_find_package"] = "geographiclib"
144 self.cpp_info.filenames["cmake_find_package_multi"] = "geographiclib"
145 self.cpp_info.names["cmake_find_package"] = "GeographicLib"
146 self.cpp_info.names["cmake_find_package_multi"] = "GeographicLib"
147 if self.options.tools:
148 self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/geographiclib/all/conanfile.py b/recipes/geographiclib/all/conanfile.py
--- a/recipes/geographiclib/all/conanfile.py
+++ b/recipes/geographiclib/all/conanfile.py
@@ -65,10 +65,7 @@
check_min_cppstd(self, 11)
def loose_lt_semver(v1, v2):
- lv1 = [int(v) for v in v1.split(".")]
- lv2 = [int(v) for v in v2.split(".")]
- min_length = min(len(lv1), len(lv2))
- return lv1[:min_length] < lv2[:min_length]
+ return all(int(p1) < int(p2) for p1, p2 in zip(str(v1).split("."), str(v2).split(".")))
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):
@@ -103,8 +100,9 @@
def _patch_sources(self):
apply_conandata_patches(self)
cmakelists = os.path.join(self.source_folder, "CMakeLists.txt")
- # it does not work on Windows but is not needed
- replace_in_file(self, cmakelists, "add_subdirectory (js)", "")
+ if Version(self.version) < "2":
+ # it does not work on Windows but is not needed
+ replace_in_file(self, cmakelists, "add_subdirectory (js)", "")
# Don't install system libs
replace_in_file(self, cmakelists, "include (InstallRequiredSystemLibraries)", "")
# Don't build tools if asked
@@ -112,6 +110,9 @@
replace_in_file(self, cmakelists, "add_subdirectory (tools)", "")
replace_in_file(self, os.path.join(self.source_folder, "cmake", "CMakeLists.txt"),
"${TOOLS}", "")
+ # Disable -Werror
+ replace_in_file(self, cmakelists, "-Werror", "")
+ replace_in_file(self, cmakelists, "/WX", "")
def build(self):
self._patch_sources()
|
{"golden_diff": "diff --git a/recipes/geographiclib/all/conanfile.py b/recipes/geographiclib/all/conanfile.py\n--- a/recipes/geographiclib/all/conanfile.py\n+++ b/recipes/geographiclib/all/conanfile.py\n@@ -65,10 +65,7 @@\n check_min_cppstd(self, 11)\n \n def loose_lt_semver(v1, v2):\n- lv1 = [int(v) for v in v1.split(\".\")]\n- lv2 = [int(v) for v in v2.split(\".\")]\n- min_length = min(len(lv1), len(lv2))\n- return lv1[:min_length] < lv2[:min_length]\n+ return all(int(p1) < int(p2) for p1, p2 in zip(str(v1).split(\".\"), str(v2).split(\".\")))\n \n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):\n@@ -103,8 +100,9 @@\n def _patch_sources(self):\n apply_conandata_patches(self)\n cmakelists = os.path.join(self.source_folder, \"CMakeLists.txt\")\n- # it does not work on Windows but is not needed\n- replace_in_file(self, cmakelists, \"add_subdirectory (js)\", \"\")\n+ if Version(self.version) < \"2\":\n+ # it does not work on Windows but is not needed\n+ replace_in_file(self, cmakelists, \"add_subdirectory (js)\", \"\")\n # Don't install system libs\n replace_in_file(self, cmakelists, \"include (InstallRequiredSystemLibraries)\", \"\")\n # Don't build tools if asked\n@@ -112,6 +110,9 @@\n replace_in_file(self, cmakelists, \"add_subdirectory (tools)\", \"\")\n replace_in_file(self, os.path.join(self.source_folder, \"cmake\", \"CMakeLists.txt\"),\n \"${TOOLS}\", \"\")\n+ # Disable -Werror\n+ replace_in_file(self, cmakelists, \"-Werror\", \"\")\n+ replace_in_file(self, cmakelists, \"/WX\", \"\")\n \n def build(self):\n self._patch_sources()\n", "issue": "[request] geographiclib/2.1.1\n### Package Details\r\n * Package Name/Version: **geographiclib/2.1.1**\r\n * Changelog: **https://geographiclib.sourceforge.io/C++/doc/changes.html**\r\n\r\n* Source-Link: https://github.com/geographiclib/geographiclib/tree/v2.1.1\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. 
Please add this version.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import (\n apply_conandata_patches, collect_libs, copy, export_conandata_patches, get,\n replace_in_file, rm, rmdir\n)\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass GeographiclibConan(ConanFile):\n name = \"geographiclib\"\n description = \"Convert geographic units and solve geodesic problems\"\n topics = (\"geographiclib\", \"geodesic\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://geographiclib.sourceforge.io\"\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"precision\": [\"float\", \"double\", \"extended\", \"quadruple\", \"variable\"],\n \"tools\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"precision\": \"double\",\n \"tools\": True,\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n @property\n def _compilers_minimum_version(self):\n # Minimum compiler version having C++11 math functions\n return {\n \"apple-clang\": \"3.3\",\n \"gcc\": \"4.9\",\n \"clang\": \"6\",\n \"Visual Studio\": \"14\", # guess\n \"msvc\": \"190\",\n }\n\n def validate(self):\n if Version(self.version) >= \"1.51\":\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n\n def loose_lt_semver(v1, v2):\n lv1 = [int(v) for v in v1.split(\".\")]\n lv2 = [int(v) for v in v2.split(\".\")]\n min_length = min(len(lv1), len(lv2))\n return lv1[:min_length] < lv2[:min_length]\n\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++11 math functions, which your compiler does not support.\"\n )\n\n if self.options.precision not in [\"float\", \"double\"]:\n # FIXME: add support for extended, quadruple and variable precisions\n # (may require external libs: boost multiprecision for quadruple, mpfr for variable)\n raise ConanInvalidConfiguration(\"extended, quadruple and variable precisions not yet supported in this recipe\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n @property\n def _cmake_option_precision(self):\n return {\n \"float\": 1,\n \"double\": 2,\n \"extended\": 3,\n \"quadruple\": 4,\n \"variable\": 5,\n }.get(str(self.options.precision))\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"GEOGRAPHICLIB_LIB_TYPE\"] = \"SHARED\" if self.options.shared else \"STATIC\"\n tc.variables[\"GEOGRAPHICLIB_PRECISION\"] = self._cmake_option_precision\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n cmakelists = os.path.join(self.source_folder, \"CMakeLists.txt\")\n # it does not work on Windows but is not needed\n replace_in_file(self, cmakelists, \"add_subdirectory (js)\", \"\")\n # Don't install system libs\n replace_in_file(self, 
cmakelists, \"include (InstallRequiredSystemLibraries)\", \"\")\n # Don't build tools if asked\n if not self.options.tools:\n replace_in_file(self, cmakelists, \"add_subdirectory (tools)\", \"\")\n replace_in_file(self, os.path.join(self.source_folder, \"cmake\", \"CMakeLists.txt\"),\n \"${TOOLS}\", \"\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.txt\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n for folder in [\n \"share\", \"sbin\", \"python\", \"matlab\", \"doc\", \"cmake\",\n os.path.join(\"lib\", \"python\"),\n os.path.join(\"lib\", \"pkgconfig\"),\n os.path.join(\"lib\", \"cmake\"),\n ]:\n rmdir(self, os.path.join(os.path.join(self.package_folder, folder)))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"geographiclib\")\n self.cpp_info.set_property(\"cmake_target_name\", \"GeographicLib::GeographicLib\")\n self.cpp_info.set_property(\"pkg_config_name\", \"geographiclib\")\n self.cpp_info.libs = collect_libs(self)\n self.cpp_info.defines.append(\"GEOGRAPHICLIB_SHARED_LIB={}\".format(\"1\" if self.options.shared else \"0\"))\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self.cpp_info.filenames[\"cmake_find_package\"] = \"geographiclib\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"geographiclib\"\n self.cpp_info.names[\"cmake_find_package\"] = \"GeographicLib\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"GeographicLib\"\n if self.options.tools:\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/geographiclib/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\nfrom conan.tools.files import (\n apply_conandata_patches, collect_libs, copy, export_conandata_patches, get,\n replace_in_file, rm, rmdir\n)\nfrom conan.tools.scm import Version\nimport os\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass GeographiclibConan(ConanFile):\n name = \"geographiclib\"\n description = \"Convert geographic units and solve geodesic problems\"\n topics = (\"geographiclib\", \"geodesic\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://geographiclib.sourceforge.io\"\n license = \"MIT\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"precision\": [\"float\", \"double\", \"extended\", \"quadruple\", \"variable\"],\n \"tools\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"precision\": \"double\",\n \"tools\": True,\n }\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n @property\n def _compilers_minimum_version(self):\n # Minimum compiler version having C++11 math functions\n return {\n \"apple-clang\": \"3.3\",\n \"gcc\": \"4.9\",\n \"clang\": \"6\",\n \"Visual Studio\": \"14\", # guess\n \"msvc\": \"190\",\n }\n\n def 
validate(self):\n if Version(self.version) >= \"1.51\":\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 11)\n\n def loose_lt_semver(v1, v2):\n return all(int(p1) < int(p2) for p1, p2 in zip(str(v1).split(\".\"), str(v2).split(\".\")))\n\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if minimum_version and loose_lt_semver(str(self.settings.compiler.version), minimum_version):\n raise ConanInvalidConfiguration(\n f\"{self.ref} requires C++11 math functions, which your compiler does not support.\"\n )\n\n if self.options.precision not in [\"float\", \"double\"]:\n # FIXME: add support for extended, quadruple and variable precisions\n # (may require external libs: boost multiprecision for quadruple, mpfr for variable)\n raise ConanInvalidConfiguration(\"extended, quadruple and variable precisions not yet supported in this recipe\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n @property\n def _cmake_option_precision(self):\n return {\n \"float\": 1,\n \"double\": 2,\n \"extended\": 3,\n \"quadruple\": 4,\n \"variable\": 5,\n }.get(str(self.options.precision))\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.variables[\"GEOGRAPHICLIB_LIB_TYPE\"] = \"SHARED\" if self.options.shared else \"STATIC\"\n tc.variables[\"GEOGRAPHICLIB_PRECISION\"] = self._cmake_option_precision\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n cmakelists = os.path.join(self.source_folder, \"CMakeLists.txt\")\n if Version(self.version) < \"2\":\n # it does not work on Windows but is not needed\n replace_in_file(self, cmakelists, \"add_subdirectory (js)\", \"\")\n # Don't install system libs\n replace_in_file(self, cmakelists, \"include (InstallRequiredSystemLibraries)\", \"\")\n # Don't build tools if asked\n if not self.options.tools:\n replace_in_file(self, cmakelists, \"add_subdirectory (tools)\", \"\")\n replace_in_file(self, os.path.join(self.source_folder, \"cmake\", \"CMakeLists.txt\"),\n \"${TOOLS}\", \"\")\n # Disable -Werror\n replace_in_file(self, cmakelists, \"-Werror\", \"\")\n replace_in_file(self, cmakelists, \"/WX\", \"\")\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n copy(self, \"LICENSE.txt\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n cmake = CMake(self)\n cmake.install()\n for folder in [\n \"share\", \"sbin\", \"python\", \"matlab\", \"doc\", \"cmake\",\n os.path.join(\"lib\", \"python\"),\n os.path.join(\"lib\", \"pkgconfig\"),\n os.path.join(\"lib\", \"cmake\"),\n ]:\n rmdir(self, os.path.join(os.path.join(self.package_folder, folder)))\n rm(self, \"*.pdb\", os.path.join(self.package_folder, \"bin\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"geographiclib\")\n self.cpp_info.set_property(\"cmake_target_name\", \"GeographicLib::GeographicLib\")\n self.cpp_info.set_property(\"pkg_config_name\", \"geographiclib\")\n self.cpp_info.libs = collect_libs(self)\n self.cpp_info.defines.append(\"GEOGRAPHICLIB_SHARED_LIB={}\".format(\"1\" if self.options.shared else \"0\"))\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self.cpp_info.filenames[\"cmake_find_package\"] = \"geographiclib\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"geographiclib\"\n self.cpp_info.names[\"cmake_find_package\"] = \"GeographicLib\"\n 
self.cpp_info.names[\"cmake_find_package_multi\"] = \"GeographicLib\"\n if self.options.tools:\n self.env_info.PATH.append(os.path.join(self.package_folder, \"bin\"))\n", "path": "recipes/geographiclib/all/conanfile.py"}]}
| 2,101 | 506 |
gh_patches_debug_12577
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-1537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Types not always enforced on events
Haven't dug deeply on this one but something equivalent to this is compiling for me:
```python
Sale: event({eth_sold: indexed(uint256(wei))})
@public
def logSale(amount: uint256):
log.Sale(amount)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vyper/parser/events.py`
Content:
```
1 from vyper import ast
2 from vyper.exceptions import (
3 InvalidLiteralException,
4 TypeMismatchException,
5 )
6 from vyper.parser.expr import (
7 Expr,
8 )
9 from vyper.parser.lll_node import (
10 LLLnode,
11 )
12 from vyper.parser.parser_utils import (
13 base_type_conversion,
14 byte_array_to_num,
15 getpos,
16 make_byte_array_copier,
17 make_setter,
18 unwrap_location,
19 )
20 from vyper.types.types import (
21 BaseType,
22 ByteArrayLike,
23 ListType,
24 get_size_of_type,
25 )
26 from vyper.utils import (
27 bytes_to_int,
28 ceil32,
29 string_to_bytes,
30 )
31
32
33 def pack_logging_topics(event_id, args, expected_topics, context, pos):
34 topics = [event_id]
35 code_pos = pos
36 for pos, expected_topic in enumerate(expected_topics):
37 expected_type = expected_topic.typ
38 arg = args[pos]
39 value = Expr(arg, context).lll_node
40 arg_type = value.typ
41
42 if isinstance(arg_type, ByteArrayLike) and isinstance(expected_type, ByteArrayLike):
43 if arg_type.maxlen > expected_type.maxlen:
44 raise TypeMismatchException(
45 "Topic input bytes are too big: %r %r" % (arg_type, expected_type), code_pos
46 )
47 if isinstance(arg, ast.Str):
48 bytez, bytez_length = string_to_bytes(arg.s)
49 if len(bytez) > 32:
50 raise InvalidLiteralException(
51 "Can only log a maximum of 32 bytes at a time.", code_pos
52 )
53 topics.append(bytes_to_int(bytez + b'\x00' * (32 - bytez_length)))
54 else:
55 if value.location == "memory":
56 size = ['mload', value]
57 elif value.location == "storage":
58 size = ['sload', ['sha3_32', value]]
59 topics.append(byte_array_to_num(value, arg, 'uint256', size))
60 else:
61 value = unwrap_location(value)
62 value = base_type_conversion(value, arg_type, expected_type, pos=code_pos)
63 topics.append(value)
64
65 return topics
66
67
68 def pack_args_by_32(holder, maxlen, arg, typ, context, placeholder,
69 dynamic_offset_counter=None, datamem_start=None, zero_pad_i=None, pos=None):
70 """
71 Copy necessary variables to pre-allocated memory section.
72
73 :param holder: Complete holder for all args
74 :param maxlen: Total length in bytes of the full arg section (static + dynamic).
75 :param arg: Current arg to pack
76 :param context: Context of arg
77 :param placeholder: Static placeholder for static argument part.
78 :param dynamic_offset_counter: position counter stored in static args.
79 :param dynamic_placeholder: pointer to current position in memory to write dynamic values to.
80 :param datamem_start: position where the whole datemem section starts.
81 """
82
83 if isinstance(typ, BaseType):
84 if isinstance(arg, LLLnode):
85 value = unwrap_location(arg)
86 else:
87 value = Expr(arg, context).lll_node
88 value = base_type_conversion(value, value.typ, typ, pos)
89 holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))
90 elif isinstance(typ, ByteArrayLike):
91
92 if isinstance(arg, LLLnode): # Is prealloacted variable.
93 source_lll = arg
94 else:
95 source_lll = Expr(arg, context).lll_node
96
97 # Set static offset, in arg slot.
98 holder.append(LLLnode.from_list(['mstore', placeholder, ['mload', dynamic_offset_counter]]))
99 # Get the biginning to write the ByteArray to.
100 dest_placeholder = LLLnode.from_list(
101 ['add', datamem_start, ['mload', dynamic_offset_counter]],
102 typ=typ, location='memory', annotation="pack_args_by_32:dest_placeholder")
103 copier = make_byte_array_copier(dest_placeholder, source_lll, pos=pos)
104 holder.append(copier)
105 # Add zero padding.
106 new_maxlen = ceil32(source_lll.typ.maxlen)
107
108 holder.append([
109 'with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]], [
110 'seq', ['with', '_bytearray_loc', dest_placeholder, [
111 'seq', ['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen, [
112 'seq',
113 # stay within allocated bounds
114 ['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'],
115 [
116 'mstore8',
117 ['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]],
118 0,
119 ],
120 ]],
121 ]],
122 ]
123 ])
124
125 # Increment offset counter.
126 increment_counter = LLLnode.from_list([
127 'mstore', dynamic_offset_counter,
128 [
129 'add',
130 ['add', ['mload', dynamic_offset_counter], ['ceil32', ['mload', dest_placeholder]]],
131 32,
132 ],
133 ], annotation='Increment dynamic offset counter')
134 holder.append(increment_counter)
135 elif isinstance(typ, ListType):
136 maxlen += (typ.count - 1) * 32
137 typ = typ.subtype
138
139 def check_list_type_match(provided): # Check list types match.
140 if provided != typ:
141 raise TypeMismatchException(
142 "Log list type '%s' does not match provided, expected '%s'" % (provided, typ)
143 )
144
145 # NOTE: Below code could be refactored into iterators/getter functions for each type of
146 # repetitive loop. But seeing how each one is a unique for loop, and in which way
147 # the sub value makes the difference in each type of list clearer.
148
149 # List from storage
150 if isinstance(arg, ast.Attribute) and arg.value.id == 'self':
151 stor_list = context.globals[arg.attr]
152 check_list_type_match(stor_list.typ.subtype)
153 size = stor_list.typ.count
154 mem_offset = 0
155 for i in range(0, size):
156 storage_offset = i
157 arg2 = LLLnode.from_list(
158 ['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],
159 typ=typ,
160 )
161 holder, maxlen = pack_args_by_32(
162 holder,
163 maxlen,
164 arg2,
165 typ,
166 context,
167 placeholder + mem_offset,
168 pos=pos,
169 )
170 mem_offset += get_size_of_type(typ) * 32
171
172 # List from variable.
173 elif isinstance(arg, ast.Name):
174 size = context.vars[arg.id].size
175 pos = context.vars[arg.id].pos
176 check_list_type_match(context.vars[arg.id].typ.subtype)
177 mem_offset = 0
178 for _ in range(0, size):
179 arg2 = LLLnode.from_list(
180 pos + mem_offset,
181 typ=typ,
182 location=context.vars[arg.id].location
183 )
184 holder, maxlen = pack_args_by_32(
185 holder,
186 maxlen,
187 arg2,
188 typ,
189 context,
190 placeholder + mem_offset,
191 pos=pos,
192 )
193 mem_offset += get_size_of_type(typ) * 32
194
195 # List from list literal.
196 else:
197 mem_offset = 0
198 for arg2 in arg.elts:
199 holder, maxlen = pack_args_by_32(
200 holder,
201 maxlen,
202 arg2,
203 typ,
204 context,
205 placeholder + mem_offset,
206 pos=pos,
207 )
208 mem_offset += get_size_of_type(typ) * 32
209 return holder, maxlen
210
211
212 # Pack logging data arguments
213 def pack_logging_data(expected_data, args, context, pos):
214 # Checks to see if there's any data
215 if not args:
216 return ['seq'], 0, None, 0
217 holder = ['seq']
218 maxlen = len(args) * 32 # total size of all packed args (upper limit)
219
220 # Unroll any function calls, to temp variables.
221 prealloacted = {}
222 for idx, (arg, _expected_arg) in enumerate(zip(args, expected_data)):
223
224 if isinstance(arg, (ast.Str, ast.Call)):
225 expr = Expr(arg, context)
226 source_lll = expr.lll_node
227 typ = source_lll.typ
228
229 if isinstance(arg, ast.Str):
230 if len(arg.s) > typ.maxlen:
231 raise TypeMismatchException(
232 "Data input bytes are to big: %r %r" % (len(arg.s), typ), pos
233 )
234
235 tmp_variable = context.new_variable(
236 '_log_pack_var_%i_%i' % (arg.lineno, arg.col_offset),
237 source_lll.typ,
238 )
239 tmp_variable_node = LLLnode.from_list(
240 tmp_variable,
241 typ=source_lll.typ,
242 pos=getpos(arg),
243 location="memory",
244 annotation='log_prealloacted %r' % source_lll.typ,
245 )
246 # Store len.
247 # holder.append(['mstore', len_placeholder, ['mload', unwrap_location(source_lll)]])
248 # Copy bytes.
249
250 holder.append(
251 make_setter(tmp_variable_node, source_lll, pos=getpos(arg), location='memory')
252 )
253 prealloacted[idx] = tmp_variable_node
254
255 requires_dynamic_offset = any([isinstance(data.typ, ByteArrayLike) for data in expected_data])
256 if requires_dynamic_offset:
257 # Iterator used to zero pad memory.
258 zero_pad_i = context.new_placeholder(BaseType('uint256'))
259 dynamic_offset_counter = context.new_placeholder(BaseType(32))
260 dynamic_placeholder = context.new_placeholder(BaseType(32))
261 else:
262 dynamic_offset_counter = None
263 zero_pad_i = None
264
265 # Create placeholder for static args. Note: order of new_*() is important.
266 placeholder_map = {}
267 for i, (_arg, data) in enumerate(zip(args, expected_data)):
268 typ = data.typ
269 if not isinstance(typ, ByteArrayLike):
270 placeholder = context.new_placeholder(typ)
271 else:
272 placeholder = context.new_placeholder(BaseType(32))
273 placeholder_map[i] = placeholder
274
275 # Populate static placeholders.
276 for i, (arg, data) in enumerate(zip(args, expected_data)):
277 typ = data.typ
278 placeholder = placeholder_map[i]
279 if not isinstance(typ, ByteArrayLike):
280 holder, maxlen = pack_args_by_32(
281 holder,
282 maxlen,
283 prealloacted.get(i, arg),
284 typ,
285 context,
286 placeholder,
287 zero_pad_i=zero_pad_i,
288 pos=pos,
289 )
290
291 # Dynamic position starts right after the static args.
292 if requires_dynamic_offset:
293 holder.append(LLLnode.from_list(['mstore', dynamic_offset_counter, maxlen]))
294
295 # Calculate maximum dynamic offset placeholders, used for gas estimation.
296 for _arg, data in zip(args, expected_data):
297 typ = data.typ
298 if isinstance(typ, ByteArrayLike):
299 maxlen += 32 + ceil32(typ.maxlen)
300
301 if requires_dynamic_offset:
302 datamem_start = dynamic_placeholder + 32
303 else:
304 datamem_start = placeholder_map[0]
305
306 # Copy necessary data into allocated dynamic section.
307 for i, (arg, data) in enumerate(zip(args, expected_data)):
308 typ = data.typ
309 if isinstance(typ, ByteArrayLike):
310 pack_args_by_32(
311 holder=holder,
312 maxlen=maxlen,
313 arg=prealloacted.get(i, arg),
314 typ=typ,
315 context=context,
316 placeholder=placeholder_map[i],
317 datamem_start=datamem_start,
318 dynamic_offset_counter=dynamic_offset_counter,
319 zero_pad_i=zero_pad_i,
320 pos=pos
321 )
322
323 return holder, maxlen, dynamic_offset_counter, datamem_start
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vyper/parser/events.py b/vyper/parser/events.py
--- a/vyper/parser/events.py
+++ b/vyper/parser/events.py
@@ -58,6 +58,11 @@
size = ['sload', ['sha3_32', value]]
topics.append(byte_array_to_num(value, arg, 'uint256', size))
else:
+ if arg_type != expected_type:
+ raise TypeMismatchException(
+ f"Invalid type for logging topic, got {arg_type} expected {expected_type}",
+ value.pos
+ )
value = unwrap_location(value)
value = base_type_conversion(value, arg_type, expected_type, pos=code_pos)
topics.append(value)
|
{"golden_diff": "diff --git a/vyper/parser/events.py b/vyper/parser/events.py\n--- a/vyper/parser/events.py\n+++ b/vyper/parser/events.py\n@@ -58,6 +58,11 @@\n size = ['sload', ['sha3_32', value]]\n topics.append(byte_array_to_num(value, arg, 'uint256', size))\n else:\n+ if arg_type != expected_type:\n+ raise TypeMismatchException(\n+ f\"Invalid type for logging topic, got {arg_type} expected {expected_type}\",\n+ value.pos\n+ )\n value = unwrap_location(value)\n value = base_type_conversion(value, arg_type, expected_type, pos=code_pos)\n topics.append(value)\n", "issue": "Types not always enforced on events\nHaven't dug deeply on this one but something equivalent to this is compiling for me:\r\n\r\n```python\r\nSale: event({eth_sold: indexed(uint256(wei))})\r\n\r\n@public\r\ndef logSale(amount: uint256):\r\n log.Sale(amount)\r\n```\n", "before_files": [{"content": "from vyper import ast\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.parser.expr import (\n Expr,\n)\nfrom vyper.parser.lll_node import (\n LLLnode,\n)\nfrom vyper.parser.parser_utils import (\n base_type_conversion,\n byte_array_to_num,\n getpos,\n make_byte_array_copier,\n make_setter,\n unwrap_location,\n)\nfrom vyper.types.types import (\n BaseType,\n ByteArrayLike,\n ListType,\n get_size_of_type,\n)\nfrom vyper.utils import (\n bytes_to_int,\n ceil32,\n string_to_bytes,\n)\n\n\ndef pack_logging_topics(event_id, args, expected_topics, context, pos):\n topics = [event_id]\n code_pos = pos\n for pos, expected_topic in enumerate(expected_topics):\n expected_type = expected_topic.typ\n arg = args[pos]\n value = Expr(arg, context).lll_node\n arg_type = value.typ\n\n if isinstance(arg_type, ByteArrayLike) and isinstance(expected_type, ByteArrayLike):\n if arg_type.maxlen > expected_type.maxlen:\n raise TypeMismatchException(\n \"Topic input bytes are too big: %r %r\" % (arg_type, expected_type), code_pos\n )\n if isinstance(arg, ast.Str):\n bytez, bytez_length = string_to_bytes(arg.s)\n if len(bytez) > 32:\n raise InvalidLiteralException(\n \"Can only log a maximum of 32 bytes at a time.\", code_pos\n )\n topics.append(bytes_to_int(bytez + b'\\x00' * (32 - bytez_length)))\n else:\n if value.location == \"memory\":\n size = ['mload', value]\n elif value.location == \"storage\":\n size = ['sload', ['sha3_32', value]]\n topics.append(byte_array_to_num(value, arg, 'uint256', size))\n else:\n value = unwrap_location(value)\n value = base_type_conversion(value, arg_type, expected_type, pos=code_pos)\n topics.append(value)\n\n return topics\n\n\ndef pack_args_by_32(holder, maxlen, arg, typ, context, placeholder,\n dynamic_offset_counter=None, datamem_start=None, zero_pad_i=None, pos=None):\n \"\"\"\n Copy necessary variables to pre-allocated memory section.\n\n :param holder: Complete holder for all args\n :param maxlen: Total length in bytes of the full arg section (static + dynamic).\n :param arg: Current arg to pack\n :param context: Context of arg\n :param placeholder: Static placeholder for static argument part.\n :param dynamic_offset_counter: position counter stored in static args.\n :param dynamic_placeholder: pointer to current position in memory to write dynamic values to.\n :param datamem_start: position where the whole datemem section starts.\n \"\"\"\n\n if isinstance(typ, BaseType):\n if isinstance(arg, LLLnode):\n value = unwrap_location(arg)\n else:\n value = Expr(arg, context).lll_node\n value = base_type_conversion(value, value.typ, typ, pos)\n 
holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))\n elif isinstance(typ, ByteArrayLike):\n\n if isinstance(arg, LLLnode): # Is prealloacted variable.\n source_lll = arg\n else:\n source_lll = Expr(arg, context).lll_node\n\n # Set static offset, in arg slot.\n holder.append(LLLnode.from_list(['mstore', placeholder, ['mload', dynamic_offset_counter]]))\n # Get the biginning to write the ByteArray to.\n dest_placeholder = LLLnode.from_list(\n ['add', datamem_start, ['mload', dynamic_offset_counter]],\n typ=typ, location='memory', annotation=\"pack_args_by_32:dest_placeholder\")\n copier = make_byte_array_copier(dest_placeholder, source_lll, pos=pos)\n holder.append(copier)\n # Add zero padding.\n new_maxlen = ceil32(source_lll.typ.maxlen)\n\n holder.append([\n 'with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]], [\n 'seq', ['with', '_bytearray_loc', dest_placeholder, [\n 'seq', ['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen, [\n 'seq',\n # stay within allocated bounds\n ['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'],\n [\n 'mstore8',\n ['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]],\n 0,\n ],\n ]],\n ]],\n ]\n ])\n\n # Increment offset counter.\n increment_counter = LLLnode.from_list([\n 'mstore', dynamic_offset_counter,\n [\n 'add',\n ['add', ['mload', dynamic_offset_counter], ['ceil32', ['mload', dest_placeholder]]],\n 32,\n ],\n ], annotation='Increment dynamic offset counter')\n holder.append(increment_counter)\n elif isinstance(typ, ListType):\n maxlen += (typ.count - 1) * 32\n typ = typ.subtype\n\n def check_list_type_match(provided): # Check list types match.\n if provided != typ:\n raise TypeMismatchException(\n \"Log list type '%s' does not match provided, expected '%s'\" % (provided, typ)\n )\n\n # NOTE: Below code could be refactored into iterators/getter functions for each type of\n # repetitive loop. 
But seeing how each one is a unique for loop, and in which way\n # the sub value makes the difference in each type of list clearer.\n\n # List from storage\n if isinstance(arg, ast.Attribute) and arg.value.id == 'self':\n stor_list = context.globals[arg.attr]\n check_list_type_match(stor_list.typ.subtype)\n size = stor_list.typ.count\n mem_offset = 0\n for i in range(0, size):\n storage_offset = i\n arg2 = LLLnode.from_list(\n ['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],\n typ=typ,\n )\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n arg2,\n typ,\n context,\n placeholder + mem_offset,\n pos=pos,\n )\n mem_offset += get_size_of_type(typ) * 32\n\n # List from variable.\n elif isinstance(arg, ast.Name):\n size = context.vars[arg.id].size\n pos = context.vars[arg.id].pos\n check_list_type_match(context.vars[arg.id].typ.subtype)\n mem_offset = 0\n for _ in range(0, size):\n arg2 = LLLnode.from_list(\n pos + mem_offset,\n typ=typ,\n location=context.vars[arg.id].location\n )\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n arg2,\n typ,\n context,\n placeholder + mem_offset,\n pos=pos,\n )\n mem_offset += get_size_of_type(typ) * 32\n\n # List from list literal.\n else:\n mem_offset = 0\n for arg2 in arg.elts:\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n arg2,\n typ,\n context,\n placeholder + mem_offset,\n pos=pos,\n )\n mem_offset += get_size_of_type(typ) * 32\n return holder, maxlen\n\n\n# Pack logging data arguments\ndef pack_logging_data(expected_data, args, context, pos):\n # Checks to see if there's any data\n if not args:\n return ['seq'], 0, None, 0\n holder = ['seq']\n maxlen = len(args) * 32 # total size of all packed args (upper limit)\n\n # Unroll any function calls, to temp variables.\n prealloacted = {}\n for idx, (arg, _expected_arg) in enumerate(zip(args, expected_data)):\n\n if isinstance(arg, (ast.Str, ast.Call)):\n expr = Expr(arg, context)\n source_lll = expr.lll_node\n typ = source_lll.typ\n\n if isinstance(arg, ast.Str):\n if len(arg.s) > typ.maxlen:\n raise TypeMismatchException(\n \"Data input bytes are to big: %r %r\" % (len(arg.s), typ), pos\n )\n\n tmp_variable = context.new_variable(\n '_log_pack_var_%i_%i' % (arg.lineno, arg.col_offset),\n source_lll.typ,\n )\n tmp_variable_node = LLLnode.from_list(\n tmp_variable,\n typ=source_lll.typ,\n pos=getpos(arg),\n location=\"memory\",\n annotation='log_prealloacted %r' % source_lll.typ,\n )\n # Store len.\n # holder.append(['mstore', len_placeholder, ['mload', unwrap_location(source_lll)]])\n # Copy bytes.\n\n holder.append(\n make_setter(tmp_variable_node, source_lll, pos=getpos(arg), location='memory')\n )\n prealloacted[idx] = tmp_variable_node\n\n requires_dynamic_offset = any([isinstance(data.typ, ByteArrayLike) for data in expected_data])\n if requires_dynamic_offset:\n # Iterator used to zero pad memory.\n zero_pad_i = context.new_placeholder(BaseType('uint256'))\n dynamic_offset_counter = context.new_placeholder(BaseType(32))\n dynamic_placeholder = context.new_placeholder(BaseType(32))\n else:\n dynamic_offset_counter = None\n zero_pad_i = None\n\n # Create placeholder for static args. 
Note: order of new_*() is important.\n placeholder_map = {}\n for i, (_arg, data) in enumerate(zip(args, expected_data)):\n typ = data.typ\n if not isinstance(typ, ByteArrayLike):\n placeholder = context.new_placeholder(typ)\n else:\n placeholder = context.new_placeholder(BaseType(32))\n placeholder_map[i] = placeholder\n\n # Populate static placeholders.\n for i, (arg, data) in enumerate(zip(args, expected_data)):\n typ = data.typ\n placeholder = placeholder_map[i]\n if not isinstance(typ, ByteArrayLike):\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n prealloacted.get(i, arg),\n typ,\n context,\n placeholder,\n zero_pad_i=zero_pad_i,\n pos=pos,\n )\n\n # Dynamic position starts right after the static args.\n if requires_dynamic_offset:\n holder.append(LLLnode.from_list(['mstore', dynamic_offset_counter, maxlen]))\n\n # Calculate maximum dynamic offset placeholders, used for gas estimation.\n for _arg, data in zip(args, expected_data):\n typ = data.typ\n if isinstance(typ, ByteArrayLike):\n maxlen += 32 + ceil32(typ.maxlen)\n\n if requires_dynamic_offset:\n datamem_start = dynamic_placeholder + 32\n else:\n datamem_start = placeholder_map[0]\n\n # Copy necessary data into allocated dynamic section.\n for i, (arg, data) in enumerate(zip(args, expected_data)):\n typ = data.typ\n if isinstance(typ, ByteArrayLike):\n pack_args_by_32(\n holder=holder,\n maxlen=maxlen,\n arg=prealloacted.get(i, arg),\n typ=typ,\n context=context,\n placeholder=placeholder_map[i],\n datamem_start=datamem_start,\n dynamic_offset_counter=dynamic_offset_counter,\n zero_pad_i=zero_pad_i,\n pos=pos\n )\n\n return holder, maxlen, dynamic_offset_counter, datamem_start\n", "path": "vyper/parser/events.py"}], "after_files": [{"content": "from vyper import ast\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.parser.expr import (\n Expr,\n)\nfrom vyper.parser.lll_node import (\n LLLnode,\n)\nfrom vyper.parser.parser_utils import (\n base_type_conversion,\n byte_array_to_num,\n getpos,\n make_byte_array_copier,\n make_setter,\n unwrap_location,\n)\nfrom vyper.types.types import (\n BaseType,\n ByteArrayLike,\n ListType,\n get_size_of_type,\n)\nfrom vyper.utils import (\n bytes_to_int,\n ceil32,\n string_to_bytes,\n)\n\n\ndef pack_logging_topics(event_id, args, expected_topics, context, pos):\n topics = [event_id]\n code_pos = pos\n for pos, expected_topic in enumerate(expected_topics):\n expected_type = expected_topic.typ\n arg = args[pos]\n value = Expr(arg, context).lll_node\n arg_type = value.typ\n\n if isinstance(arg_type, ByteArrayLike) and isinstance(expected_type, ByteArrayLike):\n if arg_type.maxlen > expected_type.maxlen:\n raise TypeMismatchException(\n \"Topic input bytes are too big: %r %r\" % (arg_type, expected_type), code_pos\n )\n if isinstance(arg, ast.Str):\n bytez, bytez_length = string_to_bytes(arg.s)\n if len(bytez) > 32:\n raise InvalidLiteralException(\n \"Can only log a maximum of 32 bytes at a time.\", code_pos\n )\n topics.append(bytes_to_int(bytez + b'\\x00' * (32 - bytez_length)))\n else:\n if value.location == \"memory\":\n size = ['mload', value]\n elif value.location == \"storage\":\n size = ['sload', ['sha3_32', value]]\n topics.append(byte_array_to_num(value, arg, 'uint256', size))\n else:\n if arg_type != expected_type:\n raise TypeMismatchException(\n f\"Invalid type for logging topic, got {arg_type} expected {expected_type}\",\n value.pos\n )\n value = unwrap_location(value)\n value = base_type_conversion(value, arg_type, 
expected_type, pos=code_pos)\n topics.append(value)\n\n return topics\n\n\ndef pack_args_by_32(holder, maxlen, arg, typ, context, placeholder,\n dynamic_offset_counter=None, datamem_start=None, zero_pad_i=None, pos=None):\n \"\"\"\n Copy necessary variables to pre-allocated memory section.\n\n :param holder: Complete holder for all args\n :param maxlen: Total length in bytes of the full arg section (static + dynamic).\n :param arg: Current arg to pack\n :param context: Context of arg\n :param placeholder: Static placeholder for static argument part.\n :param dynamic_offset_counter: position counter stored in static args.\n :param dynamic_placeholder: pointer to current position in memory to write dynamic values to.\n :param datamem_start: position where the whole datemem section starts.\n \"\"\"\n\n if isinstance(typ, BaseType):\n if isinstance(arg, LLLnode):\n value = unwrap_location(arg)\n else:\n value = Expr(arg, context).lll_node\n value = base_type_conversion(value, value.typ, typ, pos)\n holder.append(LLLnode.from_list(['mstore', placeholder, value], typ=typ, location='memory'))\n elif isinstance(typ, ByteArrayLike):\n\n if isinstance(arg, LLLnode): # Is prealloacted variable.\n source_lll = arg\n else:\n source_lll = Expr(arg, context).lll_node\n\n # Set static offset, in arg slot.\n holder.append(LLLnode.from_list(['mstore', placeholder, ['mload', dynamic_offset_counter]]))\n # Get the biginning to write the ByteArray to.\n dest_placeholder = LLLnode.from_list(\n ['add', datamem_start, ['mload', dynamic_offset_counter]],\n typ=typ, location='memory', annotation=\"pack_args_by_32:dest_placeholder\")\n copier = make_byte_array_copier(dest_placeholder, source_lll, pos=pos)\n holder.append(copier)\n # Add zero padding.\n new_maxlen = ceil32(source_lll.typ.maxlen)\n\n holder.append([\n 'with', '_ceil32_end', ['ceil32', ['mload', dest_placeholder]], [\n 'seq', ['with', '_bytearray_loc', dest_placeholder, [\n 'seq', ['repeat', zero_pad_i, ['mload', '_bytearray_loc'], new_maxlen, [\n 'seq',\n # stay within allocated bounds\n ['if', ['ge', ['mload', zero_pad_i], '_ceil32_end'], 'break'],\n [\n 'mstore8',\n ['add', ['add', '_bytearray_loc', 32], ['mload', zero_pad_i]],\n 0,\n ],\n ]],\n ]],\n ]\n ])\n\n # Increment offset counter.\n increment_counter = LLLnode.from_list([\n 'mstore', dynamic_offset_counter,\n [\n 'add',\n ['add', ['mload', dynamic_offset_counter], ['ceil32', ['mload', dest_placeholder]]],\n 32,\n ],\n ], annotation='Increment dynamic offset counter')\n holder.append(increment_counter)\n elif isinstance(typ, ListType):\n maxlen += (typ.count - 1) * 32\n typ = typ.subtype\n\n def check_list_type_match(provided): # Check list types match.\n if provided != typ:\n raise TypeMismatchException(\n \"Log list type '%s' does not match provided, expected '%s'\" % (provided, typ)\n )\n\n # NOTE: Below code could be refactored into iterators/getter functions for each type of\n # repetitive loop. 
But seeing how each one is a unique for loop, and in which way\n # the sub value makes the difference in each type of list clearer.\n\n # List from storage\n if isinstance(arg, ast.Attribute) and arg.value.id == 'self':\n stor_list = context.globals[arg.attr]\n check_list_type_match(stor_list.typ.subtype)\n size = stor_list.typ.count\n mem_offset = 0\n for i in range(0, size):\n storage_offset = i\n arg2 = LLLnode.from_list(\n ['sload', ['add', ['sha3_32', Expr(arg, context).lll_node], storage_offset]],\n typ=typ,\n )\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n arg2,\n typ,\n context,\n placeholder + mem_offset,\n pos=pos,\n )\n mem_offset += get_size_of_type(typ) * 32\n\n # List from variable.\n elif isinstance(arg, ast.Name):\n size = context.vars[arg.id].size\n pos = context.vars[arg.id].pos\n check_list_type_match(context.vars[arg.id].typ.subtype)\n mem_offset = 0\n for _ in range(0, size):\n arg2 = LLLnode.from_list(\n pos + mem_offset,\n typ=typ,\n location=context.vars[arg.id].location\n )\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n arg2,\n typ,\n context,\n placeholder + mem_offset,\n pos=pos,\n )\n mem_offset += get_size_of_type(typ) * 32\n\n # List from list literal.\n else:\n mem_offset = 0\n for arg2 in arg.elts:\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n arg2,\n typ,\n context,\n placeholder + mem_offset,\n pos=pos,\n )\n mem_offset += get_size_of_type(typ) * 32\n return holder, maxlen\n\n\n# Pack logging data arguments\ndef pack_logging_data(expected_data, args, context, pos):\n # Checks to see if there's any data\n if not args:\n return ['seq'], 0, None, 0\n holder = ['seq']\n maxlen = len(args) * 32 # total size of all packed args (upper limit)\n\n # Unroll any function calls, to temp variables.\n prealloacted = {}\n for idx, (arg, _expected_arg) in enumerate(zip(args, expected_data)):\n\n if isinstance(arg, (ast.Str, ast.Call)):\n expr = Expr(arg, context)\n source_lll = expr.lll_node\n typ = source_lll.typ\n\n if isinstance(arg, ast.Str):\n if len(arg.s) > typ.maxlen:\n raise TypeMismatchException(\n \"Data input bytes are to big: %r %r\" % (len(arg.s), typ), pos\n )\n\n tmp_variable = context.new_variable(\n '_log_pack_var_%i_%i' % (arg.lineno, arg.col_offset),\n source_lll.typ,\n )\n tmp_variable_node = LLLnode.from_list(\n tmp_variable,\n typ=source_lll.typ,\n pos=getpos(arg),\n location=\"memory\",\n annotation='log_prealloacted %r' % source_lll.typ,\n )\n # Store len.\n # holder.append(['mstore', len_placeholder, ['mload', unwrap_location(source_lll)]])\n # Copy bytes.\n\n holder.append(\n make_setter(tmp_variable_node, source_lll, pos=getpos(arg), location='memory')\n )\n prealloacted[idx] = tmp_variable_node\n\n requires_dynamic_offset = any([isinstance(data.typ, ByteArrayLike) for data in expected_data])\n if requires_dynamic_offset:\n # Iterator used to zero pad memory.\n zero_pad_i = context.new_placeholder(BaseType('uint256'))\n dynamic_offset_counter = context.new_placeholder(BaseType(32))\n dynamic_placeholder = context.new_placeholder(BaseType(32))\n else:\n dynamic_offset_counter = None\n zero_pad_i = None\n\n # Create placeholder for static args. 
Note: order of new_*() is important.\n placeholder_map = {}\n for i, (_arg, data) in enumerate(zip(args, expected_data)):\n typ = data.typ\n if not isinstance(typ, ByteArrayLike):\n placeholder = context.new_placeholder(typ)\n else:\n placeholder = context.new_placeholder(BaseType(32))\n placeholder_map[i] = placeholder\n\n # Populate static placeholders.\n for i, (arg, data) in enumerate(zip(args, expected_data)):\n typ = data.typ\n placeholder = placeholder_map[i]\n if not isinstance(typ, ByteArrayLike):\n holder, maxlen = pack_args_by_32(\n holder,\n maxlen,\n prealloacted.get(i, arg),\n typ,\n context,\n placeholder,\n zero_pad_i=zero_pad_i,\n pos=pos,\n )\n\n # Dynamic position starts right after the static args.\n if requires_dynamic_offset:\n holder.append(LLLnode.from_list(['mstore', dynamic_offset_counter, maxlen]))\n\n # Calculate maximum dynamic offset placeholders, used for gas estimation.\n for _arg, data in zip(args, expected_data):\n typ = data.typ\n if isinstance(typ, ByteArrayLike):\n maxlen += 32 + ceil32(typ.maxlen)\n\n if requires_dynamic_offset:\n datamem_start = dynamic_placeholder + 32\n else:\n datamem_start = placeholder_map[0]\n\n # Copy necessary data into allocated dynamic section.\n for i, (arg, data) in enumerate(zip(args, expected_data)):\n typ = data.typ\n if isinstance(typ, ByteArrayLike):\n pack_args_by_32(\n holder=holder,\n maxlen=maxlen,\n arg=prealloacted.get(i, arg),\n typ=typ,\n context=context,\n placeholder=placeholder_map[i],\n datamem_start=datamem_start,\n dynamic_offset_counter=dynamic_offset_counter,\n zero_pad_i=zero_pad_i,\n pos=pos\n )\n\n return holder, maxlen, dynamic_offset_counter, datamem_start\n", "path": "vyper/parser/events.py"}]}
| 3,791 | 158 |
gh_patches_debug_5269
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-354
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stuck in "Sorry, we couldn't locate what you requested" state
As a source, after generating a code, clicking the "Continue..." button, going back, and then refreshing, you get stuck on a page that says "Sorry, we couldn't locate what you requested". From then on, every URL shows the same message, and you have to close the browser to get back to a normal state.
--- END ISSUE ---
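One plausible reading of `source.py` below, offered as a hypothesis rather than a confirmed root cause: `generate()` writes a fresh `session['codename']` on every visit, while `create()` has already set `session['logged_in']`, so a back-navigation plus refresh leaves the session logged in under a codename that was never registered as a `Source`; `setup_g()` then hits `NoResultFound` and calls `abort(404)` on every subsequent request. The minimal sketch below is hypothetical illustration code, not SecureDrop code — `KNOWN_SIDS`, the stub `hash_codename`, and the route bodies are invented stand-ins for the `Source` table, `crypto_util.hash_codename`, and the real views.

```python
# Hypothetical sketch: a stale logged_in flag plus a regenerated codename
# makes every request abort with 404 until the session cookie is cleared.
from flask import Flask, abort, session

app = Flask(__name__)
app.secret_key = "illustration-only"

KNOWN_SIDS = set()  # stand-in for the Source table


def hash_codename(codename):
    # stand-in for crypto_util.hash_codename (scrypt in the real app)
    return "hash:" + codename


@app.before_request
def setup_g():
    # mirrors setup_g(): when logged in, look up the source for the codename
    # currently in the session and abort(404) if no such source exists
    if session.get("logged_in"):
        if hash_codename(session["codename"]) not in KNOWN_SIDS:
            abort(404)  # "Sorry, we couldn't locate what you requested"


@app.route("/generate")
def generate():
    # mirrors generate(): overwrites the session codename on every visit
    session["codename"] = "codename-" + str(len(KNOWN_SIDS) + 1)
    return "new codename issued"


@app.route("/create")
def create():
    # mirrors create(): registers the current codename and logs the session in
    KNOWN_SIDS.add(hash_codename(session["codename"]))
    session["logged_in"] = True
    return "source created"


@app.route("/")
def index():
    return "index"
```

With this sketch, the sequence `/generate` → `/create` → `/generate` → any other URL ends with every request (including `/generate` itself) aborting in the `before_request` hook, and only clearing the session cookie recovers — which lines up with the reported symptom. Whether the appropriate fix lives in `generate()` or in `setup_g()` is not settled by this sketch.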
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os
3 from datetime import datetime
4 import uuid
5 from functools import wraps
6 import zipfile
7 from cStringIO import StringIO
8 import subprocess
9
10 import logging
11 # This module's logger is explicitly labeled so the correct logger is used,
12 # even when this is run from the command line (e.g. during development)
13 log = logging.getLogger('source')
14
15 from flask import (Flask, request, render_template, session, redirect, url_for,
16 flash, abort, g, send_file)
17 from flask_wtf.csrf import CsrfProtect
18
19 from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
20
21 import config
22 import version
23 import crypto_util
24 import store
25 import background
26 from db import db_session, Source, Submission
27
28 app = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)
29 app.config.from_object(config.FlaskConfig)
30 CsrfProtect(app)
31
32 app.jinja_env.globals['version'] = version.__version__
33 if getattr(config, 'CUSTOM_HEADER_IMAGE', None):
34 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE
35 app.jinja_env.globals['use_custom_header_image'] = True
36 else:
37 app.jinja_env.globals['header_image'] = 'securedrop.png'
38 app.jinja_env.globals['use_custom_header_image'] = False
39
40
41 @app.teardown_appcontext
42 def shutdown_session(exception=None):
43 """Automatically remove database sessions at the end of the request, or
44 when the application shuts down"""
45 db_session.remove()
46
47
48 def logged_in():
49 if 'logged_in' in session:
50 return True
51
52
53 def login_required(f):
54 @wraps(f)
55 def decorated_function(*args, **kwargs):
56 if not logged_in():
57 return redirect(url_for('lookup'))
58 return f(*args, **kwargs)
59 return decorated_function
60
61
62 def ignore_static(f):
63 """Only executes the wrapped function if we're not loading a static resource."""
64 @wraps(f)
65 def decorated_function(*args, **kwargs):
66 if request.path.startswith('/static'):
67 return # don't execute the decorated function
68 return f(*args, **kwargs)
69 return decorated_function
70
71
72 @app.before_request
73 @ignore_static
74 def setup_g():
75 """Store commonly used values in Flask's special g object"""
76 # ignore_static here because `crypto_util.hash_codename` is scrypt (very
77 # time consuming), and we don't need to waste time running if we're just
78 # serving a static resource that won't need to access these common values.
79 if logged_in():
80 g.codename = session['codename']
81 g.sid = crypto_util.hash_codename(g.codename)
82 try:
83 g.source = Source.query.filter(Source.filesystem_id == g.sid).one()
84 except MultipleResultsFound as e:
85 app.logger.error("Found multiple Sources when one was expected: %s" % (e,))
86 abort(500)
87 except NoResultFound as e:
88 app.logger.error("Found no Sources when one was expected: %s" % (e,))
89 abort(404)
90 g.loc = store.path(g.sid)
91
92
93 @app.before_request
94 @ignore_static
95 def check_tor2web():
96 # ignore_static here so we only flash a single message warning about Tor2Web,
97     # corresponding to the initial page load.
98 if 'X-tor2web' in request.headers:
99 flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '
100 'This <strong>does not</strong> provide anonymity. '
101 '<a href="/tor2web-warning">Why is this dangerous?</a>',
102 "header-warning")
103
104
105 @app.route('/')
106 def index():
107 return render_template('index.html')
108
109
110 @app.route('/generate', methods=('GET', 'POST'))
111 def generate():
112 number_words = 8
113 if request.method == 'POST':
114 number_words = int(request.form['number-words'])
115 if number_words not in range(7, 11):
116 abort(403)
117 session['codename'] = crypto_util.genrandomid(number_words)
118 # TODO: make sure this codename isn't a repeat
119 return render_template('generate.html', codename=session['codename'])
120
121
122 @app.route('/create', methods=['POST'])
123 def create():
124 sid = crypto_util.hash_codename(session['codename'])
125
126 source = Source(sid, crypto_util.display_id())
127 db_session.add(source)
128 db_session.commit()
129
130 if os.path.exists(store.path(sid)):
131 # if this happens, we're not using very secure crypto
132 log.warning("Got a duplicate ID '%s'" % sid)
133 else:
134 os.mkdir(store.path(sid))
135
136 session['logged_in'] = True
137 return redirect(url_for('lookup'))
138
139
140 @app.route('/lookup', methods=('GET',))
141 @login_required
142 def lookup():
143 replies = []
144 for fn in os.listdir(g.loc):
145 if fn.startswith('reply-'):
146 try:
147 msg = crypto_util.decrypt(g.sid, g.codename,
148 file(store.path(g.sid, fn)).read()).decode("utf-8")
149 except UnicodeDecodeError:
150 app.logger.error("Could not decode reply %s" % fn)
151 else:
152 date = str(datetime.fromtimestamp(
153 os.stat(store.path(g.sid, fn)).st_mtime))
154 replies.append(dict(id=fn, date=date, msg=msg))
155
156 def async_genkey(sid, codename):
157 with app.app_context():
158 background.execute(lambda: crypto_util.genkeypair(sid, codename))
159
160 # Generate a keypair to encrypt replies from the journalist
161 # Only do this if the journalist has flagged the source as one
162 # that they would like to reply to. (Issue #140.)
163 if not crypto_util.getkey(g.sid) and g.source.flagged:
164 async_genkey(g.sid, g.codename)
165
166 return render_template('lookup.html', codename=g.codename, msgs=replies,
167 flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))
168
169
170 def normalize_timestamps(sid):
171 """
172 Update the timestamps on all of the source's submissions to match that of
173 the latest submission. This minimizes metadata that could be useful to
174 investigators. See #301.
175 """
176 sub_paths = [ store.path(sid, submission.filename)
177 for submission in g.source.submissions ]
178 if len(sub_paths) > 1:
179 args = ["touch"]
180 args.extend(sub_paths[:-1])
181 rc = subprocess.call(args)
182 if rc != 0:
183 app.logger.warning("Couldn't normalize submission timestamps (touch exited with %d)" % rc)
184
185
186 @app.route('/submit', methods=('POST',))
187 @login_required
188 def submit():
189 msg = request.form['msg']
190 fh = request.files['fh']
191 strip_metadata = True if 'notclean' in request.form else False
192
193 fnames = []
194
195 if msg:
196 fnames.append(store.save_message_submission(g.sid, msg))
197 flash("Thanks! We received your message.", "notification")
198 if fh:
199 fnames.append(store.save_file_submission(g.sid, fh.filename,
200 fh.stream, fh.content_type, strip_metadata))
201 flash("Thanks! We received your document '%s'."
202 % fh.filename or '[unnamed]', "notification")
203
204 for fname in fnames:
205 submission = Submission(g.source, fname)
206 db_session.add(submission)
207
208 if g.source.pending:
209 g.source.pending = False
210
211 # Generate a keypair now, if there's enough entropy (issue #303)
212 entropy_avail = int(open('/proc/sys/kernel/random/entropy_avail').read())
213 if entropy_avail >= 2400:
214 crypto_util.genkeypair(g.sid, g.codename)
215
216 g.source.last_updated = datetime.now()
217 db_session.commit()
218 normalize_timestamps(g.sid)
219
220 return redirect(url_for('lookup'))
221
222
223 @app.route('/delete', methods=('POST',))
224 @login_required
225 def delete():
226 msgid = request.form['msgid']
227 assert '/' not in msgid
228 potential_files = os.listdir(g.loc)
229 if msgid not in potential_files:
230 abort(404) # TODO are the checks necessary?
231 store.secure_unlink(store.path(g.sid, msgid))
232 flash("Reply deleted.", "notification")
233
234 return redirect(url_for('lookup'))
235
236
237 def valid_codename(codename):
238 return os.path.exists(store.path(crypto_util.hash_codename(codename)))
239
240 @app.route('/login', methods=('GET', 'POST'))
241 def login():
242 if request.method == 'POST':
243 codename = request.form['codename']
244 if valid_codename(codename):
245 session.update(codename=codename, logged_in=True)
246 return redirect(url_for('lookup'))
247 else:
248 flash("Sorry, that is not a recognized codename.", "error")
249 return render_template('login.html')
250
251
252 @app.route('/howto-disable-js')
253 def howto_disable_js():
254 return render_template("howto-disable-js.html")
255
256
257 @app.route('/tor2web-warning')
258 def tor2web_warning():
259 return render_template("tor2web-warning.html")
260
261
262 @app.route('/journalist-key')
263 def download_journalist_pubkey():
264 journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)
265 return send_file(StringIO(journalist_pubkey),
266 mimetype="application/pgp-keys",
267 attachment_filename=config.JOURNALIST_KEY + ".asc",
268 as_attachment=True)
269
270
271 @app.route('/why-journalist-key')
272 def why_download_journalist_pubkey():
273 return render_template("why-journalist-key.html")
274
275
276 _REDIRECT_URL_WHITELIST = ["http://tor2web.org/",
277 "https://www.torproject.org/download.html.en",
278 "https://tails.boum.org/",
279 "http://www.wired.com/threatlevel/2013/09/freedom-hosting-fbi/",
280 "http://www.theguardian.com/world/interactive/2013/oct/04/egotistical-giraffe-nsa-tor-document",
281 "https://addons.mozilla.org/en-US/firefox/addon/noscript/",
282 "http://noscript.net"]
283
284
285 @app.route('/redirect/<path:redirect_url>')
286 def redirect_hack(redirect_url):
287 # A hack to avoid referer leakage when a user clicks on an external link.
288 # TODO: Most likely will want to share this between source.py and
289 # journalist.py in the future.
290 if redirect_url not in _REDIRECT_URL_WHITELIST:
291 return 'Redirect not allowed'
292 else:
293 return render_template("redirect.html", redirect_url=redirect_url)
294
295
296 @app.errorhandler(404)
297 def page_not_found(error):
298 return render_template('notfound.html'), 404
299
300 @app.errorhandler(500)
301 def internal_error(error):
302 return render_template('error.html'), 500
303
304 if __name__ == "__main__":
305 # TODO make sure debug is not on in production
306 app.run(debug=True, host='0.0.0.0', port=8080)
307
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/source.py b/securedrop/source.py
--- a/securedrop/source.py
+++ b/securedrop/source.py
@@ -115,6 +115,7 @@
if number_words not in range(7, 11):
abort(403)
session['codename'] = crypto_util.genrandomid(number_words)
+ session.pop('logged_in', None)
# TODO: make sure this codename isn't a repeat
return render_template('generate.html', codename=session['codename'])
|
{"golden_diff": "diff --git a/securedrop/source.py b/securedrop/source.py\n--- a/securedrop/source.py\n+++ b/securedrop/source.py\n@@ -115,6 +115,7 @@\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n+ session.pop('logged_in', None)\n # TODO: make sure this codename isn't a repeat\n return render_template('generate.html', codename=session['codename'])\n", "issue": "Stuck in \"Sorry, we couldn't locate what you requested\" state\nAs a source, after generating a code and clicking the \"Continue...\" button and going back, then refreshing you get stuck on a page that says \"Sorry, we couldn't locate what you requested\". Going to any URL causes the same message to be displayed and you have to close the browser to be able to get back to a normal state.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\nimport zipfile\nfrom cStringIO import StringIO\nimport subprocess\n\nimport logging\n# This module's logger is explicitly labeled so the correct logger is used,\n# even when this is run from the command line (e.g. during development)\nlog = logging.getLogger('source')\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nfrom db import db_session, Source, Submission\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'securedrop.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef logged_in():\n if 'logged_in' in session:\n return True\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('lookup'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.hash_codename` is scrypt (very\n # time consuming), and we don't need to waste time running if we're just\n # serving a static resource that won't need to access these common values.\n if logged_in():\n g.codename = session['codename']\n g.sid = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query.filter(Source.filesystem_id == g.sid).one()\n except MultipleResultsFound as e:\n app.logger.error(\"Found multiple Sources when one was expected: %s\" % (e,))\n abort(500)\n except 
NoResultFound as e:\n app.logger.error(\"Found no Sources when one was expected: %s\" % (e,))\n abort(404)\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"header-warning\")\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n # TODO: make sure this codename isn't a repeat\n return render_template('generate.html', codename=session['codename'])\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.hash_codename(session['codename'])\n\n source = Source(sid, crypto_util.display_id())\n db_session.add(source)\n db_session.commit()\n\n if os.path.exists(store.path(sid)):\n # if this happens, we're not using very secure crypto\n log.warning(\"Got a duplicate ID '%s'\" % sid)\n else:\n os.mkdir(store.path(sid))\n\n session['logged_in'] = True\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n replies = []\n for fn in os.listdir(g.loc):\n if fn.startswith('reply-'):\n try:\n msg = crypto_util.decrypt(g.sid, g.codename,\n file(store.path(g.sid, fn)).read()).decode(\"utf-8\")\n except UnicodeDecodeError:\n app.logger.error(\"Could not decode reply %s\" % fn)\n else:\n date = str(datetime.fromtimestamp(\n os.stat(store.path(g.sid, fn)).st_mtime))\n replies.append(dict(id=fn, date=date, msg=msg))\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template('lookup.html', codename=g.codename, msgs=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n\n\ndef normalize_timestamps(sid):\n \"\"\"\n Update the timestamps on all of the source's submissions to match that of\n the latest submission. This minimizes metadata that could be useful to\n investigators. See #301.\n \"\"\"\n sub_paths = [ store.path(sid, submission.filename)\n for submission in g.source.submissions ]\n if len(sub_paths) > 1:\n args = [\"touch\"]\n args.extend(sub_paths[:-1])\n rc = subprocess.call(args)\n if rc != 0:\n app.logger.warning(\"Couldn't normalize submission timestamps (touch exited with %d)\" % rc)\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n strip_metadata = True if 'notclean' in request.form else False\n\n fnames = []\n\n if msg:\n fnames.append(store.save_message_submission(g.sid, msg))\n flash(\"Thanks! 
We received your message.\", \"notification\")\n if fh:\n fnames.append(store.save_file_submission(g.sid, fh.filename,\n fh.stream, fh.content_type, strip_metadata))\n flash(\"Thanks! We received your document '%s'.\"\n % fh.filename or '[unnamed]', \"notification\")\n\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n entropy_avail = int(open('/proc/sys/kernel/random/entropy_avail').read())\n if entropy_avail >= 2400:\n crypto_util.genkeypair(g.sid, g.codename)\n\n g.source.last_updated = datetime.now()\n db_session.commit()\n normalize_timestamps(g.sid)\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.hash_codename(codename)))\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup'))\n else:\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\n_REDIRECT_URL_WHITELIST = [\"http://tor2web.org/\",\n \"https://www.torproject.org/download.html.en\",\n \"https://tails.boum.org/\",\n \"http://www.wired.com/threatlevel/2013/09/freedom-hosting-fbi/\",\n \"http://www.theguardian.com/world/interactive/2013/oct/04/egotistical-giraffe-nsa-tor-document\",\n \"https://addons.mozilla.org/en-US/firefox/addon/noscript/\",\n \"http://noscript.net\"]\n\n\[email protected]('/redirect/<path:redirect_url>')\ndef redirect_hack(redirect_url):\n # A hack to avoid referer leakage when a user clicks on an external link.\n # TODO: Most likely will want to share this between source.py and\n # journalist.py in the future.\n if redirect_url not in _REDIRECT_URL_WHITELIST:\n return 'Redirect not allowed'\n else:\n return render_template(\"redirect.html\", redirect_url=redirect_url)\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\[email protected](500)\ndef internal_error(error):\n return render_template('error.html'), 500\n\nif __name__ == \"__main__\":\n # TODO make sure debug is not on in production\n app.run(debug=True, host='0.0.0.0', port=8080)\n", "path": "securedrop/source.py"}], "after_files": [{"content": "# 
-*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\nimport zipfile\nfrom cStringIO import StringIO\nimport subprocess\n\nimport logging\n# This module's logger is explicitly labeled so the correct logger is used,\n# even when this is run from the command line (e.g. during development)\nlog = logging.getLogger('source')\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nfrom db import db_session, Source, Submission\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'securedrop.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\[email protected]_appcontext\ndef shutdown_session(exception=None):\n \"\"\"Automatically remove database sessions at the end of the request, or\n when the application shuts down\"\"\"\n db_session.remove()\n\n\ndef logged_in():\n if 'logged_in' in session:\n return True\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('lookup'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.hash_codename` is scrypt (very\n # time consuming), and we don't need to waste time running if we're just\n # serving a static resource that won't need to access these common values.\n if logged_in():\n g.codename = session['codename']\n g.sid = crypto_util.hash_codename(g.codename)\n try:\n g.source = Source.query.filter(Source.filesystem_id == g.sid).one()\n except MultipleResultsFound as e:\n app.logger.error(\"Found multiple Sources when one was expected: %s\" % (e,))\n abort(500)\n except NoResultFound as e:\n app.logger.error(\"Found no Sources when one was expected: %s\" % (e,))\n abort(404)\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"header-warning\")\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n session.pop('logged_in', None)\n # TODO: make sure this codename isn't a repeat\n return render_template('generate.html', codename=session['codename'])\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.hash_codename(session['codename'])\n\n source = Source(sid, crypto_util.display_id())\n db_session.add(source)\n db_session.commit()\n\n if os.path.exists(store.path(sid)):\n # if this happens, we're not using very secure crypto\n log.warning(\"Got a duplicate ID '%s'\" % sid)\n else:\n os.mkdir(store.path(sid))\n\n session['logged_in'] = True\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n replies = []\n for fn in os.listdir(g.loc):\n if fn.startswith('reply-'):\n try:\n msg = crypto_util.decrypt(g.sid, g.codename,\n file(store.path(g.sid, fn)).read()).decode(\"utf-8\")\n except UnicodeDecodeError:\n app.logger.error(\"Could not decode reply %s\" % fn)\n else:\n date = str(datetime.fromtimestamp(\n os.stat(store.path(g.sid, fn)).st_mtime))\n replies.append(dict(id=fn, date=date, msg=msg))\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and g.source.flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template('lookup.html', codename=g.codename, msgs=replies,\n flagged=g.source.flagged, haskey=crypto_util.getkey(g.sid))\n\n\ndef normalize_timestamps(sid):\n \"\"\"\n Update the timestamps on all of the source's submissions to match that of\n the latest submission. This minimizes metadata that could be useful to\n investigators. See #301.\n \"\"\"\n sub_paths = [ store.path(sid, submission.filename)\n for submission in g.source.submissions ]\n if len(sub_paths) > 1:\n args = [\"touch\"]\n args.extend(sub_paths[:-1])\n rc = subprocess.call(args)\n if rc != 0:\n app.logger.warning(\"Couldn't normalize submission timestamps (touch exited with %d)\" % rc)\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n strip_metadata = True if 'notclean' in request.form else False\n\n fnames = []\n\n if msg:\n fnames.append(store.save_message_submission(g.sid, msg))\n flash(\"Thanks! We received your message.\", \"notification\")\n if fh:\n fnames.append(store.save_file_submission(g.sid, fh.filename,\n fh.stream, fh.content_type, strip_metadata))\n flash(\"Thanks! 
We received your document '%s'.\"\n % fh.filename or '[unnamed]', \"notification\")\n\n for fname in fnames:\n submission = Submission(g.source, fname)\n db_session.add(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n entropy_avail = int(open('/proc/sys/kernel/random/entropy_avail').read())\n if entropy_avail >= 2400:\n crypto_util.genkeypair(g.sid, g.codename)\n\n g.source.last_updated = datetime.now()\n db_session.commit()\n normalize_timestamps(g.sid)\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n store.secure_unlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.hash_codename(codename)))\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup'))\n else:\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\n_REDIRECT_URL_WHITELIST = [\"http://tor2web.org/\",\n \"https://www.torproject.org/download.html.en\",\n \"https://tails.boum.org/\",\n \"http://www.wired.com/threatlevel/2013/09/freedom-hosting-fbi/\",\n \"http://www.theguardian.com/world/interactive/2013/oct/04/egotistical-giraffe-nsa-tor-document\",\n \"https://addons.mozilla.org/en-US/firefox/addon/noscript/\",\n \"http://noscript.net\"]\n\n\[email protected]('/redirect/<path:redirect_url>')\ndef redirect_hack(redirect_url):\n # A hack to avoid referer leakage when a user clicks on an external link.\n # TODO: Most likely will want to share this between source.py and\n # journalist.py in the future.\n if redirect_url not in _REDIRECT_URL_WHITELIST:\n return 'Redirect not allowed'\n else:\n return render_template(\"redirect.html\", redirect_url=redirect_url)\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\[email protected](500)\ndef internal_error(error):\n return render_template('error.html'), 500\n\nif __name__ == \"__main__\":\n # TODO make sure debug is not on in production\n app.run(debug=True, host='0.0.0.0', port=8080)\n", "path": "securedrop/source.py"}]}
| 3,589 | 121 |
gh_patches_debug_17750
|
rasdani/github-patches
|
git_diff
|
canonical__microk8s-2195
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Indentation error in yaml output of microk8s.status
The output of `microk8s.status` is
```
microk8s:
running: False
high-availability:
enabled: False
message: microk8s is not running. Use microk8s inspect for a deeper inspection.
```
which confuses some parsers (e.g. the built-in Python 3.8) due to the extraneous space before `message`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/wrappers/status.py`
Content:
```
1 #!/usr/bin/python3
2 import os
3 import argparse
4
5 from common.utils import (
6 exit_if_no_permission,
7 exit_if_stopped,
8 is_cluster_locked,
9 is_ha_enabled,
10 get_dqlite_info,
11 wait_for_ready,
12 is_cluster_ready,
13 get_available_addons,
14 get_current_arch,
15 get_addon_by_name,
16 kubectl_get,
17 kubectl_get_clusterroles,
18 )
19
20
21 def is_enabled(addon, item):
22 if addon in item:
23 return True
24 else:
25 filepath = os.path.expandvars(addon)
26 return os.path.isfile(filepath)
27
28 return False
29
30
31 def print_short(isReady, enabled_addons, disabled_addons):
32 if isReady:
33 print("microk8s is running")
34 print("addons:")
35 if enabled_addons and len(enabled_addons) > 0:
36 for enabled in enabled_addons:
37 print("{}: enabled".format(enabled["name"]))
38 if disabled_addons and len(disabled_addons) > 0:
39 for disabled in disabled_addons:
40 print("{}: disabled".format(disabled["name"]))
41 else:
42 print("microk8s is not running. Use microk8s inspect for a deeper inspection.")
43
44
45 def print_pretty(isReady, enabled_addons, disabled_addons):
46 console_formatter = "{:>3} {:<20} # {}"
47 if isReady:
48 print("microk8s is running")
49 if not is_ha_enabled():
50 print("high-availability: no")
51 else:
52 info = get_dqlite_info()
53 if ha_cluster_formed(info):
54 print("high-availability: yes")
55 else:
56 print("high-availability: no")
57
58 masters = "none"
59 standby = "none"
60 for node in info:
61 if node[1] == "voter":
62 if masters == "none":
63 masters = "{}".format(node[0])
64 else:
65 masters = "{} {}".format(masters, node[0])
66 if node[1] == "standby":
67 if standby == "none":
68 standby = "{}".format(node[0])
69 else:
70 standby = "{} {}".format(standby, node[0])
71
72 print("{:>2}{} {}".format("", "datastore master nodes:", masters))
73 print("{:>2}{} {}".format("", "datastore standby nodes:", standby))
74
75 print("addons:")
76 if enabled_addons and len(enabled_addons) > 0:
77 print('{:>2}{}'.format("", "enabled:"))
78 for enabled in enabled_addons:
79 print(console_formatter.format("", enabled["name"], enabled["description"]))
80 if disabled_addons and len(disabled_addons) > 0:
81 print('{:>2}{}'.format("", "disabled:"))
82 for disabled in disabled_addons:
83 print(console_formatter.format("", disabled["name"], disabled["description"]))
84 else:
85 print("microk8s is not running. Use microk8s inspect for a deeper inspection.")
86
87
88 def print_short_yaml(isReady, enabled_addons, disabled_addons):
89 print("microk8s:")
90 print("{:>2}{} {}".format("", "running:", isReady))
91
92 if isReady:
93 print("addons:")
94 for enabled in enabled_addons:
95 print(" {}: enabled".format(enabled["name"]))
96
97 for disabled in disabled_addons:
98 print(" {}: disabled".format(disabled["name"]))
99 else:
100 print(
101 "{:>2} {} {}".format(
102 "",
103 "message:",
104 "microk8s is not running. Use microk8s inspect for a deeper inspection.",
105 )
106 )
107
108
109 def print_yaml(isReady, enabled_addons, disabled_addons):
110 print("microk8s:")
111 print("{:>2}{} {}".format("", "running:", isReady))
112
113 print("{:>2}".format("high-availability:"))
114 ha_enabled = is_ha_enabled()
115 print("{:>2}{} {}".format("", "enabled:", ha_enabled))
116 if ha_enabled:
117 info = get_dqlite_info()
118 print("{:>2}{}".format("", "nodes:"))
119 for node in info:
120 print("{:>6}address: {:<1}".format("- ", node[0]))
121 print("{:>6}role: {:<1}".format("", node[1]))
122
123 if isReady:
124 print("{:>2}".format("addons:"))
125 for enabled in enabled_addons:
126 print("{:>4}name: {:<1}".format("- ", enabled["name"]))
127 print("{:>4}description: {:<1}".format("", enabled["description"]))
128 print("{:>4}version: {:<1}".format("", enabled["version"]))
129 print("{:>4}status: enabled".format(""))
130
131 for disabled in disabled_addons:
132 print("{:>4}name: {:<1}".format("- ", disabled["name"]))
133 print("{:>4}description: {:<1}".format("", disabled["description"]))
134 print("{:>4}version: {:<1}".format("", disabled["version"]))
135 print("{:>4}status: disabled".format(""))
136 else:
137 print(
138 "{:>2} {} {}".format(
139 "",
140 "message:",
141 "microk8s is not running. Use microk8s inspect for a deeper inspection.",
142 )
143 )
144
145
146 def print_addon_status(enabled):
147 if len(enabled) > 0:
148 print("enabled")
149 else:
150 print("disabled")
151
152
153 def get_status(available_addons, isReady):
154 enabled = []
155 disabled = []
156 if isReady:
157 kube_output = kubectl_get("all")
158 cluster_output = kubectl_get_clusterroles()
159 kube_output = kube_output + cluster_output
160 for addon in available_addons:
161 found = False
162 for row in kube_output.split('\n'):
163 if is_enabled(addon["check_status"], row):
164 enabled.append(addon)
165 found = True
166 break
167 if not found:
168 disabled.append(addon)
169
170 return enabled, disabled
171
172
173 def ha_cluster_formed(info):
174 voters = 0
175 for node in info:
176 if node[1] == "voter":
177 voters += 1
178 ha_formed = False
179 if voters > 2:
180 ha_formed = True
181 return ha_formed
182
183
184 if __name__ == '__main__':
185 exit_if_no_permission()
186 exit_if_stopped()
187 is_cluster_locked()
188
189 # initiate the parser with a description
190 parser = argparse.ArgumentParser(
191 description='Microk8s cluster status check.', prog='microk8s status'
192 )
193 parser.add_argument(
194 "--format",
195 help="print cluster and addon status, output can be in yaml, pretty or short",
196 default="pretty",
197 choices={"pretty", "yaml", "short"},
198 )
199 parser.add_argument(
200 "-w", "--wait-ready", action='store_true', help="wait until the cluster is in ready state"
201 )
202 parser.add_argument(
203 "-t",
204 "--timeout",
205 help="specify a timeout in seconds when waiting for the cluster to be ready.",
206 type=int,
207 default=0,
208 )
209 parser.add_argument("-a", "--addon", help="check the status of an addon.", default="all")
210 parser.add_argument(
211 "--yaml", action='store_true', help="DEPRECATED, use '--format yaml' instead"
212 )
213
214 # read arguments from the command line
215 args = parser.parse_args()
216
217 wait_ready = args.wait_ready
218 timeout = args.timeout
219 yaml_short = args.yaml
220
221 if wait_ready:
222 isReady = wait_for_ready(wait_ready, timeout)
223 else:
224 isReady = is_cluster_ready()
225
226 available_addons = get_available_addons(get_current_arch())
227
228 if args.addon != "all":
229 available_addons = get_addon_by_name(available_addons, args.addon)
230
231 enabled, disabled = get_status(available_addons, isReady)
232
233 if args.addon != "all":
234 print_addon_status(enabled)
235 else:
236 if args.format == "yaml":
237 print_yaml(isReady, enabled, disabled)
238 elif args.format == "short":
239 print_short(isReady, enabled, disabled)
240 else:
241 if yaml_short:
242 print_short_yaml(isReady, enabled, disabled)
243 else:
244 print_pretty(isReady, enabled, disabled)
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/wrappers/status.py b/scripts/wrappers/status.py
--- a/scripts/wrappers/status.py
+++ b/scripts/wrappers/status.py
@@ -98,7 +98,7 @@
print(" {}: disabled".format(disabled["name"]))
else:
print(
- "{:>2} {} {}".format(
+ "{:>2}{} {}".format(
"",
"message:",
"microk8s is not running. Use microk8s inspect for a deeper inspection.",
@@ -135,7 +135,7 @@
print("{:>4}status: disabled".format(""))
else:
print(
- "{:>2} {} {}".format(
+ "{:>2}{} {}".format(
"",
"message:",
"microk8s is not running. Use microk8s inspect for a deeper inspection.",
|
{"golden_diff": "diff --git a/scripts/wrappers/status.py b/scripts/wrappers/status.py\n--- a/scripts/wrappers/status.py\n+++ b/scripts/wrappers/status.py\n@@ -98,7 +98,7 @@\n print(\" {}: disabled\".format(disabled[\"name\"]))\n else:\n print(\n- \"{:>2} {} {}\".format(\n+ \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. Use microk8s inspect for a deeper inspection.\",\n@@ -135,7 +135,7 @@\n print(\"{:>4}status: disabled\".format(\"\"))\n else:\n print(\n- \"{:>2} {} {}\".format(\n+ \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. Use microk8s inspect for a deeper inspection.\",\n", "issue": "Indentation error in yaml output of microk8s.status\nThe output of `microk8s.status` is\r\n\r\n```\r\nmicrok8s:\r\n running: False\r\nhigh-availability:\r\n enabled: False\r\n message: microk8s is not running. Use microk8s inspect for a deeper inspection.\r\n```\r\n\r\nwhich confuses some parsers (e.g. the built-in Python 3.8) due to the extraneous space before `message`.\n", "before_files": [{"content": "#!/usr/bin/python3\nimport os\nimport argparse\n\nfrom common.utils import (\n exit_if_no_permission,\n exit_if_stopped,\n is_cluster_locked,\n is_ha_enabled,\n get_dqlite_info,\n wait_for_ready,\n is_cluster_ready,\n get_available_addons,\n get_current_arch,\n get_addon_by_name,\n kubectl_get,\n kubectl_get_clusterroles,\n)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n return False\n\n\ndef print_short(isReady, enabled_addons, disabled_addons):\n if isReady:\n print(\"microk8s is running\")\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n for enabled in enabled_addons:\n print(\"{}: enabled\".format(enabled[\"name\"]))\n if disabled_addons and len(disabled_addons) > 0:\n for disabled in disabled_addons:\n print(\"{}: disabled\".format(disabled[\"name\"]))\n else:\n print(\"microk8s is not running. Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_pretty(isReady, enabled_addons, disabled_addons):\n console_formatter = \"{:>3} {:<20} # {}\"\n if isReady:\n print(\"microk8s is running\")\n if not is_ha_enabled():\n print(\"high-availability: no\")\n else:\n info = get_dqlite_info()\n if ha_cluster_formed(info):\n print(\"high-availability: yes\")\n else:\n print(\"high-availability: no\")\n\n masters = \"none\"\n standby = \"none\"\n for node in info:\n if node[1] == \"voter\":\n if masters == \"none\":\n masters = \"{}\".format(node[0])\n else:\n masters = \"{} {}\".format(masters, node[0])\n if node[1] == \"standby\":\n if standby == \"none\":\n standby = \"{}\".format(node[0])\n else:\n standby = \"{} {}\".format(standby, node[0])\n\n print(\"{:>2}{} {}\".format(\"\", \"datastore master nodes:\", masters))\n print(\"{:>2}{} {}\".format(\"\", \"datastore standby nodes:\", standby))\n\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n print('{:>2}{}'.format(\"\", \"enabled:\"))\n for enabled in enabled_addons:\n print(console_formatter.format(\"\", enabled[\"name\"], enabled[\"description\"]))\n if disabled_addons and len(disabled_addons) > 0:\n print('{:>2}{}'.format(\"\", \"disabled:\"))\n for disabled in disabled_addons:\n print(console_formatter.format(\"\", disabled[\"name\"], disabled[\"description\"]))\n else:\n print(\"microk8s is not running. 
Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_short_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n if isReady:\n print(\"addons:\")\n for enabled in enabled_addons:\n print(\" {}: enabled\".format(enabled[\"name\"]))\n\n for disabled in disabled_addons:\n print(\" {}: disabled\".format(disabled[\"name\"]))\n else:\n print(\n \"{:>2} {} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n print(\"{:>2}\".format(\"high-availability:\"))\n ha_enabled = is_ha_enabled()\n print(\"{:>2}{} {}\".format(\"\", \"enabled:\", ha_enabled))\n if ha_enabled:\n info = get_dqlite_info()\n print(\"{:>2}{}\".format(\"\", \"nodes:\"))\n for node in info:\n print(\"{:>6}address: {:<1}\".format(\"- \", node[0]))\n print(\"{:>6}role: {:<1}\".format(\"\", node[1]))\n\n if isReady:\n print(\"{:>2}\".format(\"addons:\"))\n for enabled in enabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", enabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", enabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", enabled[\"version\"]))\n print(\"{:>4}status: enabled\".format(\"\"))\n\n for disabled in disabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", disabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", disabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", disabled[\"version\"]))\n print(\"{:>4}status: disabled\".format(\"\"))\n else:\n print(\n \"{:>2} {} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. 
Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_addon_status(enabled):\n if len(enabled) > 0:\n print(\"enabled\")\n else:\n print(\"disabled\")\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n kube_output = kubectl_get(\"all\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split('\\n'):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef ha_cluster_formed(info):\n voters = 0\n for node in info:\n if node[1] == \"voter\":\n voters += 1\n ha_formed = False\n if voters > 2:\n ha_formed = True\n return ha_formed\n\n\nif __name__ == '__main__':\n exit_if_no_permission()\n exit_if_stopped()\n is_cluster_locked()\n\n # initiate the parser with a description\n parser = argparse.ArgumentParser(\n description='Microk8s cluster status check.', prog='microk8s status'\n )\n parser.add_argument(\n \"--format\",\n help=\"print cluster and addon status, output can be in yaml, pretty or short\",\n default=\"pretty\",\n choices={\"pretty\", \"yaml\", \"short\"},\n )\n parser.add_argument(\n \"-w\", \"--wait-ready\", action='store_true', help=\"wait until the cluster is in ready state\"\n )\n parser.add_argument(\n \"-t\",\n \"--timeout\",\n help=\"specify a timeout in seconds when waiting for the cluster to be ready.\",\n type=int,\n default=0,\n )\n parser.add_argument(\"-a\", \"--addon\", help=\"check the status of an addon.\", default=\"all\")\n parser.add_argument(\n \"--yaml\", action='store_true', help=\"DEPRECATED, use '--format yaml' instead\"\n )\n\n # read arguments from the command line\n args = parser.parse_args()\n\n wait_ready = args.wait_ready\n timeout = args.timeout\n yaml_short = args.yaml\n\n if wait_ready:\n isReady = wait_for_ready(wait_ready, timeout)\n else:\n isReady = is_cluster_ready()\n\n available_addons = get_available_addons(get_current_arch())\n\n if args.addon != \"all\":\n available_addons = get_addon_by_name(available_addons, args.addon)\n\n enabled, disabled = get_status(available_addons, isReady)\n\n if args.addon != \"all\":\n print_addon_status(enabled)\n else:\n if args.format == \"yaml\":\n print_yaml(isReady, enabled, disabled)\n elif args.format == \"short\":\n print_short(isReady, enabled, disabled)\n else:\n if yaml_short:\n print_short_yaml(isReady, enabled, disabled)\n else:\n print_pretty(isReady, enabled, disabled)\n", "path": "scripts/wrappers/status.py"}], "after_files": [{"content": "#!/usr/bin/python3\nimport os\nimport argparse\n\nfrom common.utils import (\n exit_if_no_permission,\n exit_if_stopped,\n is_cluster_locked,\n is_ha_enabled,\n get_dqlite_info,\n wait_for_ready,\n is_cluster_ready,\n get_available_addons,\n get_current_arch,\n get_addon_by_name,\n kubectl_get,\n kubectl_get_clusterroles,\n)\n\n\ndef is_enabled(addon, item):\n if addon in item:\n return True\n else:\n filepath = os.path.expandvars(addon)\n return os.path.isfile(filepath)\n\n return False\n\n\ndef print_short(isReady, enabled_addons, disabled_addons):\n if isReady:\n print(\"microk8s is running\")\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n for enabled in enabled_addons:\n print(\"{}: enabled\".format(enabled[\"name\"]))\n if disabled_addons and len(disabled_addons) > 0:\n for disabled in disabled_addons:\n print(\"{}: 
disabled\".format(disabled[\"name\"]))\n else:\n print(\"microk8s is not running. Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_pretty(isReady, enabled_addons, disabled_addons):\n console_formatter = \"{:>3} {:<20} # {}\"\n if isReady:\n print(\"microk8s is running\")\n if not is_ha_enabled():\n print(\"high-availability: no\")\n else:\n info = get_dqlite_info()\n if ha_cluster_formed(info):\n print(\"high-availability: yes\")\n else:\n print(\"high-availability: no\")\n\n masters = \"none\"\n standby = \"none\"\n for node in info:\n if node[1] == \"voter\":\n if masters == \"none\":\n masters = \"{}\".format(node[0])\n else:\n masters = \"{} {}\".format(masters, node[0])\n if node[1] == \"standby\":\n if standby == \"none\":\n standby = \"{}\".format(node[0])\n else:\n standby = \"{} {}\".format(standby, node[0])\n\n print(\"{:>2}{} {}\".format(\"\", \"datastore master nodes:\", masters))\n print(\"{:>2}{} {}\".format(\"\", \"datastore standby nodes:\", standby))\n\n print(\"addons:\")\n if enabled_addons and len(enabled_addons) > 0:\n print('{:>2}{}'.format(\"\", \"enabled:\"))\n for enabled in enabled_addons:\n print(console_formatter.format(\"\", enabled[\"name\"], enabled[\"description\"]))\n if disabled_addons and len(disabled_addons) > 0:\n print('{:>2}{}'.format(\"\", \"disabled:\"))\n for disabled in disabled_addons:\n print(console_formatter.format(\"\", disabled[\"name\"], disabled[\"description\"]))\n else:\n print(\"microk8s is not running. Use microk8s inspect for a deeper inspection.\")\n\n\ndef print_short_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n if isReady:\n print(\"addons:\")\n for enabled in enabled_addons:\n print(\" {}: enabled\".format(enabled[\"name\"]))\n\n for disabled in disabled_addons:\n print(\" {}: disabled\".format(disabled[\"name\"]))\n else:\n print(\n \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_yaml(isReady, enabled_addons, disabled_addons):\n print(\"microk8s:\")\n print(\"{:>2}{} {}\".format(\"\", \"running:\", isReady))\n\n print(\"{:>2}\".format(\"high-availability:\"))\n ha_enabled = is_ha_enabled()\n print(\"{:>2}{} {}\".format(\"\", \"enabled:\", ha_enabled))\n if ha_enabled:\n info = get_dqlite_info()\n print(\"{:>2}{}\".format(\"\", \"nodes:\"))\n for node in info:\n print(\"{:>6}address: {:<1}\".format(\"- \", node[0]))\n print(\"{:>6}role: {:<1}\".format(\"\", node[1]))\n\n if isReady:\n print(\"{:>2}\".format(\"addons:\"))\n for enabled in enabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", enabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", enabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", enabled[\"version\"]))\n print(\"{:>4}status: enabled\".format(\"\"))\n\n for disabled in disabled_addons:\n print(\"{:>4}name: {:<1}\".format(\"- \", disabled[\"name\"]))\n print(\"{:>4}description: {:<1}\".format(\"\", disabled[\"description\"]))\n print(\"{:>4}version: {:<1}\".format(\"\", disabled[\"version\"]))\n print(\"{:>4}status: disabled\".format(\"\"))\n else:\n print(\n \"{:>2}{} {}\".format(\n \"\",\n \"message:\",\n \"microk8s is not running. 
Use microk8s inspect for a deeper inspection.\",\n )\n )\n\n\ndef print_addon_status(enabled):\n if len(enabled) > 0:\n print(\"enabled\")\n else:\n print(\"disabled\")\n\n\ndef get_status(available_addons, isReady):\n enabled = []\n disabled = []\n if isReady:\n kube_output = kubectl_get(\"all\")\n cluster_output = kubectl_get_clusterroles()\n kube_output = kube_output + cluster_output\n for addon in available_addons:\n found = False\n for row in kube_output.split('\\n'):\n if is_enabled(addon[\"check_status\"], row):\n enabled.append(addon)\n found = True\n break\n if not found:\n disabled.append(addon)\n\n return enabled, disabled\n\n\ndef ha_cluster_formed(info):\n voters = 0\n for node in info:\n if node[1] == \"voter\":\n voters += 1\n ha_formed = False\n if voters > 2:\n ha_formed = True\n return ha_formed\n\n\nif __name__ == '__main__':\n exit_if_no_permission()\n exit_if_stopped()\n is_cluster_locked()\n\n # initiate the parser with a description\n parser = argparse.ArgumentParser(\n description='Microk8s cluster status check.', prog='microk8s status'\n )\n parser.add_argument(\n \"--format\",\n help=\"print cluster and addon status, output can be in yaml, pretty or short\",\n default=\"pretty\",\n choices={\"pretty\", \"yaml\", \"short\"},\n )\n parser.add_argument(\n \"-w\", \"--wait-ready\", action='store_true', help=\"wait until the cluster is in ready state\"\n )\n parser.add_argument(\n \"-t\",\n \"--timeout\",\n help=\"specify a timeout in seconds when waiting for the cluster to be ready.\",\n type=int,\n default=0,\n )\n parser.add_argument(\"-a\", \"--addon\", help=\"check the status of an addon.\", default=\"all\")\n parser.add_argument(\n \"--yaml\", action='store_true', help=\"DEPRECATED, use '--format yaml' instead\"\n )\n\n # read arguments from the command line\n args = parser.parse_args()\n\n wait_ready = args.wait_ready\n timeout = args.timeout\n yaml_short = args.yaml\n\n if wait_ready:\n isReady = wait_for_ready(wait_ready, timeout)\n else:\n isReady = is_cluster_ready()\n\n available_addons = get_available_addons(get_current_arch())\n\n if args.addon != \"all\":\n available_addons = get_addon_by_name(available_addons, args.addon)\n\n enabled, disabled = get_status(available_addons, isReady)\n\n if args.addon != \"all\":\n print_addon_status(enabled)\n else:\n if args.format == \"yaml\":\n print_yaml(isReady, enabled, disabled)\n elif args.format == \"short\":\n print_short(isReady, enabled, disabled)\n else:\n if yaml_short:\n print_short_yaml(isReady, enabled, disabled)\n else:\n print_pretty(isReady, enabled, disabled)\n", "path": "scripts/wrappers/status.py"}]}
| 2,806 | 193 |
gh_patches_debug_2733
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-805
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ADMIN - Path segment looping back on itself
Impossible to enter the CIRCUIT DES LACS correctly.
Often returns a 504 BAD GATEWAY when saving. The route was nevertheless modified, but differently from the way it was entered. Needs further investigation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/core/forms.py`
Content:
```
1 from django.utils.translation import ugettext_lazy as _
2
3 import floppyforms as forms
4
5 from geotrek.common.forms import CommonForm
6 from .models import Path
7 from .helpers import PathHelper
8 from .fields import TopologyField, SnappedLineStringField
9
10
11 class TopologyForm(CommonForm):
12 """
13 This form is a bit specific :
14
15 We use a field (topology) in order to edit the whole instance.
16 Thus, at init, we load the instance into field, and at save, we
17 save the field into the instance.
18
19 The geom field is fully ignored, since we edit a topology.
20 """
21 topology = TopologyField(label="")
22
23 def __init__(self, *args, **kwargs):
24 super(TopologyForm, self).__init__(*args, **kwargs)
25 if self.instance and self.instance.pk:
26 self.fields['topology'].initial = self.instance
27
28 def clean(self, *args, **kwargs):
29 data = super(TopologyForm, self).clean()
30 # geom is computed at db-level and never edited
31 if 'geom' in self.errors:
32 del self.errors['geom']
33 return data
34
35 def save(self, *args, **kwargs):
36 topology = self.cleaned_data.pop('topology')
37 instance = super(TopologyForm, self).save(*args, **kwargs)
38 instance.mutate(topology)
39 return instance
40
41 geomfields = ['topology']
42
43 class Meta(CommonForm.Meta):
44 fields = CommonForm.Meta.fields + ['topology']
45
46 MEDIA_JS = ("core/dijkstra.js",
47 "core/leaflet-geomutils.js",
48 "core/multipath.js",
49 "core/topology_helper.js") + CommonForm.MEDIA_JS
50
51
52 class PathForm(CommonForm):
53 geom = SnappedLineStringField()
54
55 reverse_geom = forms.BooleanField(required=False,
56 label=_("Reverse path"),
57 help_text=_("The path will be reversed once saved"))
58
59 geomfields = ['geom']
60
61 class Meta(CommonForm.Meta):
62 model = Path
63 fields = CommonForm.Meta.fields + \
64 ['structure',
65 'name', 'stake', 'comfort', 'trail', 'departure', 'arrival', 'comments',
66 'datasource', 'networks', 'usages', 'valid', 'reverse_geom', 'geom']
67
68 def __init__(self, *args, **kwargs):
69 super(PathForm, self).__init__(*args, **kwargs)
70 self.fields['geom'].label = ''
71
72 def clean_geom(self):
73 geom = self.cleaned_data['geom']
74 if geom is None:
75 raise forms.ValidationError(_("Invalid snapped geometry."))
76 if not geom.simple:
77 raise forms.ValidationError(_("Geometry is not simple."))
78 if not PathHelper.disjoint(geom, self.cleaned_data.get('pk') or -1):
79 raise forms.ValidationError(_("Geometry overlaps another."))
80 return geom
81
82 def save(self, commit=True):
83 path = super(PathForm, self).save(commit=False)
84
85 if self.cleaned_data.get('reverse_geom'):
86 path.reverse()
87
88 if commit:
89 path.save()
90 self.save_m2m()
91
92 return path
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geotrek/core/forms.py b/geotrek/core/forms.py
--- a/geotrek/core/forms.py
+++ b/geotrek/core/forms.py
@@ -44,7 +44,6 @@
fields = CommonForm.Meta.fields + ['topology']
MEDIA_JS = ("core/dijkstra.js",
- "core/leaflet-geomutils.js",
"core/multipath.js",
"core/topology_helper.js") + CommonForm.MEDIA_JS
|
{"golden_diff": "diff --git a/geotrek/core/forms.py b/geotrek/core/forms.py\n--- a/geotrek/core/forms.py\n+++ b/geotrek/core/forms.py\n@@ -44,7 +44,6 @@\n fields = CommonForm.Meta.fields + ['topology']\n \n MEDIA_JS = (\"core/dijkstra.js\",\n- \"core/leaflet-geomutils.js\",\n \"core/multipath.js\",\n \"core/topology_helper.js\") + CommonForm.MEDIA_JS\n", "issue": "ADMIN - Tron\u00e7on bouclant sur lui-m\u00eame\nImpossible de saisir le CIRCUIT DES LACS correctement.\nRenvoie souvent une 504 BAD GATEWAY quand on enregistre. L'itin\u00e9raire a pourtant \u00e9t\u00e9 modifi\u00e9 mais diff\u00e9remment de la fa\u00e7on dont il a \u00e9t\u00e9 saisi. A creuser.\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport floppyforms as forms\n\nfrom geotrek.common.forms import CommonForm\nfrom .models import Path\nfrom .helpers import PathHelper\nfrom .fields import TopologyField, SnappedLineStringField\n\n\nclass TopologyForm(CommonForm):\n \"\"\"\n This form is a bit specific :\n\n We use a field (topology) in order to edit the whole instance.\n Thus, at init, we load the instance into field, and at save, we\n save the field into the instance.\n\n The geom field is fully ignored, since we edit a topology.\n \"\"\"\n topology = TopologyField(label=\"\")\n\n def __init__(self, *args, **kwargs):\n super(TopologyForm, self).__init__(*args, **kwargs)\n if self.instance and self.instance.pk:\n self.fields['topology'].initial = self.instance\n\n def clean(self, *args, **kwargs):\n data = super(TopologyForm, self).clean()\n # geom is computed at db-level and never edited\n if 'geom' in self.errors:\n del self.errors['geom']\n return data\n\n def save(self, *args, **kwargs):\n topology = self.cleaned_data.pop('topology')\n instance = super(TopologyForm, self).save(*args, **kwargs)\n instance.mutate(topology)\n return instance\n\n geomfields = ['topology']\n\n class Meta(CommonForm.Meta):\n fields = CommonForm.Meta.fields + ['topology']\n\n MEDIA_JS = (\"core/dijkstra.js\",\n \"core/leaflet-geomutils.js\",\n \"core/multipath.js\",\n \"core/topology_helper.js\") + CommonForm.MEDIA_JS\n\n\nclass PathForm(CommonForm):\n geom = SnappedLineStringField()\n\n reverse_geom = forms.BooleanField(required=False,\n label=_(\"Reverse path\"),\n help_text=_(\"The path will be reversed once saved\"))\n\n geomfields = ['geom']\n\n class Meta(CommonForm.Meta):\n model = Path\n fields = CommonForm.Meta.fields + \\\n ['structure',\n 'name', 'stake', 'comfort', 'trail', 'departure', 'arrival', 'comments',\n 'datasource', 'networks', 'usages', 'valid', 'reverse_geom', 'geom']\n\n def __init__(self, *args, **kwargs):\n super(PathForm, self).__init__(*args, **kwargs)\n self.fields['geom'].label = ''\n\n def clean_geom(self):\n geom = self.cleaned_data['geom']\n if geom is None:\n raise forms.ValidationError(_(\"Invalid snapped geometry.\"))\n if not geom.simple:\n raise forms.ValidationError(_(\"Geometry is not simple.\"))\n if not PathHelper.disjoint(geom, self.cleaned_data.get('pk') or -1):\n raise forms.ValidationError(_(\"Geometry overlaps another.\"))\n return geom\n\n def save(self, commit=True):\n path = super(PathForm, self).save(commit=False)\n\n if self.cleaned_data.get('reverse_geom'):\n path.reverse()\n\n if commit:\n path.save()\n self.save_m2m()\n\n return path\n", "path": "geotrek/core/forms.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport floppyforms as forms\n\nfrom geotrek.common.forms import CommonForm\nfrom 
.models import Path\nfrom .helpers import PathHelper\nfrom .fields import TopologyField, SnappedLineStringField\n\n\nclass TopologyForm(CommonForm):\n \"\"\"\n This form is a bit specific :\n\n We use a field (topology) in order to edit the whole instance.\n Thus, at init, we load the instance into field, and at save, we\n save the field into the instance.\n\n The geom field is fully ignored, since we edit a topology.\n \"\"\"\n topology = TopologyField(label=\"\")\n\n def __init__(self, *args, **kwargs):\n super(TopologyForm, self).__init__(*args, **kwargs)\n if self.instance and self.instance.pk:\n self.fields['topology'].initial = self.instance\n\n def clean(self, *args, **kwargs):\n data = super(TopologyForm, self).clean()\n # geom is computed at db-level and never edited\n if 'geom' in self.errors:\n del self.errors['geom']\n return data\n\n def save(self, *args, **kwargs):\n topology = self.cleaned_data.pop('topology')\n instance = super(TopologyForm, self).save(*args, **kwargs)\n instance.mutate(topology)\n return instance\n\n geomfields = ['topology']\n\n class Meta(CommonForm.Meta):\n fields = CommonForm.Meta.fields + ['topology']\n\n MEDIA_JS = (\"core/dijkstra.js\",\n \"core/multipath.js\",\n \"core/topology_helper.js\") + CommonForm.MEDIA_JS\n\n\nclass PathForm(CommonForm):\n geom = SnappedLineStringField()\n\n reverse_geom = forms.BooleanField(required=False,\n label=_(\"Reverse path\"),\n help_text=_(\"The path will be reversed once saved\"))\n\n geomfields = ['geom']\n\n class Meta(CommonForm.Meta):\n model = Path\n fields = CommonForm.Meta.fields + \\\n ['structure',\n 'name', 'stake', 'comfort', 'trail', 'departure', 'arrival', 'comments',\n 'datasource', 'networks', 'usages', 'valid', 'reverse_geom', 'geom']\n\n def __init__(self, *args, **kwargs):\n super(PathForm, self).__init__(*args, **kwargs)\n self.fields['geom'].label = ''\n\n def clean_geom(self):\n geom = self.cleaned_data['geom']\n if geom is None:\n raise forms.ValidationError(_(\"Invalid snapped geometry.\"))\n if not geom.simple:\n raise forms.ValidationError(_(\"Geometry is not simple.\"))\n if not PathHelper.disjoint(geom, self.cleaned_data.get('pk') or -1):\n raise forms.ValidationError(_(\"Geometry overlaps another.\"))\n return geom\n\n def save(self, commit=True):\n path = super(PathForm, self).save(commit=False)\n\n if self.cleaned_data.get('reverse_geom'):\n path.reverse()\n\n if commit:\n path.save()\n self.save_m2m()\n\n return path\n", "path": "geotrek/core/forms.py"}]}
| 1,194 | 104 |
gh_patches_debug_32674
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-2268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
crash when export flow to 'r' curl
##### Steps to reproduce the problem:
1. enter a flow view
2. press E and choose r
3. crash
##### Any other comments? What have you tried so far?
##### System information
Traceback (most recent call last):
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/master.py", line 281, in run
self.loop.run()
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py", line 278, in run
self._run()
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py", line 376, in _run
self.event_loop.run()
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py", line 682, in run
self._loop()
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py", line 719, in _loop
self._watch_files[fd]()
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/raw_display.py", line 393, in <lambda>
event_loop, callback, self.get_available_raw_input())
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/raw_display.py", line 493, in parse_input
callback(processed, processed_codes)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py", line 403, in _update
self.process_input(keys)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py", line 503, in process_input
k = self._topmost_widget.keypress(self.screen_size, k)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/window.py", line 84, in keypress
k = super().keypress(size, k)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/container.py", line 1116, in keypress
return self.footer.keypress((maxcol,),key)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py", line 155, in keypress
return self.master.ab.keypress(*args, **kwargs)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py", line 108, in keypress
self.prompt_execute(k)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py", line 133, in prompt_execute
msg = p(txt)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py", line 31, in __call__
return self.callback(txt, *self.args)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/common.py", line 328, in export_to_clip_or_file
writer(exporter(flow))
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/export.py", line 39, in curl_command
data += " --data-binary '%s'" % _native(request.content)
File "/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/export.py", line 13, in _native
return s.decode()
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x88 in position 188: invalid start byte
<!--
Cut and paste the output of "mitmproxy --version".
  If you're using an older version of mitmproxy, please specify the version
and OS.
-->
Mitmproxy version: 2.0.1 (release version)
Python version: 3.6.1
Platform: Darwin-16.1.0-x86_64-i386-64bit
SSL version: OpenSSL 1.1.0e 16 Feb 2017
Mac version: 10.12.1 ('', '', '') x86_64
--- END ISSUE ---
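For context on the traceback above: `_native()` simply calls `bytes.decode()`, which assumes UTF-8 and raises `UnicodeDecodeError` on binary request bodies such as the `0x88` byte in this report. The accepted fix further down swaps that call for mitmproxy's `strutils.bytes_to_escaped_str`. The stdlib-only sketch below, with a hypothetical payload, reproduces the failure and shows an escape-based conversion that never raises; it is only a rough stand-in for the real helper.
```python
# Hypothetical non-UTF-8 request body, similar to the one in the report.
content = b"\x88\x01binary\x00payload"

try:
    content.decode()  # what _native() does today -> UnicodeDecodeError
except UnicodeDecodeError as exc:
    print("decode failed:", exc)

# Escaping instead of decoding always succeeds. This only approximates
# strutils.bytes_to_escaped_str(content, escape_single_quotes=True).
escaped = content.decode("utf-8", errors="backslashreplace").replace("'", "\\'")
print("curl --data-binary '%s'" % escaped)
```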
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/export.py`
Content:
```
1 import io
2 import json
3 import pprint
4 import re
5 import textwrap
6 from typing import Any
7
8 from mitmproxy import http
9
10
11 def _native(s):
12 if isinstance(s, bytes):
13 return s.decode()
14 return s
15
16
17 def dictstr(items, indent: str) -> str:
18 lines = []
19 for k, v in items:
20 lines.append(indent + "%s: %s,\n" % (repr(_native(k)), repr(_native(v))))
21 return "{\n%s}\n" % "".join(lines)
22
23
24 def curl_command(flow: http.HTTPFlow) -> str:
25 data = "curl "
26
27 request = flow.request.copy()
28 request.decode(strict=False)
29
30 for k, v in request.headers.items(multi=True):
31 data += "-H '%s:%s' " % (k, v)
32
33 if request.method != "GET":
34 data += "-X %s " % request.method
35
36 data += "'%s'" % request.url
37
38 if request.content:
39 data += " --data-binary '%s'" % _native(request.content)
40
41 return data
42
43
44 def python_arg(arg: str, val: Any) -> str:
45 if not val:
46 return ""
47 if arg:
48 arg += "="
49 arg_str = "{}{},\n".format(
50 arg,
51 pprint.pformat(val, 79 - len(arg))
52 )
53 return textwrap.indent(arg_str, " " * 4)
54
55
56 def python_code(flow: http.HTTPFlow):
57 code = io.StringIO()
58
59 def writearg(arg, val):
60 code.write(python_arg(arg, val))
61
62 code.write("import requests\n")
63 code.write("\n")
64 if flow.request.method.lower() in ("get", "post", "put", "head", "delete", "patch"):
65 code.write("response = requests.{}(\n".format(flow.request.method.lower()))
66 else:
67 code.write("response = requests.request(\n")
68 writearg("", flow.request.method)
69 url_without_query = flow.request.url.split("?", 1)[0]
70 writearg("", url_without_query)
71
72 writearg("params", list(flow.request.query.fields))
73
74 headers = flow.request.headers.copy()
75 # requests adds those by default.
76 for x in (":authority", "host", "content-length"):
77 headers.pop(x, None)
78 writearg("headers", dict(headers))
79 try:
80 if "json" not in flow.request.headers.get("content-type", ""):
81 raise ValueError()
82 writearg("json", json.loads(flow.request.text))
83 except ValueError:
84 writearg("data", flow.request.content)
85
86 code.seek(code.tell() - 2) # remove last comma
87 code.write("\n)\n")
88 code.write("\n")
89 code.write("print(response.text)")
90
91 return code.getvalue()
92
93
94 def locust_code(flow):
95 code = textwrap.dedent("""
96 from locust import HttpLocust, TaskSet, task
97
98 class UserBehavior(TaskSet):
99 def on_start(self):
100 ''' on_start is called when a Locust start before any task is scheduled '''
101 self.{name}()
102
103 @task()
104 def {name}(self):
105 url = self.locust.host + '{path}'
106 {headers}{params}{data}
107 self.response = self.client.request(
108 method='{method}',
109 url=url,{args}
110 )
111
112 ### Additional tasks can go here ###
113
114
115 class WebsiteUser(HttpLocust):
116 task_set = UserBehavior
117 min_wait = 1000
118 max_wait = 3000
119 """).strip()
120
121 name = re.sub('\W|^(?=\d)', '_', flow.request.path.strip("/").split("?", 1)[0])
122 if not name:
123 new_name = "_".join([str(flow.request.host), str(flow.request.timestamp_start)])
124 name = re.sub('\W|^(?=\d)', '_', new_name)
125
126 path_without_query = flow.request.path.split("?")[0]
127
128 args = ""
129 headers = ""
130 if flow.request.headers:
131 lines = [
132 (_native(k), _native(v)) for k, v in flow.request.headers.fields
133 if _native(k).lower() not in [":authority", "host", "cookie"]
134 ]
135 lines = [" '%s': '%s',\n" % (k, v) for k, v in lines]
136 headers += "\n headers = {\n%s }\n" % "".join(lines)
137 args += "\n headers=headers,"
138
139 params = ""
140 if flow.request.query:
141 lines = [
142 " %s: %s,\n" % (repr(k), repr(v))
143 for k, v in
144 flow.request.query.collect()
145 ]
146 params = "\n params = {\n%s }\n" % "".join(lines)
147 args += "\n params=params,"
148
149 data = ""
150 if flow.request.content:
151 data = "\n data = '''%s'''\n" % _native(flow.request.content)
152 args += "\n data=data,"
153
154 code = code.format(
155 name=name,
156 path=path_without_query,
157 headers=headers,
158 params=params,
159 data=data,
160 method=flow.request.method,
161 args=args,
162 )
163
164 return code
165
166
167 def locust_task(flow):
168 code = locust_code(flow)
169 start_task = len(code.split('@task')[0]) - 4
170 end_task = -19 - len(code.split('### Additional')[1])
171 task_code = code[start_task:end_task]
172
173 return task_code
174
175
176 def url(flow):
177 return flow.request.url
178
179
180 EXPORTERS = [
181 ("content", "c", None),
182 ("headers+content", "h", None),
183 ("url", "u", url),
184 ("as curl command", "r", curl_command),
185 ("as python code", "p", python_code),
186 ("as locust code", "l", locust_code),
187 ("as locust task", "t", locust_task),
188 ]
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/export.py b/mitmproxy/export.py
--- a/mitmproxy/export.py
+++ b/mitmproxy/export.py
@@ -6,19 +6,7 @@
from typing import Any
from mitmproxy import http
-
-
-def _native(s):
- if isinstance(s, bytes):
- return s.decode()
- return s
-
-
-def dictstr(items, indent: str) -> str:
- lines = []
- for k, v in items:
- lines.append(indent + "%s: %s,\n" % (repr(_native(k)), repr(_native(v))))
- return "{\n%s}\n" % "".join(lines)
+from mitmproxy.utils import strutils
def curl_command(flow: http.HTTPFlow) -> str:
@@ -36,7 +24,10 @@
data += "'%s'" % request.url
if request.content:
- data += " --data-binary '%s'" % _native(request.content)
+ data += " --data-binary '%s'" % strutils.bytes_to_escaped_str(
+ request.content,
+ escape_single_quotes=True
+ )
return data
@@ -127,10 +118,14 @@
args = ""
headers = ""
+
+ def conv(x):
+ return strutils.bytes_to_escaped_str(x, escape_single_quotes=True)
+
if flow.request.headers:
lines = [
- (_native(k), _native(v)) for k, v in flow.request.headers.fields
- if _native(k).lower() not in [":authority", "host", "cookie"]
+ (conv(k), conv(v)) for k, v in flow.request.headers.fields
+ if conv(k).lower() not in [":authority", "host", "cookie"]
]
lines = [" '%s': '%s',\n" % (k, v) for k, v in lines]
headers += "\n headers = {\n%s }\n" % "".join(lines)
@@ -148,7 +143,7 @@
data = ""
if flow.request.content:
- data = "\n data = '''%s'''\n" % _native(flow.request.content)
+ data = "\n data = '''%s'''\n" % conv(flow.request.content)
args += "\n data=data,"
code = code.format(
|
{"golden_diff": "diff --git a/mitmproxy/export.py b/mitmproxy/export.py\n--- a/mitmproxy/export.py\n+++ b/mitmproxy/export.py\n@@ -6,19 +6,7 @@\n from typing import Any\n \n from mitmproxy import http\n-\n-\n-def _native(s):\n- if isinstance(s, bytes):\n- return s.decode()\n- return s\n-\n-\n-def dictstr(items, indent: str) -> str:\n- lines = []\n- for k, v in items:\n- lines.append(indent + \"%s: %s,\\n\" % (repr(_native(k)), repr(_native(v))))\n- return \"{\\n%s}\\n\" % \"\".join(lines)\n+from mitmproxy.utils import strutils\n \n \n def curl_command(flow: http.HTTPFlow) -> str:\n@@ -36,7 +24,10 @@\n data += \"'%s'\" % request.url\n \n if request.content:\n- data += \" --data-binary '%s'\" % _native(request.content)\n+ data += \" --data-binary '%s'\" % strutils.bytes_to_escaped_str(\n+ request.content,\n+ escape_single_quotes=True\n+ )\n \n return data\n \n@@ -127,10 +118,14 @@\n \n args = \"\"\n headers = \"\"\n+\n+ def conv(x):\n+ return strutils.bytes_to_escaped_str(x, escape_single_quotes=True)\n+\n if flow.request.headers:\n lines = [\n- (_native(k), _native(v)) for k, v in flow.request.headers.fields\n- if _native(k).lower() not in [\":authority\", \"host\", \"cookie\"]\n+ (conv(k), conv(v)) for k, v in flow.request.headers.fields\n+ if conv(k).lower() not in [\":authority\", \"host\", \"cookie\"]\n ]\n lines = [\" '%s': '%s',\\n\" % (k, v) for k, v in lines]\n headers += \"\\n headers = {\\n%s }\\n\" % \"\".join(lines)\n@@ -148,7 +143,7 @@\n \n data = \"\"\n if flow.request.content:\n- data = \"\\n data = '''%s'''\\n\" % _native(flow.request.content)\n+ data = \"\\n data = '''%s'''\\n\" % conv(flow.request.content)\n args += \"\\n data=data,\"\n \n code = code.format(\n", "issue": "crash when export flow to 'r' curl\n##### Steps to reproduce the problem:\r\n\r\n1. enter a flow view\r\n2. press E and choose r\r\n3. crash\r\n\r\n\r\n##### Any other comments? 
What have you tried so far?\r\n\r\n\r\n\r\n##### System information\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/master.py\", line 281, in run\r\n self.loop.run()\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py\", line 278, in run\r\n self._run()\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py\", line 376, in _run\r\n self.event_loop.run()\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py\", line 682, in run\r\n self._loop()\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py\", line 719, in _loop\r\n self._watch_files[fd]()\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/raw_display.py\", line 393, in <lambda>\r\n event_loop, callback, self.get_available_raw_input())\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/raw_display.py\", line 493, in parse_input\r\n callback(processed, processed_codes)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py\", line 403, in _update\r\n self.process_input(keys)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/main_loop.py\", line 503, in process_input\r\n k = self._topmost_widget.keypress(self.screen_size, k)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/window.py\", line 84, in keypress\r\n k = super().keypress(size, k)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/urwid/container.py\", line 1116, in keypress\r\n return self.footer.keypress((maxcol,),key)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py\", line 155, in keypress\r\n return self.master.ab.keypress(*args, **kwargs)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py\", line 108, in keypress\r\n self.prompt_execute(k)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py\", line 133, in prompt_execute\r\n msg = p(txt)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/statusbar.py\", line 31, in __call__\r\n return self.callback(txt, *self.args)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/tools/console/common.py\", line 328, in export_to_clip_or_file\r\n writer(exporter(flow))\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/export.py\", line 39, in curl_command\r\n data += \" --data-binary '%s'\" % _native(request.content)\r\n File \"/usr/local/Cellar/mitmproxy/2.0.1/libexec/lib/python3.6/site-packages/mitmproxy/export.py\", line 13, in _native\r\n return s.decode()\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0x88 in position 188: invalid start byte\r\n\r\n<!--\r\n Cut and paste the output of \"mitmproxy --version\".\r\n\r\n If you're using an older version if mitmproxy, please specify the version\r\n and OS.\r\n-->\r\nMitmproxy version: 2.0.1 (release version)\r\nPython version: 3.6.1\r\nPlatform: Darwin-16.1.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.1.0e 16 Feb 2017\r\nMac version: 
10.12.1 ('', '', '') x86_64\n", "before_files": [{"content": "import io\nimport json\nimport pprint\nimport re\nimport textwrap\nfrom typing import Any\n\nfrom mitmproxy import http\n\n\ndef _native(s):\n if isinstance(s, bytes):\n return s.decode()\n return s\n\n\ndef dictstr(items, indent: str) -> str:\n lines = []\n for k, v in items:\n lines.append(indent + \"%s: %s,\\n\" % (repr(_native(k)), repr(_native(v))))\n return \"{\\n%s}\\n\" % \"\".join(lines)\n\n\ndef curl_command(flow: http.HTTPFlow) -> str:\n data = \"curl \"\n\n request = flow.request.copy()\n request.decode(strict=False)\n\n for k, v in request.headers.items(multi=True):\n data += \"-H '%s:%s' \" % (k, v)\n\n if request.method != \"GET\":\n data += \"-X %s \" % request.method\n\n data += \"'%s'\" % request.url\n\n if request.content:\n data += \" --data-binary '%s'\" % _native(request.content)\n\n return data\n\n\ndef python_arg(arg: str, val: Any) -> str:\n if not val:\n return \"\"\n if arg:\n arg += \"=\"\n arg_str = \"{}{},\\n\".format(\n arg,\n pprint.pformat(val, 79 - len(arg))\n )\n return textwrap.indent(arg_str, \" \" * 4)\n\n\ndef python_code(flow: http.HTTPFlow):\n code = io.StringIO()\n\n def writearg(arg, val):\n code.write(python_arg(arg, val))\n\n code.write(\"import requests\\n\")\n code.write(\"\\n\")\n if flow.request.method.lower() in (\"get\", \"post\", \"put\", \"head\", \"delete\", \"patch\"):\n code.write(\"response = requests.{}(\\n\".format(flow.request.method.lower()))\n else:\n code.write(\"response = requests.request(\\n\")\n writearg(\"\", flow.request.method)\n url_without_query = flow.request.url.split(\"?\", 1)[0]\n writearg(\"\", url_without_query)\n\n writearg(\"params\", list(flow.request.query.fields))\n\n headers = flow.request.headers.copy()\n # requests adds those by default.\n for x in (\":authority\", \"host\", \"content-length\"):\n headers.pop(x, None)\n writearg(\"headers\", dict(headers))\n try:\n if \"json\" not in flow.request.headers.get(\"content-type\", \"\"):\n raise ValueError()\n writearg(\"json\", json.loads(flow.request.text))\n except ValueError:\n writearg(\"data\", flow.request.content)\n\n code.seek(code.tell() - 2) # remove last comma\n code.write(\"\\n)\\n\")\n code.write(\"\\n\")\n code.write(\"print(response.text)\")\n\n return code.getvalue()\n\n\ndef locust_code(flow):\n code = textwrap.dedent(\"\"\"\n from locust import HttpLocust, TaskSet, task\n\n class UserBehavior(TaskSet):\n def on_start(self):\n ''' on_start is called when a Locust start before any task is scheduled '''\n self.{name}()\n\n @task()\n def {name}(self):\n url = self.locust.host + '{path}'\n {headers}{params}{data}\n self.response = self.client.request(\n method='{method}',\n url=url,{args}\n )\n\n ### Additional tasks can go here ###\n\n\n class WebsiteUser(HttpLocust):\n task_set = UserBehavior\n min_wait = 1000\n max_wait = 3000\n\"\"\").strip()\n\n name = re.sub('\\W|^(?=\\d)', '_', flow.request.path.strip(\"/\").split(\"?\", 1)[0])\n if not name:\n new_name = \"_\".join([str(flow.request.host), str(flow.request.timestamp_start)])\n name = re.sub('\\W|^(?=\\d)', '_', new_name)\n\n path_without_query = flow.request.path.split(\"?\")[0]\n\n args = \"\"\n headers = \"\"\n if flow.request.headers:\n lines = [\n (_native(k), _native(v)) for k, v in flow.request.headers.fields\n if _native(k).lower() not in [\":authority\", \"host\", \"cookie\"]\n ]\n lines = [\" '%s': '%s',\\n\" % (k, v) for k, v in lines]\n headers += \"\\n headers = {\\n%s }\\n\" % \"\".join(lines)\n args += \"\\n 
headers=headers,\"\n\n params = \"\"\n if flow.request.query:\n lines = [\n \" %s: %s,\\n\" % (repr(k), repr(v))\n for k, v in\n flow.request.query.collect()\n ]\n params = \"\\n params = {\\n%s }\\n\" % \"\".join(lines)\n args += \"\\n params=params,\"\n\n data = \"\"\n if flow.request.content:\n data = \"\\n data = '''%s'''\\n\" % _native(flow.request.content)\n args += \"\\n data=data,\"\n\n code = code.format(\n name=name,\n path=path_without_query,\n headers=headers,\n params=params,\n data=data,\n method=flow.request.method,\n args=args,\n )\n\n return code\n\n\ndef locust_task(flow):\n code = locust_code(flow)\n start_task = len(code.split('@task')[0]) - 4\n end_task = -19 - len(code.split('### Additional')[1])\n task_code = code[start_task:end_task]\n\n return task_code\n\n\ndef url(flow):\n return flow.request.url\n\n\nEXPORTERS = [\n (\"content\", \"c\", None),\n (\"headers+content\", \"h\", None),\n (\"url\", \"u\", url),\n (\"as curl command\", \"r\", curl_command),\n (\"as python code\", \"p\", python_code),\n (\"as locust code\", \"l\", locust_code),\n (\"as locust task\", \"t\", locust_task),\n]\n", "path": "mitmproxy/export.py"}], "after_files": [{"content": "import io\nimport json\nimport pprint\nimport re\nimport textwrap\nfrom typing import Any\n\nfrom mitmproxy import http\nfrom mitmproxy.utils import strutils\n\n\ndef curl_command(flow: http.HTTPFlow) -> str:\n data = \"curl \"\n\n request = flow.request.copy()\n request.decode(strict=False)\n\n for k, v in request.headers.items(multi=True):\n data += \"-H '%s:%s' \" % (k, v)\n\n if request.method != \"GET\":\n data += \"-X %s \" % request.method\n\n data += \"'%s'\" % request.url\n\n if request.content:\n data += \" --data-binary '%s'\" % strutils.bytes_to_escaped_str(\n request.content,\n escape_single_quotes=True\n )\n\n return data\n\n\ndef python_arg(arg: str, val: Any) -> str:\n if not val:\n return \"\"\n if arg:\n arg += \"=\"\n arg_str = \"{}{},\\n\".format(\n arg,\n pprint.pformat(val, 79 - len(arg))\n )\n return textwrap.indent(arg_str, \" \" * 4)\n\n\ndef python_code(flow: http.HTTPFlow):\n code = io.StringIO()\n\n def writearg(arg, val):\n code.write(python_arg(arg, val))\n\n code.write(\"import requests\\n\")\n code.write(\"\\n\")\n if flow.request.method.lower() in (\"get\", \"post\", \"put\", \"head\", \"delete\", \"patch\"):\n code.write(\"response = requests.{}(\\n\".format(flow.request.method.lower()))\n else:\n code.write(\"response = requests.request(\\n\")\n writearg(\"\", flow.request.method)\n url_without_query = flow.request.url.split(\"?\", 1)[0]\n writearg(\"\", url_without_query)\n\n writearg(\"params\", list(flow.request.query.fields))\n\n headers = flow.request.headers.copy()\n # requests adds those by default.\n for x in (\":authority\", \"host\", \"content-length\"):\n headers.pop(x, None)\n writearg(\"headers\", dict(headers))\n try:\n if \"json\" not in flow.request.headers.get(\"content-type\", \"\"):\n raise ValueError()\n writearg(\"json\", json.loads(flow.request.text))\n except ValueError:\n writearg(\"data\", flow.request.content)\n\n code.seek(code.tell() - 2) # remove last comma\n code.write(\"\\n)\\n\")\n code.write(\"\\n\")\n code.write(\"print(response.text)\")\n\n return code.getvalue()\n\n\ndef locust_code(flow):\n code = textwrap.dedent(\"\"\"\n from locust import HttpLocust, TaskSet, task\n\n class UserBehavior(TaskSet):\n def on_start(self):\n ''' on_start is called when a Locust start before any task is scheduled '''\n self.{name}()\n\n @task()\n def {name}(self):\n 
url = self.locust.host + '{path}'\n {headers}{params}{data}\n self.response = self.client.request(\n method='{method}',\n url=url,{args}\n )\n\n ### Additional tasks can go here ###\n\n\n class WebsiteUser(HttpLocust):\n task_set = UserBehavior\n min_wait = 1000\n max_wait = 3000\n\"\"\").strip()\n\n name = re.sub('\\W|^(?=\\d)', '_', flow.request.path.strip(\"/\").split(\"?\", 1)[0])\n if not name:\n new_name = \"_\".join([str(flow.request.host), str(flow.request.timestamp_start)])\n name = re.sub('\\W|^(?=\\d)', '_', new_name)\n\n path_without_query = flow.request.path.split(\"?\")[0]\n\n args = \"\"\n headers = \"\"\n\n def conv(x):\n return strutils.bytes_to_escaped_str(x, escape_single_quotes=True)\n\n if flow.request.headers:\n lines = [\n (conv(k), conv(v)) for k, v in flow.request.headers.fields\n if conv(k).lower() not in [\":authority\", \"host\", \"cookie\"]\n ]\n lines = [\" '%s': '%s',\\n\" % (k, v) for k, v in lines]\n headers += \"\\n headers = {\\n%s }\\n\" % \"\".join(lines)\n args += \"\\n headers=headers,\"\n\n params = \"\"\n if flow.request.query:\n lines = [\n \" %s: %s,\\n\" % (repr(k), repr(v))\n for k, v in\n flow.request.query.collect()\n ]\n params = \"\\n params = {\\n%s }\\n\" % \"\".join(lines)\n args += \"\\n params=params,\"\n\n data = \"\"\n if flow.request.content:\n data = \"\\n data = '''%s'''\\n\" % conv(flow.request.content)\n args += \"\\n data=data,\"\n\n code = code.format(\n name=name,\n path=path_without_query,\n headers=headers,\n params=params,\n data=data,\n method=flow.request.method,\n args=args,\n )\n\n return code\n\n\ndef locust_task(flow):\n code = locust_code(flow)\n start_task = len(code.split('@task')[0]) - 4\n end_task = -19 - len(code.split('### Additional')[1])\n task_code = code[start_task:end_task]\n\n return task_code\n\n\ndef url(flow):\n return flow.request.url\n\n\nEXPORTERS = [\n (\"content\", \"c\", None),\n (\"headers+content\", \"h\", None),\n (\"url\", \"u\", url),\n (\"as curl command\", \"r\", curl_command),\n (\"as python code\", \"p\", python_code),\n (\"as locust code\", \"l\", locust_code),\n (\"as locust task\", \"t\", locust_task),\n]\n", "path": "mitmproxy/export.py"}]}
| 3,197 | 534 |
gh_patches_debug_29336
|
rasdani/github-patches
|
git_diff
|
AnalogJ__lexicon-270
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Regression on #PR-203] CloudNS authentication broken when `auth-id` supplied
Provider CloudNS fails to authenticate when using auth-id for authentication, rather than auth-subid or auth-subuser.
Line 146 in lexicon/providers/cloudns.py reads `if self.is_given_option(self.options['auth_id']):`. It should instead read `if self.is_given_option('auth_id'):`.
I've inlined a patch below which fixes this. Sorry if this is more effort for you than a PR.
--- cloudns.py
+++ cloudns.pr203fix.py
@@ -143,7 +143,7 @@
if not self.options['auth_password']:
raise Exception('No valid authentication data passed, expected: auth-password')
- if self._is_given_option(self.options['auth_id']):
+ if self._is_given_option('auth_id'):
return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subid'):
return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}
--- END ISSUE ---
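The bug is a key-versus-value mix-up: `_is_given_option` expects an option *key* (like `'auth_id'`), but the buggy call hands it the option *value*, which then gets used as a dictionary key and never matches. A minimal self-contained sketch with hypothetical values, using `.get()` so the broken call fails quietly rather than raising:
```python
# Hypothetical options dict; is_given_option expects a key name, not a value.
options = {"auth_id": "12345", "auth_password": "secret"}

def is_given_option(key):
    return bool(options.get(key))

print(is_given_option("auth_id"))           # True  -> the corrected call
print(is_given_option(options["auth_id"]))  # False -> "12345" is not a key
```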
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/cloudns.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import logging
5 import requests
6
7 from .base import Provider as BaseProvider
8
9 logger = logging.getLogger(__name__)
10
11
12 def ProviderParser(subparser):
13 identity_group = subparser.add_mutually_exclusive_group()
14 identity_group.add_argument("--auth-id", help="specify user id used to authenticate")
15 identity_group.add_argument("--auth-subid", help="specify subuser id used to authenticate")
16 identity_group.add_argument("--auth-subuser", help="specify subuser name used to authenticate")
17 subparser.add_argument("--auth-password", help="specify password used to authenticate")
18 subparser.add_argument("--weight", help="specify the SRV record weight")
19 subparser.add_argument("--port", help="specify the SRV record port")
20
21
22 class Provider(BaseProvider):
23 def __init__(self, options, engine_overrides=None):
24 super(Provider, self).__init__(options, engine_overrides)
25 self.domain_id = None
26 self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.cloudns.net')
27
28 def authenticate(self):
29 payload = self._get('/dns/get-zone-info.json', {'domain-name': self.options['domain']})
30 self.domain_id = payload['name']
31 logger.debug('authenticate: %s', payload)
32
33 def create_record(self, type, name, content):
34 # Skip execution if such a record already exists
35 existing_records = self.list_records(type, name, content)
36 if len(existing_records) > 0:
37 return True
38
39 # Build parameters for adding a new record
40 params = {
41 'domain-name': self.domain_id,
42 'record-type': type,
43 'host': self._relative_name(name),
44 'record': content
45 }
46 if self.options['ttl']:
47 params['ttl'] = self.options['ttl']
48 if self.options['priority']:
49 params['priority'] = self.options['priority']
50 if self.options['weight']:
51 params['weight'] = self.options['weight']
52 if self.options['port']:
53 params['port'] = self.options['port']
54
55 # Add new record by calling the ClouDNS API
56 payload = self._post('/dns/add-record.json', params)
57 logger.debug('create_record: %s', payload)
58
59 # Error handling is already covered by self._request
60 return True
61
62 def list_records(self, type=None, name=None, content=None):
63 # Build parameters to make use of the built-in API filtering
64 params = {'domain-name': self.domain_id}
65 if type:
66 params['type'] = type
67 if name:
68 params['host'] = self._relative_name(name)
69
70 # Fetch and parse all records for the given zone
71 payload = self._get('/dns/records.json', params)
72 payload = payload if not isinstance(payload, list) else {}
73 records = []
74 for record in payload.values():
75 records.append({
76 'type': record['type'],
77 'name': self._full_name(record['host']),
78 'ttl': record['ttl'],
79 'content': record['record'],
80 'id': record['id']
81 })
82
83 # Filter by content manually as API does not support that
84 if content:
85 records = [record for record in records if record['content'] == content]
86
87 # Print records as debug output and return them
88 logger.debug('list_records: %s', records)
89 return records
90
91 def update_record(self, identifier, type=None, name=None, content=None):
92 # Try to find record if no identifier was specified
93 if not identifier:
94 identifier = self._find_record_identifier(type, name, None)
95
96 # Build parameters for updating an existing record
97 params = {'domain-name': self.domain_id, 'record-id': identifier}
98 if name:
99 params['host'] = self._relative_name(name)
100 if content:
101 params['record'] = content
102 if self.options.get('ttl'):
103 params['ttl'] = self.options.get('ttl')
104 if self.options['priority']:
105 params['priority'] = self.options['priority']
106 if self.options['weight']:
107 params['weight'] = self.options['weight']
108 if self.options['port']:
109 params['port'] = self.options['port']
110
111 # Update existing record by calling the ClouDNS API
112 payload = self._post('/dns/mod-record.json', params)
113 logger.debug('update_record: %s', payload)
114
115 # Error handling is already covered by self._request
116 return True
117
118 def delete_record(self, identifier=None, type=None, name=None, content=None):
119 # Try to find record if no identifier was specified
120 delete_record_id = []
121 if not identifier:
122 records = self.list_records(type, name, content)
123 delete_record_id = [record['id'] for record in records]
124 else:
125 delete_record_id.append(identifier)
126
127 logger.debug('delete_records: %s', delete_record_id)
128
129 for record_id in delete_record_id:
130 # Delete existing record by calling the ClouDNS API
131 payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})
132
133 logger.debug('delete_record: %s', True)
134
135 # Error handling is already covered by self._request
136 return True
137
138 def _is_given_option(self, key):
139 fallback_fn = self.engine_overrides.get('fallbackFn', (lambda x: None))
140 return self.options[key] and self.options[key] != fallback_fn(key)
141
142 def _build_authentication_data(self):
143 if not self.options['auth_password']:
144 raise Exception('No valid authentication data passed, expected: auth-password')
145
146 if self._is_given_option(self.options['auth_id']):
147 return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}
148 elif self._is_given_option('auth_subid'):
149 return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}
150 elif self._is_given_option('auth_subuser'):
151 return {'sub-auth-user': self.options['auth_subuser'], 'auth-password': self.options['auth_password']}
152 elif self.options['auth_id'] or self.options['auth_subid'] or self.options['auth_subuser']:
153 # All the options were passed with a fallback value, return an empty dictionary.
154 return {}
155 else:
156 raise Exception('No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser')
157
158 def _find_record_identifier(self, type, name, content):
159 records = self.list_records(type, name, content)
160 logger.debug('records: %s', records)
161 if len(records) == 1:
162 return records[0]['id']
163 else:
164 raise Exception('Record identifier could not be found.')
165
166 def _request(self, action='GET', url='/', data=None, query_params=None):
167 # Set default values for missing arguments
168 data = data if data else {}
169 query_params = query_params if query_params else {}
170
171 # Merge authentication data into request
172 if action == 'GET':
173 query_params.update(self._build_authentication_data())
174 else:
175 data.update(self._build_authentication_data())
176
177 # Fire request against ClouDNS API and parse result as JSON
178 r = requests.request(action, self.api_endpoint + url, params=query_params, data=data)
179 r.raise_for_status()
180 payload = r.json()
181
182 # Check ClouDNS specific status code and description
183 if 'status' in payload and 'statusDescription' in payload and payload['status'] != 'Success':
184 raise Exception('ClouDNS API request has failed: ' + payload['statusDescription'])
185
186 # Return payload
187 return payload
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lexicon/providers/cloudns.py b/lexicon/providers/cloudns.py
--- a/lexicon/providers/cloudns.py
+++ b/lexicon/providers/cloudns.py
@@ -123,13 +123,13 @@
delete_record_id = [record['id'] for record in records]
else:
delete_record_id.append(identifier)
-
+
logger.debug('delete_records: %s', delete_record_id)
-
+
for record_id in delete_record_id:
# Delete existing record by calling the ClouDNS API
payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})
-
+
logger.debug('delete_record: %s', True)
# Error handling is already covered by self._request
@@ -143,7 +143,7 @@
if not self.options['auth_password']:
raise Exception('No valid authentication data passed, expected: auth-password')
- if self._is_given_option(self.options['auth_id']):
+ if self._is_given_option('auth_id'):
return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}
elif self._is_given_option('auth_subid'):
return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}
|
{"golden_diff": "diff --git a/lexicon/providers/cloudns.py b/lexicon/providers/cloudns.py\n--- a/lexicon/providers/cloudns.py\n+++ b/lexicon/providers/cloudns.py\n@@ -123,13 +123,13 @@\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n- \n+\n logger.debug('delete_records: %s', delete_record_id)\n- \n+\n for record_id in delete_record_id:\n # Delete existing record by calling the ClouDNS API\n payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})\n- \n+\n logger.debug('delete_record: %s', True)\n \n # Error handling is already covered by self._request\n@@ -143,7 +143,7 @@\n if not self.options['auth_password']:\n raise Exception('No valid authentication data passed, expected: auth-password')\n \n- if self._is_given_option(self.options['auth_id']):\n+ if self._is_given_option('auth_id'):\n return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}\n elif self._is_given_option('auth_subid'):\n return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}\n", "issue": "[Regression on #PR-203] CloudNS authentication broken when `auth-id` supplied\nProvider CloudNS fails to authenticate when using auth-id for authentication, rather than auth-subid or auth-subuser.\r\n\r\nLine 146 in lexicon/providers/cloudns.py reads `if self.is_given_option(self.options['auth_id']):`. It should instead should read `if self.is_given_option('auth_id'):`.\r\n\r\nI've inlined a patch below which fixes this. Sorry if this is more effort for you than a PR.\r\n\r\n --- cloudns.py\r\n +++ cloudns.pr203fix.py\r\n @@ -143,7 +143,7 @@ \r\n if not self.options['auth_password']:\r\n raise Exception('No valid authentication data passed, expected: auth-password')\r\n \r\n - if self._is_given_option(self.options['auth_id']):\r\n + if self._is_given_option('auth_id'):\r\n return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}\r\n elif self._is_given_option('auth_subid'):\r\n return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport requests\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n identity_group = subparser.add_mutually_exclusive_group()\n identity_group.add_argument(\"--auth-id\", help=\"specify user id used to authenticate\")\n identity_group.add_argument(\"--auth-subid\", help=\"specify subuser id used to authenticate\")\n identity_group.add_argument(\"--auth-subuser\", help=\"specify subuser name used to authenticate\")\n subparser.add_argument(\"--auth-password\", help=\"specify password used to authenticate\")\n subparser.add_argument(\"--weight\", help=\"specify the SRV record weight\")\n subparser.add_argument(\"--port\", help=\"specify the SRV record port\")\n\n\nclass Provider(BaseProvider):\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.domain_id = None\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.cloudns.net')\n\n def authenticate(self):\n payload = self._get('/dns/get-zone-info.json', {'domain-name': self.options['domain']})\n self.domain_id = payload['name']\n logger.debug('authenticate: %s', payload)\n\n def create_record(self, type, name, 
content):\n # Skip execution if such a record already exists\n existing_records = self.list_records(type, name, content)\n if len(existing_records) > 0:\n return True\n\n # Build parameters for adding a new record\n params = {\n 'domain-name': self.domain_id,\n 'record-type': type,\n 'host': self._relative_name(name),\n 'record': content\n }\n if self.options['ttl']:\n params['ttl'] = self.options['ttl']\n if self.options['priority']:\n params['priority'] = self.options['priority']\n if self.options['weight']:\n params['weight'] = self.options['weight']\n if self.options['port']:\n params['port'] = self.options['port']\n\n # Add new record by calling the ClouDNS API\n payload = self._post('/dns/add-record.json', params)\n logger.debug('create_record: %s', payload)\n\n # Error handling is already covered by self._request\n return True\n\n def list_records(self, type=None, name=None, content=None):\n # Build parameters to make use of the built-in API filtering\n params = {'domain-name': self.domain_id}\n if type:\n params['type'] = type\n if name:\n params['host'] = self._relative_name(name)\n\n # Fetch and parse all records for the given zone\n payload = self._get('/dns/records.json', params)\n payload = payload if not isinstance(payload, list) else {}\n records = []\n for record in payload.values():\n records.append({\n 'type': record['type'],\n 'name': self._full_name(record['host']),\n 'ttl': record['ttl'],\n 'content': record['record'],\n 'id': record['id']\n })\n\n # Filter by content manually as API does not support that\n if content:\n records = [record for record in records if record['content'] == content]\n\n # Print records as debug output and return them\n logger.debug('list_records: %s', records)\n return records\n\n def update_record(self, identifier, type=None, name=None, content=None):\n # Try to find record if no identifier was specified\n if not identifier:\n identifier = self._find_record_identifier(type, name, None)\n\n # Build parameters for updating an existing record\n params = {'domain-name': self.domain_id, 'record-id': identifier}\n if name:\n params['host'] = self._relative_name(name)\n if content:\n params['record'] = content\n if self.options.get('ttl'):\n params['ttl'] = self.options.get('ttl')\n if self.options['priority']:\n params['priority'] = self.options['priority']\n if self.options['weight']:\n params['weight'] = self.options['weight']\n if self.options['port']:\n params['port'] = self.options['port']\n\n # Update existing record by calling the ClouDNS API\n payload = self._post('/dns/mod-record.json', params)\n logger.debug('update_record: %s', payload)\n\n # Error handling is already covered by self._request\n return True\n\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n # Try to find record if no identifier was specified\n delete_record_id = []\n if not identifier:\n records = self.list_records(type, name, content)\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n \n logger.debug('delete_records: %s', delete_record_id)\n \n for record_id in delete_record_id:\n # Delete existing record by calling the ClouDNS API\n payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})\n \n logger.debug('delete_record: %s', True)\n\n # Error handling is already covered by self._request\n return True\n\n def _is_given_option(self, key):\n fallback_fn = self.engine_overrides.get('fallbackFn', (lambda x: None))\n return 
self.options[key] and self.options[key] != fallback_fn(key)\n\n def _build_authentication_data(self):\n if not self.options['auth_password']:\n raise Exception('No valid authentication data passed, expected: auth-password')\n\n if self._is_given_option(self.options['auth_id']):\n return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}\n elif self._is_given_option('auth_subid'):\n return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}\n elif self._is_given_option('auth_subuser'):\n return {'sub-auth-user': self.options['auth_subuser'], 'auth-password': self.options['auth_password']}\n elif self.options['auth_id'] or self.options['auth_subid'] or self.options['auth_subuser']:\n # All the options were passed with a fallback value, return an empty dictionary.\n return {}\n else:\n raise Exception('No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser')\n\n def _find_record_identifier(self, type, name, content):\n records = self.list_records(type, name, content)\n logger.debug('records: %s', records)\n if len(records) == 1:\n return records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n # Set default values for missing arguments\n data = data if data else {}\n query_params = query_params if query_params else {}\n\n # Merge authentication data into request\n if action == 'GET':\n query_params.update(self._build_authentication_data())\n else:\n data.update(self._build_authentication_data())\n\n # Fire request against ClouDNS API and parse result as JSON\n r = requests.request(action, self.api_endpoint + url, params=query_params, data=data)\n r.raise_for_status()\n payload = r.json()\n\n # Check ClouDNS specific status code and description\n if 'status' in payload and 'statusDescription' in payload and payload['status'] != 'Success':\n raise Exception('ClouDNS API request has failed: ' + payload['statusDescription'])\n\n # Return payload\n return payload\n", "path": "lexicon/providers/cloudns.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport requests\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n identity_group = subparser.add_mutually_exclusive_group()\n identity_group.add_argument(\"--auth-id\", help=\"specify user id used to authenticate\")\n identity_group.add_argument(\"--auth-subid\", help=\"specify subuser id used to authenticate\")\n identity_group.add_argument(\"--auth-subuser\", help=\"specify subuser name used to authenticate\")\n subparser.add_argument(\"--auth-password\", help=\"specify password used to authenticate\")\n subparser.add_argument(\"--weight\", help=\"specify the SRV record weight\")\n subparser.add_argument(\"--port\", help=\"specify the SRV record port\")\n\n\nclass Provider(BaseProvider):\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.domain_id = None\n self.api_endpoint = self.engine_overrides.get('api_endpoint', 'https://api.cloudns.net')\n\n def authenticate(self):\n payload = self._get('/dns/get-zone-info.json', {'domain-name': self.options['domain']})\n self.domain_id = payload['name']\n logger.debug('authenticate: %s', payload)\n\n def create_record(self, type, name, content):\n # Skip execution if such a record 
already exists\n existing_records = self.list_records(type, name, content)\n if len(existing_records) > 0:\n return True\n\n # Build parameters for adding a new record\n params = {\n 'domain-name': self.domain_id,\n 'record-type': type,\n 'host': self._relative_name(name),\n 'record': content\n }\n if self.options['ttl']:\n params['ttl'] = self.options['ttl']\n if self.options['priority']:\n params['priority'] = self.options['priority']\n if self.options['weight']:\n params['weight'] = self.options['weight']\n if self.options['port']:\n params['port'] = self.options['port']\n\n # Add new record by calling the ClouDNS API\n payload = self._post('/dns/add-record.json', params)\n logger.debug('create_record: %s', payload)\n\n # Error handling is already covered by self._request\n return True\n\n def list_records(self, type=None, name=None, content=None):\n # Build parameters to make use of the built-in API filtering\n params = {'domain-name': self.domain_id}\n if type:\n params['type'] = type\n if name:\n params['host'] = self._relative_name(name)\n\n # Fetch and parse all records for the given zone\n payload = self._get('/dns/records.json', params)\n payload = payload if not isinstance(payload, list) else {}\n records = []\n for record in payload.values():\n records.append({\n 'type': record['type'],\n 'name': self._full_name(record['host']),\n 'ttl': record['ttl'],\n 'content': record['record'],\n 'id': record['id']\n })\n\n # Filter by content manually as API does not support that\n if content:\n records = [record for record in records if record['content'] == content]\n\n # Print records as debug output and return them\n logger.debug('list_records: %s', records)\n return records\n\n def update_record(self, identifier, type=None, name=None, content=None):\n # Try to find record if no identifier was specified\n if not identifier:\n identifier = self._find_record_identifier(type, name, None)\n\n # Build parameters for updating an existing record\n params = {'domain-name': self.domain_id, 'record-id': identifier}\n if name:\n params['host'] = self._relative_name(name)\n if content:\n params['record'] = content\n if self.options.get('ttl'):\n params['ttl'] = self.options.get('ttl')\n if self.options['priority']:\n params['priority'] = self.options['priority']\n if self.options['weight']:\n params['weight'] = self.options['weight']\n if self.options['port']:\n params['port'] = self.options['port']\n\n # Update existing record by calling the ClouDNS API\n payload = self._post('/dns/mod-record.json', params)\n logger.debug('update_record: %s', payload)\n\n # Error handling is already covered by self._request\n return True\n\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n # Try to find record if no identifier was specified\n delete_record_id = []\n if not identifier:\n records = self.list_records(type, name, content)\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n\n logger.debug('delete_records: %s', delete_record_id)\n\n for record_id in delete_record_id:\n # Delete existing record by calling the ClouDNS API\n payload = self._post('/dns/delete-record.json', {'domain-name': self.domain_id, 'record-id': record_id})\n\n logger.debug('delete_record: %s', True)\n\n # Error handling is already covered by self._request\n return True\n\n def _is_given_option(self, key):\n fallback_fn = self.engine_overrides.get('fallbackFn', (lambda x: None))\n return self.options[key] and self.options[key] != fallback_fn(key)\n\n 
def _build_authentication_data(self):\n if not self.options['auth_password']:\n raise Exception('No valid authentication data passed, expected: auth-password')\n\n if self._is_given_option('auth_id'):\n return {'auth-id': self.options['auth_id'], 'auth-password': self.options['auth_password']}\n elif self._is_given_option('auth_subid'):\n return {'sub-auth-id': self.options['auth_subid'], 'auth-password': self.options['auth_password']}\n elif self._is_given_option('auth_subuser'):\n return {'sub-auth-user': self.options['auth_subuser'], 'auth-password': self.options['auth_password']}\n elif self.options['auth_id'] or self.options['auth_subid'] or self.options['auth_subuser']:\n # All the options were passed with a fallback value, return an empty dictionary.\n return {}\n else:\n raise Exception('No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser')\n\n def _find_record_identifier(self, type, name, content):\n records = self.list_records(type, name, content)\n logger.debug('records: %s', records)\n if len(records) == 1:\n return records[0]['id']\n else:\n raise Exception('Record identifier could not be found.')\n\n def _request(self, action='GET', url='/', data=None, query_params=None):\n # Set default values for missing arguments\n data = data if data else {}\n query_params = query_params if query_params else {}\n\n # Merge authentication data into request\n if action == 'GET':\n query_params.update(self._build_authentication_data())\n else:\n data.update(self._build_authentication_data())\n\n # Fire request against ClouDNS API and parse result as JSON\n r = requests.request(action, self.api_endpoint + url, params=query_params, data=data)\n r.raise_for_status()\n payload = r.json()\n\n # Check ClouDNS specific status code and description\n if 'status' in payload and 'statusDescription' in payload and payload['status'] != 'Success':\n raise Exception('ClouDNS API request has failed: ' + payload['statusDescription'])\n\n # Return payload\n return payload\n", "path": "lexicon/providers/cloudns.py"}]}
| 2,664 | 301 |
gh_patches_debug_4559
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-6790
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/32 IP's not automatically represented in /32 subnet.
### NetBox version
v2.11.7
### Python version
3.7
### Steps to Reproduce
1) Have a /32 IP address,
2) Create a /32 Subnet for that IP address
3) In the new subnet, open the IP Addresses tab and observe the listed IPs.
### Expected Behavior
The existing /32 IP address should be shown as assigned within the new prefix, rather than the prefix offering that address for assignment.
### Observed Behavior
'Available IP' Button is present instead of a reference to the IP address as per 1).
--- END ISSUE ---
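One detail worth noting before the file listing: the `NetHostContained` lookup below builds its SQL with PostgreSQL's strict containment operator `<<`, which is false when the candidate network equals the prefix (a /32 host inside a /32 prefix), while `<<=` also accepts equality. Whether or not that turns out to be the culprit, the distinction lines up with the behaviour described above; here is a small stdlib sketch of the same semantics:
```python
import ipaddress

prefix = ipaddress.ip_network("192.0.2.1/32")
address = ipaddress.ip_network("192.0.2.1/32")

# "<<=" semantics (contained or equal): an identical /32 matches.
print(address.subnet_of(prefix))                        # True

# "<<" semantics (strictly contained): an identical /32 never matches.
print(address.subnet_of(prefix) and address != prefix)  # False
```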
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/ipam/lookups.py`
Content:
```
1 from django.db.models import IntegerField, Lookup, Transform, lookups
2
3
4 class NetFieldDecoratorMixin(object):
5
6 def process_lhs(self, qn, connection, lhs=None):
7 lhs = lhs or self.lhs
8 lhs_string, lhs_params = qn.compile(lhs)
9 lhs_string = 'TEXT(%s)' % lhs_string
10 return lhs_string, lhs_params
11
12
13 class IExact(NetFieldDecoratorMixin, lookups.IExact):
14
15 def get_rhs_op(self, connection, rhs):
16 return '= LOWER(%s)' % rhs
17
18
19 class EndsWith(NetFieldDecoratorMixin, lookups.EndsWith):
20 pass
21
22
23 class IEndsWith(NetFieldDecoratorMixin, lookups.IEndsWith):
24 pass
25
26 def get_rhs_op(self, connection, rhs):
27 return 'LIKE LOWER(%s)' % rhs
28
29
30 class StartsWith(NetFieldDecoratorMixin, lookups.StartsWith):
31 lookup_name = 'startswith'
32
33
34 class IStartsWith(NetFieldDecoratorMixin, lookups.IStartsWith):
35 pass
36
37 def get_rhs_op(self, connection, rhs):
38 return 'LIKE LOWER(%s)' % rhs
39
40
41 class Regex(NetFieldDecoratorMixin, lookups.Regex):
42 pass
43
44
45 class IRegex(NetFieldDecoratorMixin, lookups.IRegex):
46 pass
47
48
49 class NetContainsOrEquals(Lookup):
50 lookup_name = 'net_contains_or_equals'
51
52 def as_sql(self, qn, connection):
53 lhs, lhs_params = self.process_lhs(qn, connection)
54 rhs, rhs_params = self.process_rhs(qn, connection)
55 params = lhs_params + rhs_params
56 return '%s >>= %s' % (lhs, rhs), params
57
58
59 class NetContains(Lookup):
60 lookup_name = 'net_contains'
61
62 def as_sql(self, qn, connection):
63 lhs, lhs_params = self.process_lhs(qn, connection)
64 rhs, rhs_params = self.process_rhs(qn, connection)
65 params = lhs_params + rhs_params
66 return '%s >> %s' % (lhs, rhs), params
67
68
69 class NetContained(Lookup):
70 lookup_name = 'net_contained'
71
72 def as_sql(self, qn, connection):
73 lhs, lhs_params = self.process_lhs(qn, connection)
74 rhs, rhs_params = self.process_rhs(qn, connection)
75 params = lhs_params + rhs_params
76 return '%s << %s' % (lhs, rhs), params
77
78
79 class NetContainedOrEqual(Lookup):
80 lookup_name = 'net_contained_or_equal'
81
82 def as_sql(self, qn, connection):
83 lhs, lhs_params = self.process_lhs(qn, connection)
84 rhs, rhs_params = self.process_rhs(qn, connection)
85 params = lhs_params + rhs_params
86 return '%s <<= %s' % (lhs, rhs), params
87
88
89 class NetHost(Lookup):
90 lookup_name = 'net_host'
91
92 def as_sql(self, qn, connection):
93 lhs, lhs_params = self.process_lhs(qn, connection)
94 rhs, rhs_params = self.process_rhs(qn, connection)
95 # Query parameters are automatically converted to IPNetwork objects, which are then turned to strings. We need
96 # to omit the mask portion of the object's string representation to match PostgreSQL's HOST() function.
97 if rhs_params:
98 rhs_params[0] = rhs_params[0].split('/')[0]
99 params = lhs_params + rhs_params
100 return 'HOST(%s) = %s' % (lhs, rhs), params
101
102
103 class NetIn(Lookup):
104 lookup_name = 'net_in'
105
106 def get_prep_lookup(self):
107 # Don't cast the query value to a netaddr object, since it may or may not include a mask.
108 return self.rhs
109
110 def as_sql(self, qn, connection):
111 lhs, lhs_params = self.process_lhs(qn, connection)
112 rhs, rhs_params = self.process_rhs(qn, connection)
113 with_mask, without_mask = [], []
114 for address in rhs_params[0]:
115 if '/' in address:
116 with_mask.append(address)
117 else:
118 without_mask.append(address)
119
120 address_in_clause = self.create_in_clause('{} IN ('.format(lhs), len(with_mask))
121 host_in_clause = self.create_in_clause('HOST({}) IN ('.format(lhs), len(without_mask))
122
123 if with_mask and not without_mask:
124 return address_in_clause, with_mask
125 elif not with_mask and without_mask:
126 return host_in_clause, without_mask
127
128 in_clause = '({}) OR ({})'.format(address_in_clause, host_in_clause)
129 with_mask.extend(without_mask)
130 return in_clause, with_mask
131
132 @staticmethod
133 def create_in_clause(clause_part, max_size):
134 clause_elements = [clause_part]
135 for offset in range(0, max_size):
136 if offset > 0:
137 clause_elements.append(', ')
138 clause_elements.append('%s')
139 clause_elements.append(')')
140 return ''.join(clause_elements)
141
142
143 class NetHostContained(Lookup):
144 """
145 Check for the host portion of an IP address without regard to its mask. This allows us to find e.g. 192.0.2.1/24
146 when specifying a parent prefix of 192.0.2.0/26.
147 """
148 lookup_name = 'net_host_contained'
149
150 def as_sql(self, qn, connection):
151 lhs, lhs_params = self.process_lhs(qn, connection)
152 rhs, rhs_params = self.process_rhs(qn, connection)
153 params = lhs_params + rhs_params
154 return 'CAST(HOST(%s) AS INET) << %s' % (lhs, rhs), params
155
156
157 class NetFamily(Transform):
158 lookup_name = 'family'
159 function = 'FAMILY'
160
161 @property
162 def output_field(self):
163 return IntegerField()
164
165
166 class NetMaskLength(Transform):
167 function = 'MASKLEN'
168 lookup_name = 'net_mask_length'
169
170 @property
171 def output_field(self):
172 return IntegerField()
173
174
175 class Host(Transform):
176 function = 'HOST'
177 lookup_name = 'host'
178
179
180 class Inet(Transform):
181 function = 'INET'
182 lookup_name = 'inet'
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/ipam/lookups.py b/netbox/ipam/lookups.py
--- a/netbox/ipam/lookups.py
+++ b/netbox/ipam/lookups.py
@@ -151,7 +151,7 @@
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
- return 'CAST(HOST(%s) AS INET) << %s' % (lhs, rhs), params
+ return 'CAST(HOST(%s) AS INET) <<= %s' % (lhs, rhs), params
class NetFamily(Transform):
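The one-character operator change is the whole fix: PostgreSQL's inet operator `<<` tests strict containment, while `<<=` tests containment or equality, so a /32 (or /128) address can never satisfy `<<` against an identically sized prefix, which is exactly why such addresses were missing from the prefix's IP list. A minimal sketch of the corrected lookup, using the same Django `Lookup` API as the file above:
```python
from django.db.models import Lookup


class NetHostContained(Lookup):
    """
    Match the host portion of an address against a prefix, ignoring the
    address's own mask. `<<=` (contained within or equals) ensures a /32
    address is matched by a /32 prefix instead of being silently excluded.
    """
    lookup_name = 'net_host_contained'

    def as_sql(self, qn, connection):
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return 'CAST(HOST(%s) AS INET) <<= %s' % (lhs, rhs), params
```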
|
{"golden_diff": "diff --git a/netbox/ipam/lookups.py b/netbox/ipam/lookups.py\n--- a/netbox/ipam/lookups.py\n+++ b/netbox/ipam/lookups.py\n@@ -151,7 +151,7 @@\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n- return 'CAST(HOST(%s) AS INET) << %s' % (lhs, rhs), params\n+ return 'CAST(HOST(%s) AS INET) <<= %s' % (lhs, rhs), params\n \n \n class NetFamily(Transform):\n", "issue": "/32 IP's not automatically represented in /32 subnet.\n### NetBox version\n\nv2.11.7\n\n### Python version\n\n3.7\n\n### Steps to Reproduce\n\n1) Have a /32 IP address,\r\n\r\n2) Create a /32 Subnet for that IP address\r\n\r\n3) In the new subnet on the IP addresses tab observe IP's. \n\n### Expected Behavior\n\nIP address reference should be observed and not the ability to assign an IP address in the subnet.\n\n### Observed Behavior\n\n'Available IP' Button is present instead of a reference to the IP address as per 1).\n", "before_files": [{"content": "from django.db.models import IntegerField, Lookup, Transform, lookups\n\n\nclass NetFieldDecoratorMixin(object):\n\n def process_lhs(self, qn, connection, lhs=None):\n lhs = lhs or self.lhs\n lhs_string, lhs_params = qn.compile(lhs)\n lhs_string = 'TEXT(%s)' % lhs_string\n return lhs_string, lhs_params\n\n\nclass IExact(NetFieldDecoratorMixin, lookups.IExact):\n\n def get_rhs_op(self, connection, rhs):\n return '= LOWER(%s)' % rhs\n\n\nclass EndsWith(NetFieldDecoratorMixin, lookups.EndsWith):\n pass\n\n\nclass IEndsWith(NetFieldDecoratorMixin, lookups.IEndsWith):\n pass\n\n def get_rhs_op(self, connection, rhs):\n return 'LIKE LOWER(%s)' % rhs\n\n\nclass StartsWith(NetFieldDecoratorMixin, lookups.StartsWith):\n lookup_name = 'startswith'\n\n\nclass IStartsWith(NetFieldDecoratorMixin, lookups.IStartsWith):\n pass\n\n def get_rhs_op(self, connection, rhs):\n return 'LIKE LOWER(%s)' % rhs\n\n\nclass Regex(NetFieldDecoratorMixin, lookups.Regex):\n pass\n\n\nclass IRegex(NetFieldDecoratorMixin, lookups.IRegex):\n pass\n\n\nclass NetContainsOrEquals(Lookup):\n lookup_name = 'net_contains_or_equals'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s >>= %s' % (lhs, rhs), params\n\n\nclass NetContains(Lookup):\n lookup_name = 'net_contains'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s >> %s' % (lhs, rhs), params\n\n\nclass NetContained(Lookup):\n lookup_name = 'net_contained'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s << %s' % (lhs, rhs), params\n\n\nclass NetContainedOrEqual(Lookup):\n lookup_name = 'net_contained_or_equal'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s <<= %s' % (lhs, rhs), params\n\n\nclass NetHost(Lookup):\n lookup_name = 'net_host'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n # Query parameters are automatically converted to IPNetwork objects, which are then turned to strings. 
We need\n # to omit the mask portion of the object's string representation to match PostgreSQL's HOST() function.\n if rhs_params:\n rhs_params[0] = rhs_params[0].split('/')[0]\n params = lhs_params + rhs_params\n return 'HOST(%s) = %s' % (lhs, rhs), params\n\n\nclass NetIn(Lookup):\n lookup_name = 'net_in'\n\n def get_prep_lookup(self):\n # Don't cast the query value to a netaddr object, since it may or may not include a mask.\n return self.rhs\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n with_mask, without_mask = [], []\n for address in rhs_params[0]:\n if '/' in address:\n with_mask.append(address)\n else:\n without_mask.append(address)\n\n address_in_clause = self.create_in_clause('{} IN ('.format(lhs), len(with_mask))\n host_in_clause = self.create_in_clause('HOST({}) IN ('.format(lhs), len(without_mask))\n\n if with_mask and not without_mask:\n return address_in_clause, with_mask\n elif not with_mask and without_mask:\n return host_in_clause, without_mask\n\n in_clause = '({}) OR ({})'.format(address_in_clause, host_in_clause)\n with_mask.extend(without_mask)\n return in_clause, with_mask\n\n @staticmethod\n def create_in_clause(clause_part, max_size):\n clause_elements = [clause_part]\n for offset in range(0, max_size):\n if offset > 0:\n clause_elements.append(', ')\n clause_elements.append('%s')\n clause_elements.append(')')\n return ''.join(clause_elements)\n\n\nclass NetHostContained(Lookup):\n \"\"\"\n Check for the host portion of an IP address without regard to its mask. This allows us to find e.g. 192.0.2.1/24\n when specifying a parent prefix of 192.0.2.0/26.\n \"\"\"\n lookup_name = 'net_host_contained'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return 'CAST(HOST(%s) AS INET) << %s' % (lhs, rhs), params\n\n\nclass NetFamily(Transform):\n lookup_name = 'family'\n function = 'FAMILY'\n\n @property\n def output_field(self):\n return IntegerField()\n\n\nclass NetMaskLength(Transform):\n function = 'MASKLEN'\n lookup_name = 'net_mask_length'\n\n @property\n def output_field(self):\n return IntegerField()\n\n\nclass Host(Transform):\n function = 'HOST'\n lookup_name = 'host'\n\n\nclass Inet(Transform):\n function = 'INET'\n lookup_name = 'inet'\n", "path": "netbox/ipam/lookups.py"}], "after_files": [{"content": "from django.db.models import IntegerField, Lookup, Transform, lookups\n\n\nclass NetFieldDecoratorMixin(object):\n\n def process_lhs(self, qn, connection, lhs=None):\n lhs = lhs or self.lhs\n lhs_string, lhs_params = qn.compile(lhs)\n lhs_string = 'TEXT(%s)' % lhs_string\n return lhs_string, lhs_params\n\n\nclass IExact(NetFieldDecoratorMixin, lookups.IExact):\n\n def get_rhs_op(self, connection, rhs):\n return '= LOWER(%s)' % rhs\n\n\nclass EndsWith(NetFieldDecoratorMixin, lookups.EndsWith):\n pass\n\n\nclass IEndsWith(NetFieldDecoratorMixin, lookups.IEndsWith):\n pass\n\n def get_rhs_op(self, connection, rhs):\n return 'LIKE LOWER(%s)' % rhs\n\n\nclass StartsWith(NetFieldDecoratorMixin, lookups.StartsWith):\n lookup_name = 'startswith'\n\n\nclass IStartsWith(NetFieldDecoratorMixin, lookups.IStartsWith):\n pass\n\n def get_rhs_op(self, connection, rhs):\n return 'LIKE LOWER(%s)' % rhs\n\n\nclass Regex(NetFieldDecoratorMixin, lookups.Regex):\n pass\n\n\nclass IRegex(NetFieldDecoratorMixin, lookups.IRegex):\n pass\n\n\nclass 
NetContainsOrEquals(Lookup):\n lookup_name = 'net_contains_or_equals'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s >>= %s' % (lhs, rhs), params\n\n\nclass NetContains(Lookup):\n lookup_name = 'net_contains'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s >> %s' % (lhs, rhs), params\n\n\nclass NetContained(Lookup):\n lookup_name = 'net_contained'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s << %s' % (lhs, rhs), params\n\n\nclass NetContainedOrEqual(Lookup):\n lookup_name = 'net_contained_or_equal'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return '%s <<= %s' % (lhs, rhs), params\n\n\nclass NetHost(Lookup):\n lookup_name = 'net_host'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n # Query parameters are automatically converted to IPNetwork objects, which are then turned to strings. We need\n # to omit the mask portion of the object's string representation to match PostgreSQL's HOST() function.\n if rhs_params:\n rhs_params[0] = rhs_params[0].split('/')[0]\n params = lhs_params + rhs_params\n return 'HOST(%s) = %s' % (lhs, rhs), params\n\n\nclass NetIn(Lookup):\n lookup_name = 'net_in'\n\n def get_prep_lookup(self):\n # Don't cast the query value to a netaddr object, since it may or may not include a mask.\n return self.rhs\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n with_mask, without_mask = [], []\n for address in rhs_params[0]:\n if '/' in address:\n with_mask.append(address)\n else:\n without_mask.append(address)\n\n address_in_clause = self.create_in_clause('{} IN ('.format(lhs), len(with_mask))\n host_in_clause = self.create_in_clause('HOST({}) IN ('.format(lhs), len(without_mask))\n\n if with_mask and not without_mask:\n return address_in_clause, with_mask\n elif not with_mask and without_mask:\n return host_in_clause, without_mask\n\n in_clause = '({}) OR ({})'.format(address_in_clause, host_in_clause)\n with_mask.extend(without_mask)\n return in_clause, with_mask\n\n @staticmethod\n def create_in_clause(clause_part, max_size):\n clause_elements = [clause_part]\n for offset in range(0, max_size):\n if offset > 0:\n clause_elements.append(', ')\n clause_elements.append('%s')\n clause_elements.append(')')\n return ''.join(clause_elements)\n\n\nclass NetHostContained(Lookup):\n \"\"\"\n Check for the host portion of an IP address without regard to its mask. This allows us to find e.g. 
192.0.2.1/24\n when specifying a parent prefix of 192.0.2.0/26.\n \"\"\"\n lookup_name = 'net_host_contained'\n\n def as_sql(self, qn, connection):\n lhs, lhs_params = self.process_lhs(qn, connection)\n rhs, rhs_params = self.process_rhs(qn, connection)\n params = lhs_params + rhs_params\n return 'CAST(HOST(%s) AS INET) <<= %s' % (lhs, rhs), params\n\n\nclass NetFamily(Transform):\n lookup_name = 'family'\n function = 'FAMILY'\n\n @property\n def output_field(self):\n return IntegerField()\n\n\nclass NetMaskLength(Transform):\n function = 'MASKLEN'\n lookup_name = 'net_mask_length'\n\n @property\n def output_field(self):\n return IntegerField()\n\n\nclass Host(Transform):\n function = 'HOST'\n lookup_name = 'host'\n\n\nclass Inet(Transform):\n function = 'INET'\n lookup_name = 'inet'\n", "path": "netbox/ipam/lookups.py"}]}
| 2,199 | 153 |
gh_patches_debug_11542
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdetection-1404
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
assert len(indices) == self.total_size error during multiple GPU training
I am trying to train my dataset on 8 GPUs. However, after calling `./dist_train.sh` this assertion error appears:
Traceback (most recent call last):
File "./tools/train.py", line 113, in <module>
main()
File "./tools/train.py", line 109, in main
logger=logger)
File "/mmdetection/mmdet/apis/train.py", line 58, in train_detector
_dist_train(model, dataset, cfg, validate=validate)
File "/mmdetection/mmdet/apis/train.py", line 186, in _dist_train
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
File "/opt/conda/lib/python3.6/site-packages/mmcv/runner/runner.py", line 358, in run
epoch_runner(data_loaders[i], **kwargs)
File "/opt/conda/lib/python3.6/site-packages/mmcv/runner/runner.py", line 260, in train
for i, data_batch in enumerate(data_loader):
File "/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 193, in __iter__ return _DataLoaderIter(self)
File "/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 493, in __init__
self._put_indices()
File "/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 591, in _put_indices
indices = next(self.sample_iter, None)
File "/opt/conda/lib/python3.6/site-packages/torch/utils/data/sampler.py", line 172, in __iter__
for idx in self.sampler:
File "/mmdetection/mmdet/datasets/loader/sampler.py", line 138, in __iter__
assert len(indices) == self.total_size
...
in the config I tried various values for `imgs_per_gpu` and `workers_per_gpu`, currently it is:
`imgs_per_gpu=2,
workers_per_gpu=2,`
none of these settings worked, though. Single-GPU training works well.
What is the meaning of this assert?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmdet/datasets/loader/sampler.py`
Content:
```
1 from __future__ import division
2 import math
3
4 import numpy as np
5 import torch
6 from mmcv.runner.utils import get_dist_info
7 from torch.utils.data import DistributedSampler as _DistributedSampler
8 from torch.utils.data import Sampler
9
10
11 class DistributedSampler(_DistributedSampler):
12
13 def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
14 super().__init__(dataset, num_replicas=num_replicas, rank=rank)
15 self.shuffle = shuffle
16
17 def __iter__(self):
18 # deterministically shuffle based on epoch
19 if self.shuffle:
20 g = torch.Generator()
21 g.manual_seed(self.epoch)
22 indices = torch.randperm(len(self.dataset), generator=g).tolist()
23 else:
24 indices = torch.arange(len(self.dataset)).tolist()
25
26 # add extra samples to make it evenly divisible
27 indices += indices[:(self.total_size - len(indices))]
28 assert len(indices) == self.total_size
29
30 # subsample
31 indices = indices[self.rank:self.total_size:self.num_replicas]
32 assert len(indices) == self.num_samples
33
34 return iter(indices)
35
36
37 class GroupSampler(Sampler):
38
39 def __init__(self, dataset, samples_per_gpu=1):
40 assert hasattr(dataset, 'flag')
41 self.dataset = dataset
42 self.samples_per_gpu = samples_per_gpu
43 self.flag = dataset.flag.astype(np.int64)
44 self.group_sizes = np.bincount(self.flag)
45 self.num_samples = 0
46 for i, size in enumerate(self.group_sizes):
47 self.num_samples += int(np.ceil(
48 size / self.samples_per_gpu)) * self.samples_per_gpu
49
50 def __iter__(self):
51 indices = []
52 for i, size in enumerate(self.group_sizes):
53 if size == 0:
54 continue
55 indice = np.where(self.flag == i)[0]
56 assert len(indice) == size
57 np.random.shuffle(indice)
58 num_extra = int(np.ceil(size / self.samples_per_gpu)
59 ) * self.samples_per_gpu - len(indice)
60 indice = np.concatenate([indice, indice[:num_extra]])
61 indices.append(indice)
62 indices = np.concatenate(indices)
63 indices = [
64 indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
65 for i in np.random.permutation(
66 range(len(indices) // self.samples_per_gpu))
67 ]
68 indices = np.concatenate(indices)
69 indices = indices.astype(np.int64).tolist()
70 assert len(indices) == self.num_samples
71 return iter(indices)
72
73 def __len__(self):
74 return self.num_samples
75
76
77 class DistributedGroupSampler(Sampler):
78 """Sampler that restricts data loading to a subset of the dataset.
79 It is especially useful in conjunction with
80 :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
81 process can pass a DistributedSampler instance as a DataLoader sampler,
82 and load a subset of the original dataset that is exclusive to it.
83 .. note::
84 Dataset is assumed to be of constant size.
85 Arguments:
86 dataset: Dataset used for sampling.
87 num_replicas (optional): Number of processes participating in
88 distributed training.
89 rank (optional): Rank of the current process within num_replicas.
90 """
91
92 def __init__(self,
93 dataset,
94 samples_per_gpu=1,
95 num_replicas=None,
96 rank=None):
97 _rank, _num_replicas = get_dist_info()
98 if num_replicas is None:
99 num_replicas = _num_replicas
100 if rank is None:
101 rank = _rank
102 self.dataset = dataset
103 self.samples_per_gpu = samples_per_gpu
104 self.num_replicas = num_replicas
105 self.rank = rank
106 self.epoch = 0
107
108 assert hasattr(self.dataset, 'flag')
109 self.flag = self.dataset.flag
110 self.group_sizes = np.bincount(self.flag)
111
112 self.num_samples = 0
113 for i, j in enumerate(self.group_sizes):
114 self.num_samples += int(
115 math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
116 self.num_replicas)) * self.samples_per_gpu
117 self.total_size = self.num_samples * self.num_replicas
118
119 def __iter__(self):
120 # deterministically shuffle based on epoch
121 g = torch.Generator()
122 g.manual_seed(self.epoch)
123
124 indices = []
125 for i, size in enumerate(self.group_sizes):
126 if size > 0:
127 indice = np.where(self.flag == i)[0]
128 assert len(indice) == size
129 indice = indice[list(torch.randperm(int(size),
130 generator=g))].tolist()
131 extra = int(
132 math.ceil(
133 size * 1.0 / self.samples_per_gpu / self.num_replicas)
134 ) * self.samples_per_gpu * self.num_replicas - len(indice)
135 indice += indice[:extra]
136 indices += indice
137
138 assert len(indices) == self.total_size
139
140 indices = [
141 indices[j] for i in list(
142 torch.randperm(
143 len(indices) // self.samples_per_gpu, generator=g))
144 for j in range(i * self.samples_per_gpu, (i + 1) *
145 self.samples_per_gpu)
146 ]
147
148 # subsample
149 offset = self.num_samples * self.rank
150 indices = indices[offset:offset + self.num_samples]
151 assert len(indices) == self.num_samples
152
153 return iter(indices)
154
155 def __len__(self):
156 return self.num_samples
157
158 def set_epoch(self, epoch):
159 self.epoch = epoch
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmdet/datasets/loader/sampler.py b/mmdet/datasets/loader/sampler.py
--- a/mmdet/datasets/loader/sampler.py
+++ b/mmdet/datasets/loader/sampler.py
@@ -132,8 +132,12 @@
math.ceil(
size * 1.0 / self.samples_per_gpu / self.num_replicas)
) * self.samples_per_gpu * self.num_replicas - len(indice)
- indice += indice[:extra]
- indices += indice
+ # pad indice
+ tmp = indice.copy()
+ for _ in range(extra // size):
+ indice.extend(tmp)
+ indice.extend(tmp[:extra % size])
+ indices.extend(indice)
assert len(indices) == self.total_size
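The assert fires because the original padding, `indice += indice[:extra]`, can add at most `len(indice)` extra samples; whenever a group is smaller than the amount of padding it needs (small groups, many GPUs), the padded list falls short of `total_size`. The patch instead repeats the whole group `extra // size` times and then appends the remainder. A small standalone sketch of that padding step (the helper name and the example numbers are illustrative, not from the codebase):
```python
import math


def pad_group(indice, samples_per_gpu, num_replicas):
    """Pad one group's indices to a multiple of samples_per_gpu * num_replicas."""
    size = len(indice)
    extra = int(
        math.ceil(size / samples_per_gpu / num_replicas)
    ) * samples_per_gpu * num_replicas - size
    padded = list(indice)
    tmp = list(indice)
    for _ in range(extra // size):      # repeat the whole group as often as needed
        padded.extend(tmp)
    padded.extend(tmp[:extra % size])   # then top up with the remainder
    return padded


# A group of 3 images, imgs_per_gpu=2, 8 GPUs: 16 slots are needed.
# The old slice-based padding would produce only 3 + 3 = 6 indices and trip the assert.
print(len(pad_group([0, 1, 2], samples_per_gpu=2, num_replicas=8)))  # 16
```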
|
{"golden_diff": "diff --git a/mmdet/datasets/loader/sampler.py b/mmdet/datasets/loader/sampler.py\n--- a/mmdet/datasets/loader/sampler.py\n+++ b/mmdet/datasets/loader/sampler.py\n@@ -132,8 +132,12 @@\n math.ceil(\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\n- indice += indice[:extra]\n- indices += indice\n+ # pad indice\n+ tmp = indice.copy()\n+ for _ in range(extra // size):\n+ indice.extend(tmp)\n+ indice.extend(tmp[:extra % size])\n+ indices.extend(indice)\n \n assert len(indices) == self.total_size\n", "issue": "assert len(indices) == self.total_size error during multiple GPU training\nI am trying to train my dataset on 8 GPU's. However, after calling `./dist_train.sh` this error assertion appeares:\r\n\r\nTraceback (most recent call last): \r\n File \"./tools/train.py\", line 113, in <module> \r\n main() \r\n File \"./tools/train.py\", line 109, in main \r\n logger=logger) \r\n File \"/mmdetection/mmdet/apis/train.py\", line 58, in train_detector \r\n _dist_train(model, dataset, cfg, validate=validate) \r\n File \"/mmdetection/mmdet/apis/train.py\", line 186, in _dist_train \r\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs) \r\n File \"/opt/conda/lib/python3.6/site-packages/mmcv/runner/runner.py\", line 358, in run\r\n epoch_runner(data_loaders[i], **kwargs) \r\n File \"/opt/conda/lib/python3.6/site-packages/mmcv/runner/runner.py\", line 260, in train\r\n for i, data_batch in enumerate(data_loader): \r\n File \"/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py\", line 193, in __iter__ return _DataLoaderIter(self) \r\n File \"/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py\", line 493, in __init__\r\n self._put_indices() \r\n File \"/opt/conda/lib/python3.6/site-packages/torch/utils/data/dataloader.py\", line 591, in _put_indices\r\n indices = next(self.sample_iter, None) \r\n File \"/opt/conda/lib/python3.6/site-packages/torch/utils/data/sampler.py\", line 172, in __iter__\r\n for idx in self.sampler: \r\n File \"/mmdetection/mmdet/datasets/loader/sampler.py\", line 138, in __iter__ \r\n assert len(indices) == self.total_size \r\n...\r\n\r\nin the config I tried various values for `imgs_per_gpu` and `workers_per_gpu`, currently it is:\r\n`imgs_per_gpu=2,\r\nworkers_per_gpu=2,`\r\nno settings was working though. 
Single-GPU training works well.\r\n\r\nWhat is the meaning of this assert?\r\nThanks!\n", "before_files": [{"content": "from __future__ import division\nimport math\n\nimport numpy as np\nimport torch\nfrom mmcv.runner.utils import get_dist_info\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\nfrom torch.utils.data import Sampler\n\n\nclass DistributedSampler(_DistributedSampler):\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n self.shuffle = shuffle\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n\nclass GroupSampler(Sampler):\n\n def __init__(self, dataset, samples_per_gpu=1):\n assert hasattr(dataset, 'flag')\n self.dataset = dataset\n self.samples_per_gpu = samples_per_gpu\n self.flag = dataset.flag.astype(np.int64)\n self.group_sizes = np.bincount(self.flag)\n self.num_samples = 0\n for i, size in enumerate(self.group_sizes):\n self.num_samples += int(np.ceil(\n size / self.samples_per_gpu)) * self.samples_per_gpu\n\n def __iter__(self):\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size == 0:\n continue\n indice = np.where(self.flag == i)[0]\n assert len(indice) == size\n np.random.shuffle(indice)\n num_extra = int(np.ceil(size / self.samples_per_gpu)\n ) * self.samples_per_gpu - len(indice)\n indice = np.concatenate([indice, indice[:num_extra]])\n indices.append(indice)\n indices = np.concatenate(indices)\n indices = [\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n for i in np.random.permutation(\n range(len(indices) // self.samples_per_gpu))\n ]\n indices = np.concatenate(indices)\n indices = indices.astype(np.int64).tolist()\n assert len(indices) == self.num_samples\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n\nclass DistributedGroupSampler(Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and load a subset of the original dataset that is exclusive to it.\n .. 
note::\n Dataset is assumed to be of constant size.\n Arguments:\n dataset: Dataset used for sampling.\n num_replicas (optional): Number of processes participating in\n distributed training.\n rank (optional): Rank of the current process within num_replicas.\n \"\"\"\n\n def __init__(self,\n dataset,\n samples_per_gpu=1,\n num_replicas=None,\n rank=None):\n _rank, _num_replicas = get_dist_info()\n if num_replicas is None:\n num_replicas = _num_replicas\n if rank is None:\n rank = _rank\n self.dataset = dataset\n self.samples_per_gpu = samples_per_gpu\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n\n assert hasattr(self.dataset, 'flag')\n self.flag = self.dataset.flag\n self.group_sizes = np.bincount(self.flag)\n\n self.num_samples = 0\n for i, j in enumerate(self.group_sizes):\n self.num_samples += int(\n math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /\n self.num_replicas)) * self.samples_per_gpu\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size > 0:\n indice = np.where(self.flag == i)[0]\n assert len(indice) == size\n indice = indice[list(torch.randperm(int(size),\n generator=g))].tolist()\n extra = int(\n math.ceil(\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\n indice += indice[:extra]\n indices += indice\n\n assert len(indices) == self.total_size\n\n indices = [\n indices[j] for i in list(\n torch.randperm(\n len(indices) // self.samples_per_gpu, generator=g))\n for j in range(i * self.samples_per_gpu, (i + 1) *\n self.samples_per_gpu)\n ]\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n", "path": "mmdet/datasets/loader/sampler.py"}], "after_files": [{"content": "from __future__ import division\nimport math\n\nimport numpy as np\nimport torch\nfrom mmcv.runner.utils import get_dist_info\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\nfrom torch.utils.data import Sampler\n\n\nclass DistributedSampler(_DistributedSampler):\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n self.shuffle = shuffle\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n\nclass GroupSampler(Sampler):\n\n def __init__(self, dataset, samples_per_gpu=1):\n assert hasattr(dataset, 'flag')\n self.dataset = dataset\n self.samples_per_gpu = samples_per_gpu\n self.flag = dataset.flag.astype(np.int64)\n self.group_sizes = np.bincount(self.flag)\n self.num_samples = 0\n for i, size in enumerate(self.group_sizes):\n self.num_samples += int(np.ceil(\n size / 
self.samples_per_gpu)) * self.samples_per_gpu\n\n def __iter__(self):\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size == 0:\n continue\n indice = np.where(self.flag == i)[0]\n assert len(indice) == size\n np.random.shuffle(indice)\n num_extra = int(np.ceil(size / self.samples_per_gpu)\n ) * self.samples_per_gpu - len(indice)\n indice = np.concatenate([indice, indice[:num_extra]])\n indices.append(indice)\n indices = np.concatenate(indices)\n indices = [\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n for i in np.random.permutation(\n range(len(indices) // self.samples_per_gpu))\n ]\n indices = np.concatenate(indices)\n indices = indices.astype(np.int64).tolist()\n assert len(indices) == self.num_samples\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n\nclass DistributedGroupSampler(Sampler):\n \"\"\"Sampler that restricts data loading to a subset of the dataset.\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and load a subset of the original dataset that is exclusive to it.\n .. note::\n Dataset is assumed to be of constant size.\n Arguments:\n dataset: Dataset used for sampling.\n num_replicas (optional): Number of processes participating in\n distributed training.\n rank (optional): Rank of the current process within num_replicas.\n \"\"\"\n\n def __init__(self,\n dataset,\n samples_per_gpu=1,\n num_replicas=None,\n rank=None):\n _rank, _num_replicas = get_dist_info()\n if num_replicas is None:\n num_replicas = _num_replicas\n if rank is None:\n rank = _rank\n self.dataset = dataset\n self.samples_per_gpu = samples_per_gpu\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n\n assert hasattr(self.dataset, 'flag')\n self.flag = self.dataset.flag\n self.group_sizes = np.bincount(self.flag)\n\n self.num_samples = 0\n for i, j in enumerate(self.group_sizes):\n self.num_samples += int(\n math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /\n self.num_replicas)) * self.samples_per_gpu\n self.total_size = self.num_samples * self.num_replicas\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size > 0:\n indice = np.where(self.flag == i)[0]\n assert len(indice) == size\n indice = indice[list(torch.randperm(int(size),\n generator=g))].tolist()\n extra = int(\n math.ceil(\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\n # pad indice\n tmp = indice.copy()\n for _ in range(extra // size):\n indice.extend(tmp)\n indice.extend(tmp[:extra % size])\n indices.extend(indice)\n\n assert len(indices) == self.total_size\n\n indices = [\n indices[j] for i in list(\n torch.randperm(\n len(indices) // self.samples_per_gpu, generator=g))\n for j in range(i * self.samples_per_gpu, (i + 1) *\n self.samples_per_gpu)\n ]\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n", "path": "mmdet/datasets/loader/sampler.py"}]}
| 2,360 | 173 |
gh_patches_debug_13229
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-1921
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[libselinux] libselinux/:3.0 Missing symlinks=True in recipe
## Package and Environment Details
* Package Name/Version: **libselinux/3.0**
* Operating System+version: **Linux Fedora 31**
* Compiler+version: **GCC 9**
* Conan version: **conan 1.26.0**
* Python version: **Python 3.7.7**
## Conan profile
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=9
compiler.libcxx=libstdc++11
build_type=Release
[options]
[build_requires]
[env]
```
### Steps to reproduce
`conan install -o shared=True libselinux/3.0@`
```
$ ls -lh ~/.conan/data/libselinux/3.0/_/_/package/7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b/lib/
total 3.0M
-rwxr-xr-x. 1 forbinn forbinn 733K Jun 13 09:26 libselinux.so
-rwxr-xr-x. 1 forbinn forbinn 733K Jun 13 09:26 libselinux.so.1
-rwxr-xr-x. 1 forbinn forbinn 749K Jun 13 09:26 libsepol.so
-rwxr-xr-x. 1 forbinn forbinn 749K Jun 13 09:26 libsepol.so.1
```
### Logs
<details><summary>Click to expand log</summary>
```
Configuration:
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
compiler=gcc
compiler.libcxx=libstdc++11
compiler.version=9
os=Linux
os_build=Linux
[options]
shared=True
[build_requires]
[env]
Installing package: libselinux/3.0
Requirements
bzip2/1.0.8 from 'conan-center' - Cache
libselinux/3.0 from 'conan-center' - Cache
pcre2/10.33 from 'conan-center' - Cache
zlib/1.2.11 from 'conan-center' - Cache
Packages
bzip2/1.0.8:da606cf731e334010b0bf6e85a2a6f891b9f36b0 - Cache
libselinux/3.0:7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b - Download
pcre2/10.33:18903774d26ee0498535ef95198a1c997e4ca9ba - Cache
zlib/1.2.11:6af9cc7cb931c5ad942174fd7838eb655717c709 - Cache
Installing (downloading, building) binaries...
libselinux/3.0: Retrieving package 7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b from remote 'conan-center'
Downloading conanmanifest.txt completed [3.71k]
Downloading conaninfo.txt completed [0.88k]
Downloading conan_package.tgz completed [1249.37k]
Decompressing conan_package.tgz completed [0.00k]
libselinux/3.0: Package installed 7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b
libselinux/3.0: Downloaded package revision 0
bzip2/1.0.8: Already installed!
zlib/1.2.11: Already installed!
pcre2/10.33: Already installed!
```
</details>
## Notes
As you can see in the `ls` command output, `libselinux` and `libsepol` each appear twice without symlinks. This is because the current recipe does not set the `symlinks` flag to `True` inside the `package()` method.
Current [conanfile.py](https://github.com/conan-io/conan-center-index/blob/master/recipes/libselinux/all/conanfile.py#L59)
`self.copy(pattern="*.so*", dst="lib", src=library, keep_path=False)`
Expected:
`self.copy(pattern="*.so*", dst="lib", src=library, keep_path=False, symlinks=True)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/libselinux/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools, AutoToolsBuildEnvironment
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import glob
5
6
7 class LibSELinuxConan(ConanFile):
8 name = "libselinux"
9 description = "Security-enhanced Linux is a patch of the Linux kernel and a number of utilities with enhanced security functionality designed to add mandatory access controls to Linux"
10 topics = ("conan", "selinux", "security-enhanced linux")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/SELinuxProject/selinux"
13 license = "Unlicense" # This library (libselinux) is public domain software, i.e. not copyrighted
14 settings = "os", "arch", "compiler", "build_type"
15 options = {"shared": [True, False], "fPIC": [True, False]}
16 default_options = {"shared": False, "fPIC": True}
17 requires = ("pcre2/10.33",)
18
19 def _get_subfolders(self):
20 _sepol_subfolder = "libsepol-%s" % self.version
21 _selinux_subfolder = "libselinux-%s" % self.version
22 return _sepol_subfolder, _selinux_subfolder
23
24 def configure(self):
25 del self.settings.compiler.libcxx
26 del self.settings.compiler.cppstd
27 if self.settings.os != "Linux":
28 raise ConanInvalidConfiguration("Only Linux is supported")
29
30 def build_requirements(self):
31 self.build_requires("flex/2.6.4")
32
33 def source(self):
34 for download in self.conan_data["sources"][self.version]:
35 tools.get(**download)
36
37 def build(self):
38 _sepol_subfolder, _selinux_subfolder = self._get_subfolders()
39 pcre_inc = os.path.join(self.deps_cpp_info["pcre2"].rootpath,
40 self.deps_cpp_info["pcre2"].includedirs[0])
41 pcre_libs = ' '.join(["-l%s" % lib for lib in self.deps_cpp_info["pcre2"].libs])
42 sepol_inc = os.path.join(self.source_folder, _sepol_subfolder, "include")
43 with tools.chdir(os.path.join(_sepol_subfolder, "src")):
44 args = ["libsepol.so.1" if self.options.shared else "libsepol.a"]
45 env_build = AutoToolsBuildEnvironment(self)
46 env_build.make(args=args)
47 with tools.chdir(os.path.join(_selinux_subfolder, "src")):
48 args = ["libselinux.so.1" if self.options.shared else "libselinux.a",
49 'PCRE_CFLAGS=-DPCRE2_CODE_UNIT_WIDTH=8 -DUSE_PCRE2=1 -I%s -I%s' % (pcre_inc, sepol_inc),
50 'PCRE_LDLIBS=%s' % pcre_libs]
51 env_build = AutoToolsBuildEnvironment(self)
52 env_build.make(args=args)
53
54 def package(self):
55 _sepol_subfolder, _selinux_subfolder = self._get_subfolders()
56 self.copy(pattern="LICENSE", dst="licenses", src=_selinux_subfolder)
57 for library in [_sepol_subfolder, _selinux_subfolder]:
58 self.copy(pattern="*.h", dst="include", src=os.path.join(library, "include"), keep_path=True)
59 self.copy(pattern="*.so*", dst="lib", src=library, keep_path=False)
60 self.copy(pattern="*.a", dst="lib", src=library, keep_path=False)
61
62 def package_info(self):
63 self.cpp_info.libs = ["selinux", "sepol"]
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/libselinux/all/conanfile.py b/recipes/libselinux/all/conanfile.py
--- a/recipes/libselinux/all/conanfile.py
+++ b/recipes/libselinux/all/conanfile.py
@@ -56,7 +56,7 @@
self.copy(pattern="LICENSE", dst="licenses", src=_selinux_subfolder)
for library in [_sepol_subfolder, _selinux_subfolder]:
self.copy(pattern="*.h", dst="include", src=os.path.join(library, "include"), keep_path=True)
- self.copy(pattern="*.so*", dst="lib", src=library, keep_path=False)
+ self.copy(pattern="*.so*", dst="lib", src=library, keep_path=False, symlinks=True)
self.copy(pattern="*.a", dst="lib", src=library, keep_path=False)
def package_info(self):
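The `ls` output in the issue shows both `libselinux.so` and `libselinux.so.1` as full ~733K files, i.e. the `.so -> .so.1` symlink was resolved into a second complete copy during packaging; adding `symlinks=True` to the `*.so*` copy keeps the link as a link, as in the golden diff above. A standalone Python illustration of the same follow-versus-preserve distinction on a POSIX filesystem (plain `shutil`, not Conan's `self.copy`):
```python
import os
import shutil
import tempfile

src = tempfile.mkdtemp()
dst_follow = tempfile.mkdtemp()
dst_preserve = tempfile.mkdtemp()

# Stand-in for the real library plus its versioned symlink.
with open(os.path.join(src, "libselinux.so.1"), "wb") as f:
    f.write(b"\0" * 1024)
os.symlink("libselinux.so.1", os.path.join(src, "libselinux.so"))

for name in os.listdir(src):
    s = os.path.join(src, name)
    shutil.copy(s, os.path.join(dst_follow, name))                            # follows the link: two full files
    shutil.copy(s, os.path.join(dst_preserve, name), follow_symlinks=False)   # preserves the link

print(os.path.islink(os.path.join(dst_follow, "libselinux.so")))    # False (duplicated library)
print(os.path.islink(os.path.join(dst_preserve, "libselinux.so")))  # True  (small symlink)
```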
|
{"golden_diff": "diff --git a/recipes/libselinux/all/conanfile.py b/recipes/libselinux/all/conanfile.py\n--- a/recipes/libselinux/all/conanfile.py\n+++ b/recipes/libselinux/all/conanfile.py\n@@ -56,7 +56,7 @@\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=_selinux_subfolder)\n for library in [_sepol_subfolder, _selinux_subfolder]:\n self.copy(pattern=\"*.h\", dst=\"include\", src=os.path.join(library, \"include\"), keep_path=True)\n- self.copy(pattern=\"*.so*\", dst=\"lib\", src=library, keep_path=False)\n+ self.copy(pattern=\"*.so*\", dst=\"lib\", src=library, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.a\", dst=\"lib\", src=library, keep_path=False)\n \n def package_info(self):\n", "issue": "[libselinux] libselinux/:3.0 Missing symlinks=True in recipe\n## Package and Environment Details\r\n * Package Name/Version: **libselinux/3.0**\r\n * Operating System+version: **Linux Fedora 31**\r\n * Compiler+version: **GCC 9**\r\n * Conan version: **conan 1.26.0**\r\n * Python version: **Python 3.7.7**\r\n\r\n## Conan profile\r\n```\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=9\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n`conan install -o shared=True libselinux/3.0@`\r\n```\r\n$ ls -lh ~/.conan/data/libselinux/3.0/_/_/package/7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b/lib/\r\n\r\ntotal 3.0M\r\n-rwxr-xr-x. 1 forbinn forbinn 733K Jun 13 09:26 libselinux.so\r\n-rwxr-xr-x. 1 forbinn forbinn 733K Jun 13 09:26 libselinux.so.1\r\n-rwxr-xr-x. 1 forbinn forbinn 749K Jun 13 09:26 libsepol.so\r\n-rwxr-xr-x. 1 forbinn forbinn 749K Jun 13 09:26 libsepol.so.1\r\n```\r\n\r\n\r\n### Logs\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nConfiguration:\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=9\r\nos=Linux\r\nos_build=Linux\r\n[options]\r\nshared=True\r\n[build_requires]\r\n[env]\r\n\r\nInstalling package: libselinux/3.0\r\nRequirements\r\n bzip2/1.0.8 from 'conan-center' - Cache\r\n libselinux/3.0 from 'conan-center' - Cache\r\n pcre2/10.33 from 'conan-center' - Cache\r\n zlib/1.2.11 from 'conan-center' - Cache\r\nPackages\r\n bzip2/1.0.8:da606cf731e334010b0bf6e85a2a6f891b9f36b0 - Cache\r\n libselinux/3.0:7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b - Download\r\n pcre2/10.33:18903774d26ee0498535ef95198a1c997e4ca9ba - Cache\r\n zlib/1.2.11:6af9cc7cb931c5ad942174fd7838eb655717c709 - Cache\r\n\r\nInstalling (downloading, building) binaries...\r\nlibselinux/3.0: Retrieving package 7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b from remote 'conan-center' \r\nDownloading conanmanifest.txt completed [3.71k] \r\nDownloading conaninfo.txt completed [0.88k] \r\nDownloading conan_package.tgz completed [1249.37k] \r\nDecompressing conan_package.tgz completed [0.00k] \r\nlibselinux/3.0: Package installed 7a5a46e61ac4a00adf1fe45a7ed646d200a7c49b\r\nlibselinux/3.0: Downloaded package revision 0\r\nbzip2/1.0.8: Already installed!\r\nzlib/1.2.11: Already installed!\r\npcre2/10.33: Already installed!\r\n```\r\n\r\n</details>\r\n\r\n## Notes\r\n\r\nAs you can see in the `ls` command output the `libselinux` and `libsepol` appears 2 times without symlinks. 
This is due to the fact that the current recipe do not set set `symlinks` flag to `True` inside the `package()` method.\r\n\r\nCurrent [conanfile.py](https://github.com/conan-io/conan-center-index/blob/master/recipes/libselinux/all/conanfile.py#L59)\r\n`self.copy(pattern=\"*.so*\", dst=\"lib\", src=library, keep_path=False)`\r\nExpected:\r\n`self.copy(pattern=\"*.so*\", dst=\"lib\", src=library, keep_path=False, symlinks=True)`\n", "before_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport glob\n\n\nclass LibSELinuxConan(ConanFile):\n name = \"libselinux\"\n description = \"Security-enhanced Linux is a patch of the Linux kernel and a number of utilities with enhanced security functionality designed to add mandatory access controls to Linux\"\n topics = (\"conan\", \"selinux\", \"security-enhanced linux\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/SELinuxProject/selinux\"\n license = \"Unlicense\" # This library (libselinux) is public domain software, i.e. not copyrighted\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n requires = (\"pcre2/10.33\",)\n\n def _get_subfolders(self):\n _sepol_subfolder = \"libsepol-%s\" % self.version\n _selinux_subfolder = \"libselinux-%s\" % self.version\n return _sepol_subfolder, _selinux_subfolder\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.settings.os != \"Linux\":\n raise ConanInvalidConfiguration(\"Only Linux is supported\")\n\n def build_requirements(self):\n self.build_requires(\"flex/2.6.4\")\n\n def source(self):\n for download in self.conan_data[\"sources\"][self.version]:\n tools.get(**download)\n\n def build(self):\n _sepol_subfolder, _selinux_subfolder = self._get_subfolders()\n pcre_inc = os.path.join(self.deps_cpp_info[\"pcre2\"].rootpath,\n self.deps_cpp_info[\"pcre2\"].includedirs[0])\n pcre_libs = ' '.join([\"-l%s\" % lib for lib in self.deps_cpp_info[\"pcre2\"].libs])\n sepol_inc = os.path.join(self.source_folder, _sepol_subfolder, \"include\")\n with tools.chdir(os.path.join(_sepol_subfolder, \"src\")):\n args = [\"libsepol.so.1\" if self.options.shared else \"libsepol.a\"]\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=args)\n with tools.chdir(os.path.join(_selinux_subfolder, \"src\")):\n args = [\"libselinux.so.1\" if self.options.shared else \"libselinux.a\",\n 'PCRE_CFLAGS=-DPCRE2_CODE_UNIT_WIDTH=8 -DUSE_PCRE2=1 -I%s -I%s' % (pcre_inc, sepol_inc),\n 'PCRE_LDLIBS=%s' % pcre_libs]\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=args)\n\n def package(self):\n _sepol_subfolder, _selinux_subfolder = self._get_subfolders()\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=_selinux_subfolder)\n for library in [_sepol_subfolder, _selinux_subfolder]:\n self.copy(pattern=\"*.h\", dst=\"include\", src=os.path.join(library, \"include\"), keep_path=True)\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=library, keep_path=False)\n self.copy(pattern=\"*.a\", dst=\"lib\", src=library, keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"selinux\", \"sepol\"]\n", "path": "recipes/libselinux/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nfrom conans.errors import 
ConanInvalidConfiguration\nimport os\nimport glob\n\n\nclass LibSELinuxConan(ConanFile):\n name = \"libselinux\"\n description = \"Security-enhanced Linux is a patch of the Linux kernel and a number of utilities with enhanced security functionality designed to add mandatory access controls to Linux\"\n topics = (\"conan\", \"selinux\", \"security-enhanced linux\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/SELinuxProject/selinux\"\n license = \"Unlicense\" # This library (libselinux) is public domain software, i.e. not copyrighted\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n requires = (\"pcre2/10.33\",)\n\n def _get_subfolders(self):\n _sepol_subfolder = \"libsepol-%s\" % self.version\n _selinux_subfolder = \"libselinux-%s\" % self.version\n return _sepol_subfolder, _selinux_subfolder\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n if self.settings.os != \"Linux\":\n raise ConanInvalidConfiguration(\"Only Linux is supported\")\n\n def build_requirements(self):\n self.build_requires(\"flex/2.6.4\")\n\n def source(self):\n for download in self.conan_data[\"sources\"][self.version]:\n tools.get(**download)\n\n def build(self):\n _sepol_subfolder, _selinux_subfolder = self._get_subfolders()\n pcre_inc = os.path.join(self.deps_cpp_info[\"pcre2\"].rootpath,\n self.deps_cpp_info[\"pcre2\"].includedirs[0])\n pcre_libs = ' '.join([\"-l%s\" % lib for lib in self.deps_cpp_info[\"pcre2\"].libs])\n sepol_inc = os.path.join(self.source_folder, _sepol_subfolder, \"include\")\n with tools.chdir(os.path.join(_sepol_subfolder, \"src\")):\n args = [\"libsepol.so.1\" if self.options.shared else \"libsepol.a\"]\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=args)\n with tools.chdir(os.path.join(_selinux_subfolder, \"src\")):\n args = [\"libselinux.so.1\" if self.options.shared else \"libselinux.a\",\n 'PCRE_CFLAGS=-DPCRE2_CODE_UNIT_WIDTH=8 -DUSE_PCRE2=1 -I%s -I%s' % (pcre_inc, sepol_inc),\n 'PCRE_LDLIBS=%s' % pcre_libs]\n env_build = AutoToolsBuildEnvironment(self)\n env_build.make(args=args)\n\n def package(self):\n _sepol_subfolder, _selinux_subfolder = self._get_subfolders()\n self.copy(pattern=\"LICENSE\", dst=\"licenses\", src=_selinux_subfolder)\n for library in [_sepol_subfolder, _selinux_subfolder]:\n self.copy(pattern=\"*.h\", dst=\"include\", src=os.path.join(library, \"include\"), keep_path=True)\n self.copy(pattern=\"*.so*\", dst=\"lib\", src=library, keep_path=False, symlinks=True)\n self.copy(pattern=\"*.a\", dst=\"lib\", src=library, keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"selinux\", \"sepol\"]\n", "path": "recipes/libselinux/all/conanfile.py"}]}
| 2,336 | 197 |
gh_patches_debug_7307
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-9895
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] freeimage/3.18.0: Plugins aren't enabled
The preprocessor symbols used in the patches (WITH_JPEG etc.) are not defined by cmake.
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **freeimage/3.18.0**
* Operating System+version: **Windows 11**
* Compiler+version: **Visual Studio 2019**
* Conan version: **conan 1.46.2**
* Python version: **Python 3.10.3**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Windows
os_build=Windows
arch=x86_64
arch_build=x86_64
compiler=Visual Studio
compiler.version=16
```
### Steps to reproduce (Include if Applicable)
Load an image with any optional plugin (JPEG, PNG, TIFF, etc.). The test package didn't check for errors.
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
freeimage/3.18.0 (test package): Running test()
FreeImage 3.18.0, with:
- bmp
- ico
- jng
- koa
- iff,lbm
- mng
- pbm
- pbm
- pcd
- pcx
- pgm
- pgm
- ppm
- ppm
- ras
- tga,targa
- wap,wbmp,wbm
- psd,psb
- cut
- xbm
- xpm
- dds
- gif
- hdr
- sgi,rgb,rgba,bw
- pfm
- pct,pict,pic
```
Should be:
```
freeimage/3.18.0 (test package): Running test()
FreeImage 3.18.0, with:
- bmp
- ico
- jpg,jif,jpeg,jpe
- jng
- koa
- iff,lbm
- mng
- pbm
- pbm
- pcd
- pcx
- pgm
- pgm
- png
- ppm
- ppm
- ras
- tga,targa
- tif,tiff
- wap,wbmp,wbm
- psd,psb
- cut
- xbm
- xpm
- dds
- gif
- hdr
- sgi,rgb,rgba,bw
- exr
- j2k,j2c
- jp2
- pfm
- pct,pict,pic
- 3fr,arw,bay,bmq,cap,cine,cr2,crw,cs1,dc2,dcr,drf,dsc,dng,erf,fff,ia,iiq,k25,kc2,kdc,mdc,mef,mos,mrw,nef,nrw,orf,pef,ptx,pxn,qtk,raf,raw,rdc,rw2,rwl,rwz,sr2,srf,srw,sti,x3f
- webp
- jxr,wdp,hdp
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/freeimage/all/conanfile.py`
Content:
```
1 from conans import ConanFile, CMake, tools
2 import os
3 import shutil
4
5 required_conan_version = ">=1.43.0"
6
7
8 class FreeImageConan(ConanFile):
9 name = "freeimage"
10 description = "Open Source library project for developers who would like to support popular graphics image formats"\
11 "like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications."
12 homepage = "https://freeimage.sourceforge.io"
13 url = "https://github.com/conan-io/conan-center-index"
14 license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later"
15 topics = ("freeimage", "image", "decoding", "graphics")
16 generators = "cmake", "cmake_find_package"
17 settings = "os", "arch", "compiler", "build_type"
18 options = {
19 "shared": [True, False],
20 "fPIC": [True, False],
21 "with_jpeg": [False, "libjpeg", "libjpeg-turbo"],
22 "with_png": [True, False],
23 "with_tiff": [True, False],
24 "with_jpeg2000": [True, False],
25 "with_openexr": [True, False],
26 "with_eigen": [True, False],
27 "with_webp": [True, False],
28 "with_raw": [True, False],
29 "with_jxr": [True, False],
30 }
31 default_options = {
32 "shared": False,
33 "fPIC": True,
34 "with_jpeg": "libjpeg",
35 "with_png": True,
36 "with_tiff": True,
37 "with_jpeg2000": True,
38 "with_openexr": True,
39 "with_eigen": True,
40 "with_webp": True,
41 "with_raw": True,
42 "with_jxr": True,
43 }
44
45 _cmake = None
46
47 @property
48 def _source_subfolder(self):
49 return "source_subfolder"
50
51 @property
52 def _build_subfolder(self):
53 return "build_subfolder"
54
55 def export_sources(self):
56 self.copy("CMakeLists.txt")
57 for patch in self.conan_data.get("patches", {}).get(self.version, []):
58 self.copy(patch["patch_file"])
59
60 def config_options(self):
61 if self.settings.os == "Windows":
62 del self.options.fPIC
63
64 def configure(self):
65 tools.check_min_cppstd(self, "11")
66 if self.options.shared:
67 del self.options.fPIC
68 self.output.warn("G3 plugin and JPEGTransform are disabled.")
69 if self.options.with_jpeg is not None:
70 if self.options.with_tiff:
71 self.options["libtiff"].jpeg = self.options.with_jpeg
72
73 def requirements(self):
74 self.requires("zlib/1.2.11")
75 if self.options.with_jpeg == "libjpeg":
76 self.requires("libjpeg/9d")
77 elif self.options.with_jpeg == "libjpeg-turbo":
78 self.requires("libjpeg-turbo/2.1.2")
79 if self.options.with_jpeg2000:
80 self.requires("openjpeg/2.4.0")
81 if self.options.with_png:
82 self.requires("libpng/1.6.37")
83 if self.options.with_webp:
84 self.requires("libwebp/1.2.2")
85 if self.options.with_openexr:
86 self.requires("openexr/2.5.7")
87 if self.options.with_raw:
88 self.requires("libraw/0.20.2")
89 if self.options.with_jxr:
90 self.requires("jxrlib/cci.20170615")
91 if self.options.with_tiff:
92 self.requires("libtiff/4.3.0")
93
94 def source(self):
95 tools.get(**self.conan_data["sources"][self.version],
96 destination=self._source_subfolder, strip_root=True)
97
98 def _configure_cmake(self):
99 if self._cmake:
100 return self._cmake
101 self._cmake = CMake(self)
102 self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg != False
103 self._cmake.definitions["WITH_OPENJPEG"] = self.options.with_jpeg2000
104 self._cmake.definitions["WITH_PNG"] = self.options.with_png
105 self._cmake.definitions["WITH_WEBP"] = self.options.with_webp
106 self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr
107 self._cmake.definitions["WITH_RAW"] = self.options.with_raw
108 self._cmake.definitions["WITH_JXR"] = self.options.with_jxr
109 self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff
110 self._cmake.configure(build_dir=self._build_subfolder)
111 return self._cmake
112
113 def build(self):
114 tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibPNG"))
115 tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibTIFF4"))
116 tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibOpenJPEG"))
117 tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibJXR"))
118 tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibWebP"))
119 tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibRawLite"))
120 tools.rmdir(os.path.join(self._source_subfolder, "Source", "OpenEXR"))
121
122 for patch in self.conan_data.get("patches", {}).get(self.version, {}):
123 tools.patch(**patch)
124 cmake = self._configure_cmake()
125 cmake.build()
126
127 def package(self):
128 cmake = self._configure_cmake()
129 cmake.install()
130 self.copy("license-fi.txt", dst="licenses", src=self._source_subfolder)
131 self.copy("license-gplv3.txt", dst="licenses", src=self._source_subfolder)
132 self.copy("license-gplv2.txt", dst="licenses", src=self._source_subfolder)
133
134 def package_info(self):
135 def imageformats_deps():
136 components = []
137 components.append("zlib::zlib")
138 if self.options.with_jpeg:
139 components.append("{0}::{0}".format(self.options.with_jpeg))
140 if self.options.with_jpeg2000:
141 components.append("openjpeg::openjpeg")
142 if self.options.with_png:
143 components.append("libpng::libpng")
144 if self.options.with_webp:
145 components.append("libwebp::libwebp")
146 if self.options.with_openexr:
147 components.append("openexr::openexr")
148 if self.options.with_raw:
149 components.append("libraw::libraw")
150 if self.options.with_jxr:
151 components.append("jxrlib::jxrlib")
152 if self.options.with_tiff:
153 components.append("libtiff::libtiff")
154 return components
155
156 self.cpp_info.names["pkg_config"] = "freeimage"
157 self.cpp_info.names["cmake_find_package"] = "FreeImage"
158 self.cpp_info.names["cmake_find_package_multi"] = "FreeImage"
159 self.cpp_info.components["FreeImage"].libs = ["freeimage"]
160 self.cpp_info.components["FreeImage"].requires = imageformats_deps()
161 self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"]
162 self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"]
163
164 if not self.options.shared:
165 self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/freeimage/all/conanfile.py b/recipes/freeimage/all/conanfile.py
--- a/recipes/freeimage/all/conanfile.py
+++ b/recipes/freeimage/all/conanfile.py
@@ -13,6 +13,7 @@
url = "https://github.com/conan-io/conan-center-index"
license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later"
topics = ("freeimage", "image", "decoding", "graphics")
+ short_paths = True
generators = "cmake", "cmake_find_package"
settings = "os", "arch", "compiler", "build_type"
options = {
|
{"golden_diff": "diff --git a/recipes/freeimage/all/conanfile.py b/recipes/freeimage/all/conanfile.py\n--- a/recipes/freeimage/all/conanfile.py\n+++ b/recipes/freeimage/all/conanfile.py\n@@ -13,6 +13,7 @@\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"FreeImage\", \"GPL-3.0-or-later\", \"GPL-2.0-or-later\"\n topics = (\"freeimage\", \"image\", \"decoding\", \"graphics\")\n+ short_paths = True\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n", "issue": "[package] freeimage/3.18.0: Plugins aren't enabled\nThe preprocessor symbols used in the patches (WITH_JPEG etc.) are not defined by cmake.\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **freeimage/3.18.0**\r\n * Operating System+version: **Windows 11**\r\n * Compiler+version: **Visual Studio 2019**\r\n * Conan version: **conan 1.46.2**\r\n * Python version: **Python 3.10.3**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nLoad image with any optional plugin (JPEG, PNG, TIFF, etc). The test package didn't check for errors.\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nfreeimage/3.18.0 (test package): Running test()\r\nFreeImage 3.18.0, with:\r\n\t- bmp\r\n\t- ico\r\n\t- jng\r\n\t- koa\r\n\t- iff,lbm\r\n\t- mng\r\n\t- pbm\r\n\t- pbm\r\n\t- pcd\r\n\t- pcx\r\n\t- pgm\r\n\t- pgm\r\n\t- ppm\r\n\t- ppm\r\n\t- ras\r\n\t- tga,targa\r\n\t- wap,wbmp,wbm\r\n\t- psd,psb\r\n\t- cut\r\n\t- xbm\r\n\t- xpm\r\n\t- dds\r\n\t- gif\r\n\t- hdr\r\n\t- sgi,rgb,rgba,bw\r\n\t- pfm\r\n\t- pct,pict,pic\r\n```\r\n\r\nShould be:\r\n\r\n```\r\nfreeimage/3.18.0 (test package): Running test()\r\nFreeImage 3.18.0, with:\r\n\t- bmp\r\n\t- ico\r\n\t- jpg,jif,jpeg,jpe\r\n\t- jng\r\n\t- koa\r\n\t- iff,lbm\r\n\t- mng\r\n\t- pbm\r\n\t- pbm\r\n\t- pcd\r\n\t- pcx\r\n\t- pgm\r\n\t- pgm\r\n\t- png\r\n\t- ppm\r\n\t- ppm\r\n\t- ras\r\n\t- tga,targa\r\n\t- tif,tiff\r\n\t- wap,wbmp,wbm\r\n\t- psd,psb\r\n\t- cut\r\n\t- xbm\r\n\t- xpm\r\n\t- dds\r\n\t- gif\r\n\t- hdr\r\n\t- sgi,rgb,rgba,bw\r\n\t- exr\r\n\t- j2k,j2c\r\n\t- jp2\r\n\t- pfm\r\n\t- pct,pict,pic\r\n\t- 3fr,arw,bay,bmq,cap,cine,cr2,crw,cs1,dc2,dcr,drf,dsc,dng,erf,fff,ia,iiq,k25,kc2,kdc,mdc,mef,mos,mrw,nef,nrw,orf,pef,ptx,pxn,qtk,raf,raw,rdc,rw2,rwl,rwz,sr2,srf,srw,sti,x3f\r\n\t- webp\r\n\t- jxr,wdp,hdp\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\nimport shutil\n\nrequired_conan_version = \">=1.43.0\"\n\n\nclass FreeImageConan(ConanFile):\n name = \"freeimage\"\n description = \"Open Source library project for developers who would like to support popular graphics image formats\"\\\n \"like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications.\"\n homepage = \"https://freeimage.sourceforge.io\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"FreeImage\", \"GPL-3.0-or-later\", \"GPL-2.0-or-later\"\n topics = (\"freeimage\", \"image\", \"decoding\", \"graphics\")\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": 
[True, False],\n \"fPIC\": [True, False],\n \"with_jpeg\": [False, \"libjpeg\", \"libjpeg-turbo\"],\n \"with_png\": [True, False],\n \"with_tiff\": [True, False],\n \"with_jpeg2000\": [True, False],\n \"with_openexr\": [True, False],\n \"with_eigen\": [True, False],\n \"with_webp\": [True, False],\n \"with_raw\": [True, False],\n \"with_jxr\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_jpeg\": \"libjpeg\",\n \"with_png\": True,\n \"with_tiff\": True,\n \"with_jpeg2000\": True,\n \"with_openexr\": True,\n \"with_eigen\": True,\n \"with_webp\": True,\n \"with_raw\": True,\n \"with_jxr\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def export_sources(self):\n self.copy(\"CMakeLists.txt\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n self.copy(patch[\"patch_file\"])\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n tools.check_min_cppstd(self, \"11\")\n if self.options.shared:\n del self.options.fPIC\n self.output.warn(\"G3 plugin and JPEGTransform are disabled.\")\n if self.options.with_jpeg is not None:\n if self.options.with_tiff:\n self.options[\"libtiff\"].jpeg = self.options.with_jpeg\n\n def requirements(self):\n self.requires(\"zlib/1.2.11\")\n if self.options.with_jpeg == \"libjpeg\":\n self.requires(\"libjpeg/9d\")\n elif self.options.with_jpeg == \"libjpeg-turbo\":\n self.requires(\"libjpeg-turbo/2.1.2\")\n if self.options.with_jpeg2000:\n self.requires(\"openjpeg/2.4.0\")\n if self.options.with_png:\n self.requires(\"libpng/1.6.37\")\n if self.options.with_webp:\n self.requires(\"libwebp/1.2.2\")\n if self.options.with_openexr:\n self.requires(\"openexr/2.5.7\")\n if self.options.with_raw:\n self.requires(\"libraw/0.20.2\")\n if self.options.with_jxr:\n self.requires(\"jxrlib/cci.20170615\")\n if self.options.with_tiff:\n self.requires(\"libtiff/4.3.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"WITH_JPEG\"] = self.options.with_jpeg != False\n self._cmake.definitions[\"WITH_OPENJPEG\"] = self.options.with_jpeg2000\n self._cmake.definitions[\"WITH_PNG\"] = self.options.with_png\n self._cmake.definitions[\"WITH_WEBP\"] = self.options.with_webp\n self._cmake.definitions[\"WITH_OPENEXR\"] = self.options.with_openexr\n self._cmake.definitions[\"WITH_RAW\"] = self.options.with_raw\n self._cmake.definitions[\"WITH_JXR\"] = self.options.with_jxr\n self._cmake.definitions[\"WITH_TIFF\"] = self.options.with_tiff\n self._cmake.configure(build_dir=self._build_subfolder)\n return self._cmake\n\n def build(self):\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibPNG\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibTIFF4\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibOpenJPEG\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibJXR\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibWebP\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibRawLite\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"OpenEXR\"))\n\n for patch in self.conan_data.get(\"patches\", 
{}).get(self.version, {}):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(\"license-fi.txt\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"license-gplv3.txt\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"license-gplv2.txt\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n def imageformats_deps():\n components = []\n components.append(\"zlib::zlib\")\n if self.options.with_jpeg:\n components.append(\"{0}::{0}\".format(self.options.with_jpeg))\n if self.options.with_jpeg2000:\n components.append(\"openjpeg::openjpeg\")\n if self.options.with_png:\n components.append(\"libpng::libpng\")\n if self.options.with_webp:\n components.append(\"libwebp::libwebp\")\n if self.options.with_openexr:\n components.append(\"openexr::openexr\")\n if self.options.with_raw:\n components.append(\"libraw::libraw\")\n if self.options.with_jxr:\n components.append(\"jxrlib::jxrlib\")\n if self.options.with_tiff:\n components.append(\"libtiff::libtiff\")\n return components\n\n self.cpp_info.names[\"pkg_config\"] = \"freeimage\"\n self.cpp_info.names[\"cmake_find_package\"] = \"FreeImage\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"FreeImage\"\n self.cpp_info.components[\"FreeImage\"].libs = [\"freeimage\"]\n self.cpp_info.components[\"FreeImage\"].requires = imageformats_deps()\n self.cpp_info.components[\"FreeImagePlus\"].libs = [\"freeimageplus\"]\n self.cpp_info.components[\"FreeImagePlus\"].requires = [\"FreeImage\"]\n\n if not self.options.shared:\n self.cpp_info.components[\"FreeImage\"].defines.append(\"FREEIMAGE_LIB\")\n", "path": "recipes/freeimage/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\nimport shutil\n\nrequired_conan_version = \">=1.43.0\"\n\n\nclass FreeImageConan(ConanFile):\n name = \"freeimage\"\n description = \"Open Source library project for developers who would like to support popular graphics image formats\"\\\n \"like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications.\"\n homepage = \"https://freeimage.sourceforge.io\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"FreeImage\", \"GPL-3.0-or-later\", \"GPL-2.0-or-later\"\n topics = (\"freeimage\", \"image\", \"decoding\", \"graphics\")\n short_paths = True\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_jpeg\": [False, \"libjpeg\", \"libjpeg-turbo\"],\n \"with_png\": [True, False],\n \"with_tiff\": [True, False],\n \"with_jpeg2000\": [True, False],\n \"with_openexr\": [True, False],\n \"with_eigen\": [True, False],\n \"with_webp\": [True, False],\n \"with_raw\": [True, False],\n \"with_jxr\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_jpeg\": \"libjpeg\",\n \"with_png\": True,\n \"with_tiff\": True,\n \"with_jpeg2000\": True,\n \"with_openexr\": True,\n \"with_eigen\": True,\n \"with_webp\": True,\n \"with_raw\": True,\n \"with_jxr\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def export_sources(self):\n self.copy(\"CMakeLists.txt\")\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n 
self.copy(patch[\"patch_file\"])\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n tools.check_min_cppstd(self, \"11\")\n if self.options.shared:\n del self.options.fPIC\n self.output.warn(\"G3 plugin and JPEGTransform are disabled.\")\n if self.options.with_jpeg is not None:\n if self.options.with_tiff:\n self.options[\"libtiff\"].jpeg = self.options.with_jpeg\n\n def requirements(self):\n self.requires(\"zlib/1.2.11\")\n if self.options.with_jpeg == \"libjpeg\":\n self.requires(\"libjpeg/9d\")\n elif self.options.with_jpeg == \"libjpeg-turbo\":\n self.requires(\"libjpeg-turbo/2.1.2\")\n if self.options.with_jpeg2000:\n self.requires(\"openjpeg/2.4.0\")\n if self.options.with_png:\n self.requires(\"libpng/1.6.37\")\n if self.options.with_webp:\n self.requires(\"libwebp/1.2.2\")\n if self.options.with_openexr:\n self.requires(\"openexr/2.5.7\")\n if self.options.with_raw:\n self.requires(\"libraw/0.20.2\")\n if self.options.with_jxr:\n self.requires(\"jxrlib/cci.20170615\")\n if self.options.with_tiff:\n self.requires(\"libtiff/4.3.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"WITH_JPEG\"] = self.options.with_jpeg != False\n self._cmake.definitions[\"WITH_OPENJPEG\"] = self.options.with_jpeg2000\n self._cmake.definitions[\"WITH_PNG\"] = self.options.with_png\n self._cmake.definitions[\"WITH_WEBP\"] = self.options.with_webp\n self._cmake.definitions[\"WITH_OPENEXR\"] = self.options.with_openexr\n self._cmake.definitions[\"WITH_RAW\"] = self.options.with_raw\n self._cmake.definitions[\"WITH_JXR\"] = self.options.with_jxr\n self._cmake.definitions[\"WITH_TIFF\"] = self.options.with_tiff\n self._cmake.configure(build_dir=self._build_subfolder)\n return self._cmake\n\n def build(self):\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibPNG\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibTIFF4\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibOpenJPEG\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibJXR\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibWebP\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"LibRawLite\"))\n tools.rmdir(os.path.join(self._source_subfolder, \"Source\", \"OpenEXR\"))\n\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, {}):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n self.copy(\"license-fi.txt\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"license-gplv3.txt\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"license-gplv2.txt\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n def imageformats_deps():\n components = []\n components.append(\"zlib::zlib\")\n if self.options.with_jpeg:\n components.append(\"{0}::{0}\".format(self.options.with_jpeg))\n if self.options.with_jpeg2000:\n components.append(\"openjpeg::openjpeg\")\n if self.options.with_png:\n components.append(\"libpng::libpng\")\n if self.options.with_webp:\n components.append(\"libwebp::libwebp\")\n if self.options.with_openexr:\n components.append(\"openexr::openexr\")\n if self.options.with_raw:\n 
components.append(\"libraw::libraw\")\n if self.options.with_jxr:\n components.append(\"jxrlib::jxrlib\")\n if self.options.with_tiff:\n components.append(\"libtiff::libtiff\")\n return components\n\n self.cpp_info.names[\"pkg_config\"] = \"freeimage\"\n self.cpp_info.names[\"cmake_find_package\"] = \"FreeImage\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"FreeImage\"\n self.cpp_info.components[\"FreeImage\"].libs = [\"freeimage\"]\n self.cpp_info.components[\"FreeImage\"].requires = imageformats_deps()\n self.cpp_info.components[\"FreeImagePlus\"].libs = [\"freeimageplus\"]\n self.cpp_info.components[\"FreeImagePlus\"].requires = [\"FreeImage\"]\n\n if not self.options.shared:\n self.cpp_info.components[\"FreeImage\"].defines.append(\"FREEIMAGE_LIB\")\n", "path": "recipes/freeimage/all/conanfile.py"}]}
| 3,146 | 160 |
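The golden diff in the record above is a single added attribute, `short_paths = True`. In Conan 1.x this relocates a recipe's source and build folders to a short root on Windows (such as `C:\.conan\<hash>`) so deeply nested trees stay under the legacy 260-character path limit; whether that alone is what re-enables the FreeImage plugins is not shown in the record, so the sketch below is only an illustration of the attribute, with a placeholder package name, version, and build step.

```python
# Minimal sketch of a Conan 1.x recipe using short_paths. The package name,
# version and build steps are placeholders, not taken from the record above.
from conans import ConanFile, CMake


class ExampleConan(ConanFile):
    name = "example"
    version = "1.0"
    settings = "os", "arch", "compiler", "build_type"
    generators = "cmake"
    # On Windows, short_paths = True makes Conan build under a short root
    # (e.g. C:\.conan\<hash>) instead of the deeply nested cache path, which
    # keeps long source trees and patch targets under the MAX_PATH limit.
    short_paths = True

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
```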
gh_patches_debug_3385
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1601
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSRF in /admin/payments/batch/new_filled/
### Describe the bug
The admin page to create a new batch is on the url /admin/payments/batch/new_filled/, but this works with a GET request and is thus vulnerable to cross site request forgery. I believe no csrf token is checked
### How to reproduce
Steps to reproduce the behaviour:
1. Go to /admin/payments/batch/new_filled/
2. A new batch is created without user action
### Expected behaviour
Some action on the concrexit site should create a POST request, preferably including a csrf token
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/payments/admin_views.py`
Content:
```
1 """Admin views provided by the payments package."""
2 import csv
3
4 from django.apps import apps
5 from django.contrib import messages
6 from django.contrib.admin.utils import model_ngettext
7 from django.contrib.admin.views.decorators import staff_member_required
8 from django.contrib.auth.decorators import permission_required
9 from django.db.models import Sum, Count, Min, Max
10 from django.http import HttpResponse
11 from django.core.exceptions import SuspiciousOperation, DisallowedRedirect
12 from django.shortcuts import redirect, get_object_or_404, render
13 from django.utils import timezone
14 from django.utils.text import capfirst
15 from django.utils.decorators import method_decorator
16 from django.utils.http import url_has_allowed_host_and_scheme
17 from django.utils.translation import gettext_lazy as _
18 from django.views import View
19
20 from payments import services
21 from .models import Payment, Batch, PaymentUser
22
23
24 @method_decorator(staff_member_required, name="dispatch")
25 @method_decorator(
26 permission_required("payments.process_payments"), name="dispatch",
27 )
28 class PaymentAdminView(View):
29 """View that creates a payment."""
30
31 def post(self, request, *args, app_label, model_name, payable, **kwargs):
32 if "type" not in request.POST:
33 raise SuspiciousOperation("Missing POST parameters")
34
35 if "next" in request.POST and not url_has_allowed_host_and_scheme(
36 request.POST.get("next"), allowed_hosts={request.get_host()}
37 ):
38 raise DisallowedRedirect
39
40 payable_model = apps.get_model(app_label=app_label, model_name=model_name)
41 payable_obj = payable_model.objects.get(pk=payable)
42
43 result = services.create_payment(
44 payable_obj, self.request.member, request.POST["type"],
45 )
46 payable_obj.save()
47
48 if result:
49 messages.success(
50 request, _("Successfully paid %s.") % model_ngettext(payable_obj, 1),
51 )
52 else:
53 messages.error(
54 request, _("Could not pay %s.") % model_ngettext(payable_obj, 1),
55 )
56 return redirect(f"admin:{app_label}_{model_name}_change", payable_obj.pk)
57
58 if "next" in request.POST:
59 return redirect(request.POST["next"])
60
61 return redirect("admin:payments_payment_change", result.pk)
62
63
64 @method_decorator(staff_member_required, name="dispatch")
65 @method_decorator(
66 permission_required("payments.process_batches"), name="dispatch",
67 )
68 class BatchProcessAdminView(View):
69 """View that processes a batch."""
70
71 def post(self, request, *args, **kwargs):
72 batch = Batch.objects.get(pk=kwargs["pk"])
73
74 if "next" in request.POST and not url_has_allowed_host_and_scheme(
75 request.POST.get("next"), allowed_hosts={request.get_host()}
76 ):
77 raise DisallowedRedirect
78
79 if batch.processed:
80 messages.error(
81 request, _("{} already processed.").format(model_ngettext(batch, 1))
82 )
83 else:
84 services.process_batch(batch)
85 messages.success(
86 request,
87 _("Successfully processed {}.").format(model_ngettext(batch, 1)),
88 )
89
90 if "next" in request.POST:
91 return redirect(request.POST["next"])
92
93 return redirect("admin:payments_batch_change", kwargs["pk"])
94
95
96 @method_decorator(staff_member_required, name="dispatch")
97 @method_decorator(
98 permission_required("payments.process_batches"), name="dispatch",
99 )
100 class BatchExportAdminView(View):
101 """View that exports a batch."""
102
103 def post(self, request, *args, **kwargs):
104 batch = Batch.objects.get(pk=kwargs["pk"])
105
106 response = HttpResponse(content_type="text/csv")
107 response["Content-Disposition"] = 'attachment;filename="batch.csv"'
108 writer = csv.writer(response)
109 headers = [
110 _("Account holder"),
111 _("IBAN"),
112 _("Mandate Reference"),
113 _("Amount"),
114 _("Description"),
115 _("Mandate Date"),
116 ]
117 writer.writerow([capfirst(x) for x in headers])
118
119 member_rows = batch.payments_set.values("paid_by").annotate(total=Sum("amount"))
120
121 for row in member_rows:
122 member = PaymentUser.objects.get(id=row["paid_by"])
123 bankaccount = member.bank_accounts.last()
124 writer.writerow(
125 [
126 bankaccount.name,
127 bankaccount.iban,
128 bankaccount.mandate_no,
129 f"{row['total']:.2f}",
130 batch.description,
131 bankaccount.valid_from,
132 ]
133 )
134 return response
135
136
137 @method_decorator(staff_member_required, name="dispatch")
138 @method_decorator(
139 permission_required("payments.process_batches"), name="dispatch",
140 )
141 class BatchTopicExportAdminView(View):
142 """View that exports a batch per topic."""
143
144 def post(self, request, *args, **kwargs):
145 batch = Batch.objects.get(pk=kwargs["pk"])
146
147 response = HttpResponse(content_type="text/csv")
148 response["Content-Disposition"] = 'attachment;filename="batch-topic.csv"'
149 writer = csv.writer(response)
150 headers = [
151 _("Topic"),
152 _("No. of payments"),
153 _("First payment"),
154 _("Last payment"),
155 _("Total amount"),
156 ]
157 writer.writerow([capfirst(x) for x in headers])
158
159 topic_rows = (
160 batch.payments_set.values("topic")
161 .annotate(
162 total=Sum("amount"),
163 count=Count("paid_by"),
164 min_date=Min("created_at"),
165 max_date=Max("created_at"),
166 )
167 .order_by("topic")
168 )
169
170 for row in topic_rows:
171 writer.writerow(
172 [
173 row["topic"],
174 row["count"],
175 timezone.localtime(row["min_date"]).date(),
176 timezone.localtime(row["max_date"]).date(),
177 f"{row['total']:.2f}",
178 ]
179 )
180 return response
181
182
183 @method_decorator(staff_member_required, name="dispatch")
184 @method_decorator(
185 permission_required("payments.process_batches"), name="dispatch",
186 )
187 class BatchTopicDescriptionAdminView(View):
188 """Shows the topic export as plain text."""
189
190 template_name = "admin/payments/batch_topic.html"
191
192 def post(self, request, *args, **kwargs):
193 context = {}
194 batch = get_object_or_404(Batch, pk=kwargs["pk"])
195 topic_rows = (
196 batch.payments_set.values("topic")
197 .annotate(
198 total=Sum("amount"),
199 count=Count("paid_by"),
200 min_date=Min("created_at"),
201 max_date=Max("created_at"),
202 )
203 .order_by("topic")
204 )
205
206 description = f"Batch {batch.id} - {batch.processing_date if batch.processing_date else timezone.now().date()}:\n"
207 for row in topic_rows:
208 description += f"- {row['topic']} ({row['count']}x) [{timezone.localtime(row['min_date']).date()} -- {timezone.localtime(row['max_date']).date()}], total €{row['total']:.2f}\n"
209 description += f"\n{batch.description}"
210
211 context["batch"] = batch
212 context["description"] = description
213 return render(request, self.template_name, context)
214
215
216 @method_decorator(staff_member_required, name="dispatch")
217 @method_decorator(
218 permission_required("payments.process_batches"), name="dispatch",
219 )
220 class BatchNewFilledAdminView(View):
221 """View that adds a new batch filled with all payments that where not already in a batch."""
222
223 def get(self, request, *args, **kwargs):
224 batch = Batch()
225 batch.save()
226
227 payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)
228
229 payments.update(batch=batch)
230
231 return redirect("admin:payments_batch_change", object_id=batch.id)
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/payments/admin_views.py b/website/payments/admin_views.py
--- a/website/payments/admin_views.py
+++ b/website/payments/admin_views.py
@@ -220,7 +220,7 @@
class BatchNewFilledAdminView(View):
"""View that adds a new batch filled with all payments that where not already in a batch."""
- def get(self, request, *args, **kwargs):
+ def post(self, request, *args, **kwargs):
batch = Batch()
batch.save()
|
{"golden_diff": "diff --git a/website/payments/admin_views.py b/website/payments/admin_views.py\n--- a/website/payments/admin_views.py\n+++ b/website/payments/admin_views.py\n@@ -220,7 +220,7 @@\n class BatchNewFilledAdminView(View):\n \"\"\"View that adds a new batch filled with all payments that where not already in a batch.\"\"\"\n \n- def get(self, request, *args, **kwargs):\n+ def post(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n", "issue": "CSRF in /admin/payments/batch/new_filled/\n### Describe the bug\r\nThe admin page to create a new batch is on the url /admin/payments/batch/new_filled/, but this works with a GET request and is thus vulnerable to cross site request forgery. I believe no csrf token is checked\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to /admin/payments/batch/new_filled/\r\n2. A new batch is created without user action\r\n\r\n### Expected behaviour\r\nSome action on the concrexit site should create a POST request, preferably including a csrf token\r\n\n", "before_files": [{"content": "\"\"\"Admin views provided by the payments package.\"\"\"\nimport csv\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db.models import Sum, Count, Min, Max\nfrom django.http import HttpResponse\nfrom django.core.exceptions import SuspiciousOperation, DisallowedRedirect\nfrom django.shortcuts import redirect, get_object_or_404, render\nfrom django.utils import timezone\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom payments import services\nfrom .models import Payment, Batch, PaymentUser\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_payments\"), name=\"dispatch\",\n)\nclass PaymentAdminView(View):\n \"\"\"View that creates a payment.\"\"\"\n\n def post(self, request, *args, app_label, model_name, payable, **kwargs):\n if \"type\" not in request.POST:\n raise SuspiciousOperation(\"Missing POST parameters\")\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable_obj = payable_model.objects.get(pk=payable)\n\n result = services.create_payment(\n payable_obj, self.request.member, request.POST[\"type\"],\n )\n payable_obj.save()\n\n if result:\n messages.success(\n request, _(\"Successfully paid %s.\") % model_ngettext(payable_obj, 1),\n )\n else:\n messages.error(\n request, _(\"Could not pay %s.\") % model_ngettext(payable_obj, 1),\n )\n return redirect(f\"admin:{app_label}_{model_name}_change\", payable_obj.pk)\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_payment_change\", result.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchProcessAdminView(View):\n \"\"\"View that processes a batch.\"\"\"\n\n def post(self, request, *args, **kwargs):\n 
batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n if batch.processed:\n messages.error(\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n services.process_batch(batch)\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\n )\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_batch_change\", kwargs[\"pk\"])\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchExportAdminView(View):\n \"\"\"View that exports a batch.\"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Account holder\"),\n _(\"IBAN\"),\n _(\"Mandate Reference\"),\n _(\"Amount\"),\n _(\"Description\"),\n _(\"Mandate Date\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n member_rows = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n\n for row in member_rows:\n member = PaymentUser.objects.get(id=row[\"paid_by\"])\n bankaccount = member.bank_accounts.last()\n writer.writerow(\n [\n bankaccount.name,\n bankaccount.iban,\n bankaccount.mandate_no,\n f\"{row['total']:.2f}\",\n batch.description,\n bankaccount.valid_from,\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicExportAdminView(View):\n \"\"\"View that exports a batch per topic.\"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch-topic.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Topic\"),\n _(\"No. 
of payments\"),\n _(\"First payment\"),\n _(\"Last payment\"),\n _(\"Total amount\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n for row in topic_rows:\n writer.writerow(\n [\n row[\"topic\"],\n row[\"count\"],\n timezone.localtime(row[\"min_date\"]).date(),\n timezone.localtime(row[\"max_date\"]).date(),\n f\"{row['total']:.2f}\",\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicDescriptionAdminView(View):\n \"\"\"Shows the topic export as plain text.\"\"\"\n\n template_name = \"admin/payments/batch_topic.html\"\n\n def post(self, request, *args, **kwargs):\n context = {}\n batch = get_object_or_404(Batch, pk=kwargs[\"pk\"])\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n description = f\"Batch {batch.id} - {batch.processing_date if batch.processing_date else timezone.now().date()}:\\n\"\n for row in topic_rows:\n description += f\"- {row['topic']} ({row['count']}x) [{timezone.localtime(row['min_date']).date()} -- {timezone.localtime(row['max_date']).date()}], total \u20ac{row['total']:.2f}\\n\"\n description += f\"\\n{batch.description}\"\n\n context[\"batch\"] = batch\n context[\"description\"] = description\n return render(request, self.template_name, context)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchNewFilledAdminView(View):\n \"\"\"View that adds a new batch filled with all payments that where not already in a batch.\"\"\"\n\n def get(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n\n payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)\n\n payments.update(batch=batch)\n\n return redirect(\"admin:payments_batch_change\", object_id=batch.id)\n", "path": "website/payments/admin_views.py"}], "after_files": [{"content": "\"\"\"Admin views provided by the payments package.\"\"\"\nimport csv\n\nfrom django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import permission_required\nfrom django.db.models import Sum, Count, Min, Max\nfrom django.http import HttpResponse\nfrom django.core.exceptions import SuspiciousOperation, DisallowedRedirect\nfrom django.shortcuts import redirect, get_object_or_404, render\nfrom django.utils import timezone\nfrom django.utils.text import capfirst\nfrom django.utils.decorators import method_decorator\nfrom django.utils.http import url_has_allowed_host_and_scheme\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom payments import services\nfrom .models import Payment, Batch, PaymentUser\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_payments\"), name=\"dispatch\",\n)\nclass PaymentAdminView(View):\n \"\"\"View that creates a payment.\"\"\"\n\n def post(self, 
request, *args, app_label, model_name, payable, **kwargs):\n if \"type\" not in request.POST:\n raise SuspiciousOperation(\"Missing POST parameters\")\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n payable_model = apps.get_model(app_label=app_label, model_name=model_name)\n payable_obj = payable_model.objects.get(pk=payable)\n\n result = services.create_payment(\n payable_obj, self.request.member, request.POST[\"type\"],\n )\n payable_obj.save()\n\n if result:\n messages.success(\n request, _(\"Successfully paid %s.\") % model_ngettext(payable_obj, 1),\n )\n else:\n messages.error(\n request, _(\"Could not pay %s.\") % model_ngettext(payable_obj, 1),\n )\n return redirect(f\"admin:{app_label}_{model_name}_change\", payable_obj.pk)\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_payment_change\", result.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchProcessAdminView(View):\n \"\"\"View that processes a batch.\"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n if \"next\" in request.POST and not url_has_allowed_host_and_scheme(\n request.POST.get(\"next\"), allowed_hosts={request.get_host()}\n ):\n raise DisallowedRedirect\n\n if batch.processed:\n messages.error(\n request, _(\"{} already processed.\").format(model_ngettext(batch, 1))\n )\n else:\n services.process_batch(batch)\n messages.success(\n request,\n _(\"Successfully processed {}.\").format(model_ngettext(batch, 1)),\n )\n\n if \"next\" in request.POST:\n return redirect(request.POST[\"next\"])\n\n return redirect(\"admin:payments_batch_change\", kwargs[\"pk\"])\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchExportAdminView(View):\n \"\"\"View that exports a batch.\"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch.csv\"'\n writer = csv.writer(response)\n headers = [\n _(\"Account holder\"),\n _(\"IBAN\"),\n _(\"Mandate Reference\"),\n _(\"Amount\"),\n _(\"Description\"),\n _(\"Mandate Date\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n member_rows = batch.payments_set.values(\"paid_by\").annotate(total=Sum(\"amount\"))\n\n for row in member_rows:\n member = PaymentUser.objects.get(id=row[\"paid_by\"])\n bankaccount = member.bank_accounts.last()\n writer.writerow(\n [\n bankaccount.name,\n bankaccount.iban,\n bankaccount.mandate_no,\n f\"{row['total']:.2f}\",\n batch.description,\n bankaccount.valid_from,\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicExportAdminView(View):\n \"\"\"View that exports a batch per topic.\"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch.objects.get(pk=kwargs[\"pk\"])\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = 'attachment;filename=\"batch-topic.csv\"'\n writer = csv.writer(response)\n headers = [\n 
_(\"Topic\"),\n _(\"No. of payments\"),\n _(\"First payment\"),\n _(\"Last payment\"),\n _(\"Total amount\"),\n ]\n writer.writerow([capfirst(x) for x in headers])\n\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n for row in topic_rows:\n writer.writerow(\n [\n row[\"topic\"],\n row[\"count\"],\n timezone.localtime(row[\"min_date\"]).date(),\n timezone.localtime(row[\"max_date\"]).date(),\n f\"{row['total']:.2f}\",\n ]\n )\n return response\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchTopicDescriptionAdminView(View):\n \"\"\"Shows the topic export as plain text.\"\"\"\n\n template_name = \"admin/payments/batch_topic.html\"\n\n def post(self, request, *args, **kwargs):\n context = {}\n batch = get_object_or_404(Batch, pk=kwargs[\"pk\"])\n topic_rows = (\n batch.payments_set.values(\"topic\")\n .annotate(\n total=Sum(\"amount\"),\n count=Count(\"paid_by\"),\n min_date=Min(\"created_at\"),\n max_date=Max(\"created_at\"),\n )\n .order_by(\"topic\")\n )\n\n description = f\"Batch {batch.id} - {batch.processing_date if batch.processing_date else timezone.now().date()}:\\n\"\n for row in topic_rows:\n description += f\"- {row['topic']} ({row['count']}x) [{timezone.localtime(row['min_date']).date()} -- {timezone.localtime(row['max_date']).date()}], total \u20ac{row['total']:.2f}\\n\"\n description += f\"\\n{batch.description}\"\n\n context[\"batch\"] = batch\n context[\"description\"] = description\n return render(request, self.template_name, context)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"payments.process_batches\"), name=\"dispatch\",\n)\nclass BatchNewFilledAdminView(View):\n \"\"\"View that adds a new batch filled with all payments that where not already in a batch.\"\"\"\n\n def post(self, request, *args, **kwargs):\n batch = Batch()\n batch.save()\n\n payments = Payment.objects.filter(type=Payment.TPAY, batch=None,)\n\n payments.update(batch=batch)\n\n return redirect(\"admin:payments_batch_change\", object_id=batch.id)\n", "path": "website/payments/admin_views.py"}]}
| 2,602 | 120 |
gh_patches_debug_943
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-259
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Debugging of modules using -m is broken
Used to be able to debug Flask & other code using the `-m` flag.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/debugger.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7 # import the wrapper first, so that it gets a chance
8 # to detour pydevd socket functionality.
9 import ptvsd.wrapper
10
11
12 __author__ = "Microsoft Corporation <[email protected]>"
13 __version__ = "4.0.0a4"
14
15 DONT_DEBUG = []
16
17
18 def debug(filename, port_num, debug_id, debug_options, run_as):
19 # TODO: docstring
20 address = (None, port_num)
21 if run_as == 'module':
22 _run_module(address, filename)
23 else:
24 _run_file(address, filename)
25
26
27 def _run_module(address, modname):
28 filename = modname + ':'
29 argv = _run_argv(address, filename)
30 argv.append('--module')
31 _run(argv)
32
33
34 def _run_file(address, filename):
35 argv = _run_argv(address, filename)
36 _run(argv)
37
38
39 def _run_argv(address, filename):
40 host, port = address
41 if host is None:
42 host = '127.0.0.1'
43 return [
44 '--port', str(port),
45 '--client', host,
46 '--file', filename,
47 ]
48
49
50 def _run(argv):
51 import pydevd
52 sys.argv[1:0] = argv
53 try:
54 pydevd.main()
55 except SystemExit as ex:
56 ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)
57 raise
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py
--- a/ptvsd/debugger.py
+++ b/ptvsd/debugger.py
@@ -27,7 +27,7 @@
def _run_module(address, modname):
filename = modname + ':'
argv = _run_argv(address, filename)
- argv.append('--module')
+ argv.insert(argv.index('--file'), '--module')
_run(argv)
|
{"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -27,7 +27,7 @@\n def _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n- argv.append('--module')\n+ argv.insert(argv.index('--file'), '--module')\n _run(argv)\n", "issue": "Debugging of modules using -m is broken\nUsed to be able to debug Flask & other code using the `-m` flag.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n# import the wrapper first, so that it gets a chance\n# to detour pydevd socket functionality.\nimport ptvsd.wrapper\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a4\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n _run_module(address, filename)\n else:\n _run_file(address, filename)\n\n\ndef _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n argv.append('--module')\n _run(argv)\n\n\ndef _run_file(address, filename):\n argv = _run_argv(address, filename)\n _run(argv)\n\n\ndef _run_argv(address, filename):\n host, port = address\n if host is None:\n host = '127.0.0.1'\n return [\n '--port', str(port),\n '--client', host,\n '--file', filename,\n ]\n\n\ndef _run(argv):\n import pydevd\n sys.argv[1:0] = argv\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "path": "ptvsd/debugger.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n# import the wrapper first, so that it gets a chance\n# to detour pydevd socket functionality.\nimport ptvsd.wrapper\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a3\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n _run_module(address, filename)\n else:\n _run_file(address, filename)\n\n\ndef _run_module(address, modname):\n filename = modname + ':'\n argv = _run_argv(address, filename)\n argv.insert(argv.index('--file'), '--module')\n _run(argv)\n\n\ndef _run_file(address, filename):\n argv = _run_argv(address, filename)\n _run(argv)\n\n\ndef _run_argv(address, filename):\n host, port = address\n if host is None:\n host = '127.0.0.1'\n return [\n '--port', str(port),\n '--client', host,\n '--file', filename,\n ]\n\n\ndef _run(argv):\n import pydevd\n sys.argv[1:0] = argv\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "path": "ptvsd/debugger.py"}]}
| 744 | 103 |
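The ordering change in the golden diff above matters because pydevd stops parsing its own options at `--file`: everything after that flag is treated as the debugged program's argv, so a `--module` appended at the end was handed to the target instead of switching pydevd into module mode. The sketch below is a standalone reconstruction of the corrected argv construction; the helper name and the demo values are invented for illustration.

```python
# Hedged sketch: '--module' has to sit before '--file' because pydevd treats
# everything after '--file' as the debugged program's own argv. The helper
# name and the demo values are illustrative only.
def build_pydevd_argv(host, port, filename, run_as_module=False):
    argv = [
        "--port", str(port),
        "--client", host or "127.0.0.1",
        "--file", filename,
    ]
    if run_as_module:
        argv.insert(argv.index("--file"), "--module")
    return argv


if __name__ == "__main__":
    print(build_pydevd_argv(None, 5678, "flask:", run_as_module=True))
    # ['--port', '5678', '--client', '127.0.0.1', '--module', '--file', 'flask:']
```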
gh_patches_debug_8163
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-84
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Debugging modules (using -m switch)
This is currently not supported
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/debugger.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7
8 __author__ = "Microsoft Corporation <[email protected]>"
9 __version__ = "4.0.0a1"
10
11 DONT_DEBUG = []
12
13
14 def debug(filename, port_num, debug_id, debug_options, run_as):
15 # TODO: docstring
16
17 # import the wrapper first, so that it gets a chance
18 # to detour pydevd socket functionality.
19 import ptvsd.wrapper
20 import pydevd
21
22 sys.argv[1:0] = [
23 '--port', str(port_num),
24 '--client', '127.0.0.1',
25 '--file', filename,
26 ]
27 try:
28 pydevd.main()
29 except SystemExit as ex:
30 ptvsd.wrapper.ptvsd_sys_exit_code = ex.code
31 raise
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py
--- a/ptvsd/debugger.py
+++ b/ptvsd/debugger.py
@@ -19,11 +19,16 @@
import ptvsd.wrapper
import pydevd
- sys.argv[1:0] = [
+ args = [
'--port', str(port_num),
'--client', '127.0.0.1',
- '--file', filename,
]
+ if run_as == 'module':
+ args.append('--module')
+ args.extend(('--file', filename + ":"))
+ else:
+ args.extend(('--file', filename))
+ sys.argv[1:0] = args
try:
pydevd.main()
except SystemExit as ex:
|
{"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -19,11 +19,16 @@\n import ptvsd.wrapper\n import pydevd\n \n- sys.argv[1:0] = [\n+ args = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n- '--file', filename,\n ]\n+ if run_as == 'module':\n+ args.append('--module')\n+ args.extend(('--file', filename + \":\"))\n+ else:\n+ args.extend(('--file', filename))\n+ sys.argv[1:0] = args\n try:\n pydevd.main()\n except SystemExit as ex:\n", "issue": "Debugging modules (using -m switch)\nThis is currently not supported\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a1\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n\n # import the wrapper first, so that it gets a chance\n # to detour pydevd socket functionality.\n import ptvsd.wrapper\n import pydevd\n\n sys.argv[1:0] = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n '--file', filename,\n ]\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = ex.code\n raise\n", "path": "ptvsd/debugger.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a1\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n\n # import the wrapper first, so that it gets a chance\n # to detour pydevd socket functionality.\n import ptvsd.wrapper\n import pydevd\n\n args = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n ]\n if run_as == 'module':\n args.append('--module')\n args.extend(('--file', filename + \":\"))\n else:\n args.extend(('--file', filename))\n sys.argv[1:0] = args\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = ex.code\n raise\n", "path": "ptvsd/debugger.py"}]}
| 542 | 185 |
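This earlier record reaches the same argument layout by appending `--module` before extending the list with `--file` and the module name plus a trailing colon, exactly as the golden diff constructs it. A small illustrative sketch of the argv such a module launch would prepend to `sys.argv` (the port and module name are invented for the example):

```python
# Illustrative only: the argv a module-mode launch would prepend, following
# the golden diff above. The port and module name are made up.
port_num = 5678
modname = "flask"
args = [
    "--port", str(port_num),
    "--client", "127.0.0.1",
]
args.append("--module")
args.extend(("--file", modname + ":"))
print(args)
# ['--port', '5678', '--client', '127.0.0.1', '--module', '--file', 'flask:']
```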
gh_patches_debug_33624
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-2375
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TVer] Extractor error (caused by KeyError('publisher_id')) for some video URLs
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2021.12.27**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [x] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
Japan
### Description
I get extractor errors when downloading certain videos from TVer. There are some video URLs (such as https://tver.jp/corner/f0095338) that can be downloaded successfully, but there are also some live video URLs (such as https://tver.jp/feature/f0095053) that are now videos, but instead is a LIVE broadcast prepared for the future. The URLs I just introduced have an expiration date for viewing, so please be careful.
### Verbose log
```shell
[debug] Command-line config: ['-Uv', 'https://tver.jp/feature/f0095053']
[debug] Encodings: locale cp932, fs utf-8, out utf-8, err utf-8, pref cp932
[debug] yt-dlp version 2021.12.27 [6223f67a8]
[debug] Python version 3.8.12 (CPython 64bit) - Windows-10-10.0.19044-SP0
[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev
[debug] Optional libraries: Cryptodome, mutagen, sqlite, websockets
[debug] Proxy map: {}
Latest version: 2021.12.27, Current version: 2021.12.27
yt-dlp is up to date (2021.12.27)
[TVer] Downloading JSON metadata
[debug] [TVer] Extracting URL: https://tver.jp/feature/f0095053
[TVer] f0095053: Downloading JSON metadata
ERROR: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\common.py", line 599, in extract
ie_result = self._real_extract(url)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\tver.py", line 44, in _real_extract
p_id = main['publisher_id']
KeyError: 'publisher_id'
Traceback (most recent call last):
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\common.py", line 599, in extract
ie_result = self._real_extract(url)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\tver.py", line 44, in _real_extract
p_id = main['publisher_id']
KeyError: 'publisher_id'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\YoutubeDL.py", line 1353, in wrapper
return func(self, *args, **kwargs)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\YoutubeDL.py", line 1423, in __extract_info
ie_result = ie.extract(url)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\common.py", line 629, in extract
raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
yt_dlp.utils.ExtractorError: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.
```
[TVer] Extractor error (caused by KeyError('publisher_id')) for some video URLs
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2021.12.27**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [x] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
Japan
### Description
I get extractor errors when downloading certain videos from TVer. There are some video URLs (such as https://tver.jp/corner/f0095338) that can be downloaded successfully, but there are also some live video URLs (such as https://tver.jp/feature/f0095053) that are now videos, but instead is a LIVE broadcast prepared for the future. The URLs I just introduced have an expiration date for viewing, so please be careful.
### Verbose log
```shell
[debug] Command-line config: ['-Uv', 'https://tver.jp/feature/f0095053']
[debug] Encodings: locale cp932, fs utf-8, out utf-8, err utf-8, pref cp932
[debug] yt-dlp version 2021.12.27 [6223f67a8]
[debug] Python version 3.8.12 (CPython 64bit) - Windows-10-10.0.19044-SP0
[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev
[debug] Optional libraries: Cryptodome, mutagen, sqlite, websockets
[debug] Proxy map: {}
Latest version: 2021.12.27, Current version: 2021.12.27
yt-dlp is up to date (2021.12.27)
[TVer] Downloading JSON metadata
[debug] [TVer] Extracting URL: https://tver.jp/feature/f0095053
[TVer] f0095053: Downloading JSON metadata
ERROR: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\common.py", line 599, in extract
ie_result = self._real_extract(url)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\tver.py", line 44, in _real_extract
p_id = main['publisher_id']
KeyError: 'publisher_id'
Traceback (most recent call last):
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\common.py", line 599, in extract
ie_result = self._real_extract(url)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\tver.py", line 44, in _real_extract
p_id = main['publisher_id']
KeyError: 'publisher_id'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\YoutubeDL.py", line 1353, in wrapper
return func(self, *args, **kwargs)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\YoutubeDL.py", line 1423, in __extract_info
ie_result = ie.extract(url)
File "C:\Users\User\anaconda3\lib\site-packages\yt_dlp\extractor\common.py", line 629, in extract
raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
yt_dlp.utils.ExtractorError: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/tver.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4
5 from .common import InfoExtractor
6 from ..compat import compat_str
7 from ..utils import (
8 int_or_none,
9 remove_start,
10 smuggle_url,
11 try_get,
12 )
13
14
15 class TVerIE(InfoExtractor):
16 _VALID_URL = r'https?://(?:www\.)?tver\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\d+))'
17 # videos are only available for 7 days
18 _TESTS = [{
19 'url': 'https://tver.jp/corner/f0062178',
20 'only_matching': True,
21 }, {
22 'url': 'https://tver.jp/feature/f0062413',
23 'only_matching': True,
24 }, {
25 'url': 'https://tver.jp/episode/79622438',
26 'only_matching': True,
27 }, {
28 # subtitle = ' '
29 'url': 'https://tver.jp/corner/f0068870',
30 'only_matching': True,
31 }]
32 _TOKEN = None
33 BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
34
35 def _real_initialize(self):
36 self._TOKEN = self._download_json(
37 'https://tver.jp/api/access_token.php', None)['token']
38
39 def _real_extract(self, url):
40 path, video_id = self._match_valid_url(url).groups()
41 main = self._download_json(
42 'https://api.tver.jp/v4/' + path, video_id,
43 query={'token': self._TOKEN})['main']
44 p_id = main['publisher_id']
45 service = remove_start(main['service'], 'ts_')
46
47 r_id = main['reference_id']
48 if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
49 r_id = 'ref:' + r_id
50 bc_url = smuggle_url(
51 self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),
52 {'geo_countries': ['JP']})
53
54 return {
55 '_type': 'url_transparent',
56 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
57 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
58 'url': bc_url,
59 'ie_key': 'BrightcoveNew',
60 }
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/tver.py b/yt_dlp/extractor/tver.py
--- a/yt_dlp/extractor/tver.py
+++ b/yt_dlp/extractor/tver.py
@@ -5,10 +5,11 @@
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
+ ExtractorError,
int_or_none,
remove_start,
smuggle_url,
- try_get,
+ traverse_obj,
)
@@ -38,13 +39,18 @@
def _real_extract(self, url):
path, video_id = self._match_valid_url(url).groups()
- main = self._download_json(
+ api_response = self._download_json(
'https://api.tver.jp/v4/' + path, video_id,
- query={'token': self._TOKEN})['main']
- p_id = main['publisher_id']
- service = remove_start(main['service'], 'ts_')
+ query={'token': self._TOKEN})
+ p_id = traverse_obj(api_response, ('main', 'publisher_id'))
+ if not p_id:
+ error_msg, expected = traverse_obj(api_response, ('episode', 0, 'textbar', 0, ('text', 'longer')), get_all=False), True
+ if not error_msg:
+ error_msg, expected = 'Failed to extract publisher ID', False
+ raise ExtractorError(error_msg, expected=expected)
+ service = remove_start(traverse_obj(api_response, ('main', 'service')), 'ts_')
- r_id = main['reference_id']
+ r_id = traverse_obj(api_response, ('main', 'reference_id'))
if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
r_id = 'ref:' + r_id
bc_url = smuggle_url(
@@ -53,8 +59,8 @@
return {
'_type': 'url_transparent',
- 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
- 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
+ 'description': traverse_obj(api_response, ('main', 'note', 0, 'text'), expected_type=compat_str),
+ 'episode_number': int_or_none(traverse_obj(api_response, ('main', 'ext', 'episode_number'), expected_type=compat_str)),
'url': bc_url,
'ie_key': 'BrightcoveNew',
}
|
{"golden_diff": "diff --git a/yt_dlp/extractor/tver.py b/yt_dlp/extractor/tver.py\n--- a/yt_dlp/extractor/tver.py\n+++ b/yt_dlp/extractor/tver.py\n@@ -5,10 +5,11 @@\n from .common import InfoExtractor\n from ..compat import compat_str\n from ..utils import (\n+ ExtractorError,\n int_or_none,\n remove_start,\n smuggle_url,\n- try_get,\n+ traverse_obj,\n )\n \n \n@@ -38,13 +39,18 @@\n \n def _real_extract(self, url):\n path, video_id = self._match_valid_url(url).groups()\n- main = self._download_json(\n+ api_response = self._download_json(\n 'https://api.tver.jp/v4/' + path, video_id,\n- query={'token': self._TOKEN})['main']\n- p_id = main['publisher_id']\n- service = remove_start(main['service'], 'ts_')\n+ query={'token': self._TOKEN})\n+ p_id = traverse_obj(api_response, ('main', 'publisher_id'))\n+ if not p_id:\n+ error_msg, expected = traverse_obj(api_response, ('episode', 0, 'textbar', 0, ('text', 'longer')), get_all=False), True\n+ if not error_msg:\n+ error_msg, expected = 'Failed to extract publisher ID', False\n+ raise ExtractorError(error_msg, expected=expected)\n+ service = remove_start(traverse_obj(api_response, ('main', 'service')), 'ts_')\n \n- r_id = main['reference_id']\n+ r_id = traverse_obj(api_response, ('main', 'reference_id'))\n if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):\n r_id = 'ref:' + r_id\n bc_url = smuggle_url(\n@@ -53,8 +59,8 @@\n \n return {\n '_type': 'url_transparent',\n- 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),\n- 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),\n+ 'description': traverse_obj(api_response, ('main', 'note', 0, 'text'), expected_type=compat_str),\n+ 'episode_number': int_or_none(traverse_obj(api_response, ('main', 'ext', 'episode_number'), expected_type=compat_str)),\n 'url': bc_url,\n 'ie_key': 'BrightcoveNew',\n }\n", "issue": "[TVer] Extractor error (caused by KeyError('publisher_id')) for some video URLs\n### Checklist\r\n\r\n- [X] I'm reporting a broken site\r\n- [X] I've verified that I'm running yt-dlp version **2021.12.27**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))\r\n- [X] I've checked that all provided URLs are alive and playable in a browser\r\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\r\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n- [x] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\r\n\r\n### Region\r\n\r\nJapan\r\n\r\n### Description\r\n\r\nI get extractor errors when downloading certain videos from TVer. There are some video URLs (such as https://tver.jp/corner/f0095338) that can be downloaded successfully, but there are also some live video URLs (such as https://tver.jp/feature/f0095053) that are now videos, but instead is a LIVE broadcast prepared for the future. 
The URLs I just introduced have an expiration date for viewing, so please be careful.\r\n\r\n### Verbose log\r\n\r\n```shell\r\n[debug] Command-line config: ['-Uv', 'https://tver.jp/feature/f0095053']\r\n[debug] Encodings: locale cp932, fs utf-8, out utf-8, err utf-8, pref cp932\r\n[debug] yt-dlp version 2021.12.27 [6223f67a8]\r\n[debug] Python version 3.8.12 (CPython 64bit) - Windows-10-10.0.19044-SP0\r\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev\r\n[debug] Optional libraries: Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2021.12.27, Current version: 2021.12.27\r\nyt-dlp is up to date (2021.12.27)\r\n[TVer] Downloading JSON metadata\r\n[debug] [TVer] Extracting URL: https://tver.jp/feature/f0095053\r\n[TVer] f0095053: Downloading JSON metadata\r\nERROR: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 599, in extract\r\n ie_result = self._real_extract(url)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\tver.py\", line 44, in _real_extract\r\n p_id = main['publisher_id']\r\nKeyError: 'publisher_id'\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 599, in extract\r\n ie_result = self._real_extract(url)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\tver.py\", line 44, in _real_extract\r\n p_id = main['publisher_id']\r\nKeyError: 'publisher_id'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\YoutubeDL.py\", line 1353, in wrapper\r\n return func(self, *args, **kwargs)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\YoutubeDL.py\", line 1423, in __extract_info\r\n ie_result = ie.extract(url)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 629, in extract\r\n raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))\r\nyt_dlp.utils.ExtractorError: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.\r\n```\r\n\n[TVer] Extractor error (caused by KeyError('publisher_id')) for some video URLs\n### Checklist\r\n\r\n- [X] I'm reporting a broken site\r\n- [X] I've verified that I'm running yt-dlp version **2021.12.27**. 
([update instructions](https://github.com/yt-dlp/yt-dlp#update))\r\n- [X] I've checked that all provided URLs are alive and playable in a browser\r\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\r\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\r\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\r\n- [x] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\r\n\r\n### Region\r\n\r\nJapan\r\n\r\n### Description\r\n\r\nI get extractor errors when downloading certain videos from TVer. There are some video URLs (such as https://tver.jp/corner/f0095338) that can be downloaded successfully, but there are also some live video URLs (such as https://tver.jp/feature/f0095053) that are now videos, but instead is a LIVE broadcast prepared for the future. The URLs I just introduced have an expiration date for viewing, so please be careful.\r\n\r\n### Verbose log\r\n\r\n```shell\r\n[debug] Command-line config: ['-Uv', 'https://tver.jp/feature/f0095053']\r\n[debug] Encodings: locale cp932, fs utf-8, out utf-8, err utf-8, pref cp932\r\n[debug] yt-dlp version 2021.12.27 [6223f67a8]\r\n[debug] Python version 3.8.12 (CPython 64bit) - Windows-10-10.0.19044-SP0\r\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev\r\n[debug] Optional libraries: Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2021.12.27, Current version: 2021.12.27\r\nyt-dlp is up to date (2021.12.27)\r\n[TVer] Downloading JSON metadata\r\n[debug] [TVer] Extracting URL: https://tver.jp/feature/f0095053\r\n[TVer] f0095053: Downloading JSON metadata\r\nERROR: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. 
Be sure to call yt-dlp with the --verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 599, in extract\r\n ie_result = self._real_extract(url)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\tver.py\", line 44, in _real_extract\r\n p_id = main['publisher_id']\r\nKeyError: 'publisher_id'\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 599, in extract\r\n ie_result = self._real_extract(url)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\tver.py\", line 44, in _real_extract\r\n p_id = main['publisher_id']\r\nKeyError: 'publisher_id'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\YoutubeDL.py\", line 1353, in wrapper\r\n return func(self, *args, **kwargs)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\YoutubeDL.py\", line 1423, in __extract_info\r\n ie_result = ie.extract(url)\r\n File \"C:\\Users\\User\\anaconda3\\lib\\site-packages\\yt_dlp\\extractor\\common.py\", line 629, in extract\r\n raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))\r\nyt_dlp.utils.ExtractorError: f0095053: An extractor error has occurred. (caused by KeyError('publisher_id')); please report this issue on https://github.com/yt-dlp/yt-dlp . Make sure you are using the latest version; see https://github.com/yt-dlp/yt-dlp on how to update. Be sure to call yt-dlp with the --verbose flag and include its complete output.\r\n```\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n int_or_none,\n remove_start,\n smuggle_url,\n try_get,\n)\n\n\nclass TVerIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?tver\\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\\d+))'\n # videos are only available for 7 days\n _TESTS = [{\n 'url': 'https://tver.jp/corner/f0062178',\n 'only_matching': True,\n }, {\n 'url': 'https://tver.jp/feature/f0062413',\n 'only_matching': True,\n }, {\n 'url': 'https://tver.jp/episode/79622438',\n 'only_matching': True,\n }, {\n # subtitle = ' '\n 'url': 'https://tver.jp/corner/f0068870',\n 'only_matching': True,\n }]\n _TOKEN = None\n BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'\n\n def _real_initialize(self):\n self._TOKEN = self._download_json(\n 'https://tver.jp/api/access_token.php', None)['token']\n\n def _real_extract(self, url):\n path, video_id = self._match_valid_url(url).groups()\n main = self._download_json(\n 'https://api.tver.jp/v4/' + path, video_id,\n query={'token': self._TOKEN})['main']\n p_id = main['publisher_id']\n service = remove_start(main['service'], 'ts_')\n\n r_id = main['reference_id']\n if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):\n r_id = 'ref:' + r_id\n bc_url = smuggle_url(\n self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),\n {'geo_countries': ['JP']})\n\n return {\n '_type': 'url_transparent',\n 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),\n 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),\n 'url': bc_url,\n 'ie_key': 
'BrightcoveNew',\n }\n", "path": "yt_dlp/extractor/tver.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n ExtractorError,\n int_or_none,\n remove_start,\n smuggle_url,\n traverse_obj,\n)\n\n\nclass TVerIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?tver\\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\\d+))'\n # videos are only available for 7 days\n _TESTS = [{\n 'url': 'https://tver.jp/corner/f0062178',\n 'only_matching': True,\n }, {\n 'url': 'https://tver.jp/feature/f0062413',\n 'only_matching': True,\n }, {\n 'url': 'https://tver.jp/episode/79622438',\n 'only_matching': True,\n }, {\n # subtitle = ' '\n 'url': 'https://tver.jp/corner/f0068870',\n 'only_matching': True,\n }]\n _TOKEN = None\n BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'\n\n def _real_initialize(self):\n self._TOKEN = self._download_json(\n 'https://tver.jp/api/access_token.php', None)['token']\n\n def _real_extract(self, url):\n path, video_id = self._match_valid_url(url).groups()\n api_response = self._download_json(\n 'https://api.tver.jp/v4/' + path, video_id,\n query={'token': self._TOKEN})\n p_id = traverse_obj(api_response, ('main', 'publisher_id'))\n if not p_id:\n error_msg, expected = traverse_obj(api_response, ('episode', 0, 'textbar', 0, ('text', 'longer')), get_all=False), True\n if not error_msg:\n error_msg, expected = 'Failed to extract publisher ID', False\n raise ExtractorError(error_msg, expected=expected)\n service = remove_start(traverse_obj(api_response, ('main', 'service')), 'ts_')\n\n r_id = traverse_obj(api_response, ('main', 'reference_id'))\n if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):\n r_id = 'ref:' + r_id\n bc_url = smuggle_url(\n self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),\n {'geo_countries': ['JP']})\n\n return {\n '_type': 'url_transparent',\n 'description': traverse_obj(api_response, ('main', 'note', 0, 'text'), expected_type=compat_str),\n 'episode_number': int_or_none(traverse_obj(api_response, ('main', 'ext', 'episode_number'), expected_type=compat_str)),\n 'url': bc_url,\n 'ie_key': 'BrightcoveNew',\n }\n", "path": "yt_dlp/extractor/tver.py"}]}
| 3,531 | 585 |
gh_patches_debug_16822
|
rasdani/github-patches
|
git_diff
|
tough-dev-school__education-backend-560
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User.DoesNotExist: User matching query does not exist.
Sentry Issue: [EDUCATION-BACKEND-23](https://sentry.io/organizations/f213/issues/2200858697/?referrer=github_integration)
```
User.DoesNotExist: User matching query does not exist.
File "celery/app/autoretry.py", line 34, in run
return task._orig_run(*args, **kwargs)
File "app/tasks.py", line 77, in subscribe_to_mailchimp
user=apps.get_model('users.User').objects.get(pk=user_id),
File "django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "django/db/models/query.py", line 435, in get
raise self.model.DoesNotExist(
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/app/tasks.py`
Content:
```
1 from typing import List, Union
2
3 from anymail.exceptions import AnymailRequestsAPIError
4 from django.apps import apps
5 from django.conf import settings
6 from requests.exceptions import RequestException
7
8 from app.celery import celery
9 from app.integrations import tg
10 from app.integrations.clickmeeting import ClickMeetingClient, ClickMeetingHTTPException
11 from app.integrations.mailchimp import AppMailchimp, MailchimpException
12 from app.integrations.zoomus import ZoomusClient, ZoomusHTTPException
13 from app.mail.owl import TemplOwl
14
15
16 @celery.task(
17 autoretry_for=[AnymailRequestsAPIError],
18 retry_kwargs={
19 'max_retries': 10,
20 'countdown': 5,
21 },
22 )
23 def send_mail(to: Union[List, str], template_id, subject: str = '', ctx: dict = None, disable_antispam=False):
24 TemplOwl(
25 to=to,
26 template_id=template_id,
27 subject=subject,
28 ctx=ctx,
29 disable_antispam=disable_antispam,
30 ).send()
31
32
33 @celery.task(
34 autoretry_for=[RequestException, ClickMeetingHTTPException],
35 retry_kwargs={
36 'max_retries': 10,
37 'countdown': 5,
38 },
39 )
40 def invite_to_clickmeeting(room_url: str, email: str):
41 client = ClickMeetingClient()
42 client.invite(room_url, email)
43
44
45 @celery.task(
46 autoretry_for=[RequestException, ZoomusHTTPException],
47 retry_kwargs={
48 'max_retries': 10,
49 'countdown': 5,
50 },
51 )
52 def invite_to_zoomus(webinar_id: str, user_id: int):
53 user = apps.get_model('users.User').objects.get(pk=user_id)
54
55 client = ZoomusClient()
56 client.invite(webinar_id, user)
57
58
59 @celery.task(
60 autoretry_for=[RequestException, MailchimpException],
61 retry_kwargs={
62 'max_retries': 10,
63 'countdown': 5,
64 },
65 )
66 def subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):
67 if list_id is None:
68 list_id = settings.MAILCHIMP_CONTACT_LIST_ID
69
70 if not list_id:
71 return
72
73 mailchimp = AppMailchimp()
74
75 mailchimp.subscribe_django_user(
76 list_id=list_id,
77 user=apps.get_model('users.User').objects.get(pk=user_id),
78 tags=tags,
79 )
80
81
82 @celery.task
83 def send_happiness_message(text):
84 tg.send_happiness_message(text)
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/app/tasks.py b/src/app/tasks.py
--- a/src/app/tasks.py
+++ b/src/app/tasks.py
@@ -3,6 +3,7 @@
from anymail.exceptions import AnymailRequestsAPIError
from django.apps import apps
from django.conf import settings
+from django.core.exceptions import ObjectDoesNotExist
from requests.exceptions import RequestException
from app.celery import celery
@@ -57,11 +58,12 @@
@celery.task(
- autoretry_for=[RequestException, MailchimpException],
+ autoretry_for=[RequestException, MailchimpException, ObjectDoesNotExist],
retry_kwargs={
'max_retries': 10,
'countdown': 5,
},
+ rate_limit='1/s',
)
def subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):
if list_id is None:
|
{"golden_diff": "diff --git a/src/app/tasks.py b/src/app/tasks.py\n--- a/src/app/tasks.py\n+++ b/src/app/tasks.py\n@@ -3,6 +3,7 @@\n from anymail.exceptions import AnymailRequestsAPIError\n from django.apps import apps\n from django.conf import settings\n+from django.core.exceptions import ObjectDoesNotExist\n from requests.exceptions import RequestException\n \n from app.celery import celery\n@@ -57,11 +58,12 @@\n \n \n @celery.task(\n- autoretry_for=[RequestException, MailchimpException],\n+ autoretry_for=[RequestException, MailchimpException, ObjectDoesNotExist],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n+ rate_limit='1/s',\n )\n def subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):\n if list_id is None:\n", "issue": "User.DoesNotExist: User matching query does not exist.\nSentry Issue: [EDUCATION-BACKEND-23](https://sentry.io/organizations/f213/issues/2200858697/?referrer=github_integration)\n\n```\nUser.DoesNotExist: User matching query does not exist.\n File \"celery/app/autoretry.py\", line 34, in run\n return task._orig_run(*args, **kwargs)\n File \"app/tasks.py\", line 77, in subscribe_to_mailchimp\n user=apps.get_model('users.User').objects.get(pk=user_id),\n File \"django/db/models/manager.py\", line 85, in manager_method\n return getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n```\n", "before_files": [{"content": "from typing import List, Union\n\nfrom anymail.exceptions import AnymailRequestsAPIError\nfrom django.apps import apps\nfrom django.conf import settings\nfrom requests.exceptions import RequestException\n\nfrom app.celery import celery\nfrom app.integrations import tg\nfrom app.integrations.clickmeeting import ClickMeetingClient, ClickMeetingHTTPException\nfrom app.integrations.mailchimp import AppMailchimp, MailchimpException\nfrom app.integrations.zoomus import ZoomusClient, ZoomusHTTPException\nfrom app.mail.owl import TemplOwl\n\n\[email protected](\n autoretry_for=[AnymailRequestsAPIError],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef send_mail(to: Union[List, str], template_id, subject: str = '', ctx: dict = None, disable_antispam=False):\n TemplOwl(\n to=to,\n template_id=template_id,\n subject=subject,\n ctx=ctx,\n disable_antispam=disable_antispam,\n ).send()\n\n\[email protected](\n autoretry_for=[RequestException, ClickMeetingHTTPException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef invite_to_clickmeeting(room_url: str, email: str):\n client = ClickMeetingClient()\n client.invite(room_url, email)\n\n\[email protected](\n autoretry_for=[RequestException, ZoomusHTTPException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef invite_to_zoomus(webinar_id: str, user_id: int):\n user = apps.get_model('users.User').objects.get(pk=user_id)\n\n client = ZoomusClient()\n client.invite(webinar_id, user)\n\n\[email protected](\n autoretry_for=[RequestException, MailchimpException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):\n if list_id is None:\n list_id = settings.MAILCHIMP_CONTACT_LIST_ID\n\n if not list_id:\n return\n\n mailchimp = AppMailchimp()\n\n mailchimp.subscribe_django_user(\n list_id=list_id,\n user=apps.get_model('users.User').objects.get(pk=user_id),\n tags=tags,\n )\n\n\[email protected]\ndef send_happiness_message(text):\n tg.send_happiness_message(text)\n", "path": 
"src/app/tasks.py"}], "after_files": [{"content": "from typing import List, Union\n\nfrom anymail.exceptions import AnymailRequestsAPIError\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom requests.exceptions import RequestException\n\nfrom app.celery import celery\nfrom app.integrations import tg\nfrom app.integrations.clickmeeting import ClickMeetingClient, ClickMeetingHTTPException\nfrom app.integrations.mailchimp import AppMailchimp, MailchimpException\nfrom app.integrations.zoomus import ZoomusClient, ZoomusHTTPException\nfrom app.mail.owl import TemplOwl\n\n\[email protected](\n autoretry_for=[AnymailRequestsAPIError],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef send_mail(to: Union[List, str], template_id, subject: str = '', ctx: dict = None, disable_antispam=False):\n TemplOwl(\n to=to,\n template_id=template_id,\n subject=subject,\n ctx=ctx,\n disable_antispam=disable_antispam,\n ).send()\n\n\[email protected](\n autoretry_for=[RequestException, ClickMeetingHTTPException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef invite_to_clickmeeting(room_url: str, email: str):\n client = ClickMeetingClient()\n client.invite(room_url, email)\n\n\[email protected](\n autoretry_for=[RequestException, ZoomusHTTPException],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n)\ndef invite_to_zoomus(webinar_id: str, user_id: int):\n user = apps.get_model('users.User').objects.get(pk=user_id)\n\n client = ZoomusClient()\n client.invite(webinar_id, user)\n\n\[email protected](\n autoretry_for=[RequestException, MailchimpException, ObjectDoesNotExist],\n retry_kwargs={\n 'max_retries': 10,\n 'countdown': 5,\n },\n rate_limit='1/s',\n)\ndef subscribe_to_mailchimp(user_id: int, list_id=None, tags=None):\n if list_id is None:\n list_id = settings.MAILCHIMP_CONTACT_LIST_ID\n\n if not list_id:\n return\n\n mailchimp = AppMailchimp()\n\n mailchimp.subscribe_django_user(\n list_id=list_id,\n user=apps.get_model('users.User').objects.get(pk=user_id),\n tags=tags,\n )\n\n\[email protected]\ndef send_happiness_message(text):\n tg.send_happiness_message(text)\n", "path": "src/app/tasks.py"}]}
| 1,175 | 196 |
gh_patches_debug_21052
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1251
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0008_migrate_processing_date cannot be run when existing Payables have payments
### Describe the bug
Inside the `0008_migrate_processing_date` migration we delete payments. However, the payables they belong to protect them so the migration fails.
### How to reproduce
Steps to reproduce the behaviour:
1. Have an existing pizza order or membership renewal with a non-processed payment
2. Execute migration
### Expected behaviour
Successful migration.
### Additional context
```
web_1 | Running migrations:
web_1 | Traceback (most recent call last):
web_1 | File "./manage.py", line 19, in <module>
web_1 | execute_from_command_line(sys.argv)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
web_1 | utility.execute()
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
web_1 | self.fetch_command(subcommand).run_from_argv(self.argv)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 330, in run_from_argv
web_1 | self.execute(*args, **cmd_options)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 371, in execute
web_1 | output = self.handle(*args, **options)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 85, in wrapped
web_1 | res = handle_func(*args, **kwargs)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/commands/migrate.py", line 243, in handle
web_1 | post_migrate_state = executor.migrate(
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 117, in migrate
web_1 | state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards
web_1 | state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 227, in apply_migration
web_1 | state = migration.apply(state, schema_editor)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/migration.py", line 124, in apply
web_1 | operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards
web_1 | self.code(from_state.apps, schema_editor)
web_1 | File "/usr/src/app/website/payments/migrations/0008_migrate_processing_date.py", line 11, in set_created_at
web_1 | payment.delete()
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 946, in delete
web_1 | collector.collect([self], keep_parents=keep_parents)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/models/deletion.py", line 302, in collect
web_1 | raise ProtectedError(
web_1 | django.db.models.deletion.ProtectedError: ("Cannot delete some instances of model 'Payment' because they are referenced through protected foreign keys: 'Entry.payment'.", <itertools.chain object at 0x7f087b01aa00>)
```
```
web_1 | Running migrations:
web_1 | Traceback (most recent call last):
web_1 | File "./manage.py", line 19, in <module>
web_1 | execute_from_command_line(sys.argv)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
web_1 | utility.execute()
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py", line 395, in execute
web_1 | self.fetch_command(subcommand).run_from_argv(self.argv)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 330, in run_from_argv
web_1 | self.execute(*args, **cmd_options)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 371, in execute
web_1 | output = self.handle(*args, **options)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/base.py", line 85, in wrapped
web_1 | res = handle_func(*args, **kwargs)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/core/management/commands/migrate.py", line 243, in handle
web_1 | post_migrate_state = executor.migrate(
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 117, in migrate
web_1 | state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards
web_1 | state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py", line 227, in apply_migration
web_1 | state = migration.apply(state, schema_editor)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/migration.py", line 124, in apply
web_1 | operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards
web_1 | self.code(from_state.apps, schema_editor)
web_1 | File "/usr/src/app/website/payments/migrations/0008_migrate_processing_date.py", line 11, in set_created_at
web_1 | payment.delete()
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/models/base.py", line 946, in delete
web_1 | collector.collect([self], keep_parents=keep_parents)
web_1 | File "/usr/local/lib/python3.8/site-packages/django/db/models/deletion.py", line 302, in collect
web_1 | raise ProtectedError(
web_1 | django.db.models.deletion.ProtectedError: ("Cannot delete some instances of model 'Payment' because they are referenced through protected foreign keys: 'Order.payment'.", <itertools.chain object at 0x7f5e652bcaf0>)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/payments/migrations/0008_migrate_processing_date.py`
Content:
```
1 from django.db import migrations
2
3
4 def set_created_at(apps, schema_editor):
5 """On this migration, set the created at to the processing date."""
6 Payment = apps.get_model('payments', 'Payment')
7 for payment in Payment.objects.all():
8 if payment.processing_date:
9 payment.created_at = payment.processing_date
10 else:
11 payment.delete()
12
13
14 class Migration(migrations.Migration):
15
16 dependencies = [
17 ('payments', '0007_auto_20200510_2042'),
18 ]
19
20 operations = [
21 migrations.RunPython(set_created_at)
22 ]
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/payments/migrations/0008_migrate_processing_date.py b/website/payments/migrations/0008_migrate_processing_date.py
--- a/website/payments/migrations/0008_migrate_processing_date.py
+++ b/website/payments/migrations/0008_migrate_processing_date.py
@@ -8,9 +8,26 @@
if payment.processing_date:
payment.created_at = payment.processing_date
else:
+ if hasattr(payment, 'pizzas_order'):
+ order = payment.pizzas_order
+ order.payment = None
+ order.save()
+
+ if hasattr(payment, 'registrations_entry'):
+ entry = payment.registrations_entry
+ entry.payment = None
+ entry.save()
+
payment.delete()
+def set_processed_date(apps, schema_editor):
+ """Revert sets the processing date to the created_at value."""
+ Payment = apps.get_model('payments', 'Payment')
+ for payment in Payment.objects.all():
+ payment.processing_date = payment.created_at
+
+
class Migration(migrations.Migration):
dependencies = [
@@ -18,5 +35,5 @@
]
operations = [
- migrations.RunPython(set_created_at)
+ migrations.RunPython(set_created_at, set_processed_date)
]
|
{"golden_diff": "diff --git a/website/payments/migrations/0008_migrate_processing_date.py b/website/payments/migrations/0008_migrate_processing_date.py\n--- a/website/payments/migrations/0008_migrate_processing_date.py\n+++ b/website/payments/migrations/0008_migrate_processing_date.py\n@@ -8,9 +8,26 @@\n if payment.processing_date:\n payment.created_at = payment.processing_date\n else:\n+ if hasattr(payment, 'pizzas_order'):\n+ order = payment.pizzas_order\n+ order.payment = None\n+ order.save()\n+\n+ if hasattr(payment, 'registrations_entry'):\n+ entry = payment.registrations_entry\n+ entry.payment = None\n+ entry.save()\n+\n payment.delete()\n \n \n+def set_processed_date(apps, schema_editor):\n+ \"\"\"Revert sets the processing date to the created_at value.\"\"\"\n+ Payment = apps.get_model('payments', 'Payment')\n+ for payment in Payment.objects.all():\n+ payment.processing_date = payment.created_at\n+\n+\n class Migration(migrations.Migration):\n \n dependencies = [\n@@ -18,5 +35,5 @@\n ]\n \n operations = [\n- migrations.RunPython(set_created_at)\n+ migrations.RunPython(set_created_at, set_processed_date)\n ]\n", "issue": "0008_migrate_processing_date cannot be run when existing Payables have payments\n### Describe the bug\r\n\r\nInside the `0008_migrate_processing_date` migration we delete payments. However, the payables they belong to protect them so the migration fails.\r\n\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Have an existing pizza order or membership renewal with a non-processed payment\r\n2. Execute migration\r\n\r\n### Expected behaviour\r\nSuccessful migration.\r\n\r\n### Additional context\r\n```\r\nweb_1 | Running migrations:\r\nweb_1 | Traceback (most recent call last):\r\nweb_1 | File \"./manage.py\", line 19, in <module>\r\nweb_1 | execute_from_command_line(sys.argv)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\nweb_1 | utility.execute()\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py\", line 395, in execute\r\nweb_1 | self.fetch_command(subcommand).run_from_argv(self.argv)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/base.py\", line 330, in run_from_argv\r\nweb_1 | self.execute(*args, **cmd_options)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/base.py\", line 371, in execute\r\nweb_1 | output = self.handle(*args, **options)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/base.py\", line 85, in wrapped\r\nweb_1 | res = handle_func(*args, **kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/commands/migrate.py\", line 243, in handle\r\nweb_1 | post_migrate_state = executor.migrate(\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py\", line 117, in migrate\r\nweb_1 | state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py\", line 147, in _migrate_all_forwards\r\nweb_1 | state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py\", line 227, in apply_migration\r\nweb_1 | state = migration.apply(state, schema_editor)\r\nweb_1 | File 
\"/usr/local/lib/python3.8/site-packages/django/db/migrations/migration.py\", line 124, in apply\r\nweb_1 | operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/operations/special.py\", line 190, in database_forwards\r\nweb_1 | self.code(from_state.apps, schema_editor)\r\nweb_1 | File \"/usr/src/app/website/payments/migrations/0008_migrate_processing_date.py\", line 11, in set_created_at\r\nweb_1 | payment.delete()\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/models/base.py\", line 946, in delete\r\nweb_1 | collector.collect([self], keep_parents=keep_parents)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/models/deletion.py\", line 302, in collect\r\nweb_1 | raise ProtectedError(\r\nweb_1 | django.db.models.deletion.ProtectedError: (\"Cannot delete some instances of model 'Payment' because they are referenced through protected foreign keys: 'Entry.payment'.\", <itertools.chain object at 0x7f087b01aa00>)\r\n```\r\n\r\n```\r\nweb_1 | Running migrations:\r\nweb_1 | Traceback (most recent call last):\r\nweb_1 | File \"./manage.py\", line 19, in <module>\r\nweb_1 | execute_from_command_line(sys.argv)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\nweb_1 | utility.execute()\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/__init__.py\", line 395, in execute\r\nweb_1 | self.fetch_command(subcommand).run_from_argv(self.argv)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/base.py\", line 330, in run_from_argv\r\nweb_1 | self.execute(*args, **cmd_options)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/base.py\", line 371, in execute\r\nweb_1 | output = self.handle(*args, **options)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/base.py\", line 85, in wrapped\r\nweb_1 | res = handle_func(*args, **kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/management/commands/migrate.py\", line 243, in handle\r\nweb_1 | post_migrate_state = executor.migrate(\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py\", line 117, in migrate\r\nweb_1 | state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py\", line 147, in _migrate_all_forwards\r\nweb_1 | state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/executor.py\", line 227, in apply_migration\r\nweb_1 | state = migration.apply(state, schema_editor)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/migration.py\", line 124, in apply\r\nweb_1 | operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/migrations/operations/special.py\", line 190, in database_forwards\r\nweb_1 | self.code(from_state.apps, schema_editor)\r\nweb_1 | File \"/usr/src/app/website/payments/migrations/0008_migrate_processing_date.py\", line 11, in set_created_at\r\nweb_1 | payment.delete()\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/models/base.py\", line 946, in delete\r\nweb_1 | 
collector.collect([self], keep_parents=keep_parents)\r\nweb_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/models/deletion.py\", line 302, in collect\r\nweb_1 | raise ProtectedError(\r\nweb_1 | django.db.models.deletion.ProtectedError: (\"Cannot delete some instances of model 'Payment' because they are referenced through protected foreign keys: 'Order.payment'.\", <itertools.chain object at 0x7f5e652bcaf0>)\r\n```\n", "before_files": [{"content": "from django.db import migrations\n\n\ndef set_created_at(apps, schema_editor):\n \"\"\"On this migration, set the created at to the processing date.\"\"\"\n Payment = apps.get_model('payments', 'Payment')\n for payment in Payment.objects.all():\n if payment.processing_date:\n payment.created_at = payment.processing_date\n else:\n payment.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('payments', '0007_auto_20200510_2042'),\n ]\n\n operations = [\n migrations.RunPython(set_created_at)\n ]\n", "path": "website/payments/migrations/0008_migrate_processing_date.py"}], "after_files": [{"content": "from django.db import migrations\n\n\ndef set_created_at(apps, schema_editor):\n \"\"\"On this migration, set the created at to the processing date.\"\"\"\n Payment = apps.get_model('payments', 'Payment')\n for payment in Payment.objects.all():\n if payment.processing_date:\n payment.created_at = payment.processing_date\n else:\n if hasattr(payment, 'pizzas_order'):\n order = payment.pizzas_order\n order.payment = None\n order.save()\n\n if hasattr(payment, 'registrations_entry'):\n entry = payment.registrations_entry\n entry.payment = None\n entry.save()\n\n payment.delete()\n\n\ndef set_processed_date(apps, schema_editor):\n \"\"\"Revert sets the processing date to the created_at value.\"\"\"\n Payment = apps.get_model('payments', 'Payment')\n for payment in Payment.objects.all():\n payment.processing_date = payment.created_at\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('payments', '0007_auto_20200510_2042'),\n ]\n\n operations = [\n migrations.RunPython(set_created_at, set_processed_date)\n ]\n", "path": "website/payments/migrations/0008_migrate_processing_date.py"}]}
| 2,254 | 295 |
gh_patches_debug_18093
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-nlp-835
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add compilation defaults for the MaskedLM task models
In https://github.com/keras-team/keras-nlp/issues/709 we added compilation defaults to all classification tasks. We should also do the same for all `XXMaskedLM` models. E.g. [bert](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bert/bert_masked_lm.py). Here's the full list of models.
- [ ] `AlbertMaskedLM`
- [ ] `BertMaskedLM`
- [ ] `DebertV3MaskedLM`
- [ ] `DistilBertMaskedLM`
- [ ] `FNetMaskedLM`
- [ ] `RobertaMaskedLM`
https://github.com/keras-team/keras-nlp/pull/714 is a good template PR. We can probably just use Adam and `1e-5` or `2e-5` as a learning rate for now. Though a little experimentation for each model to make sure the task does converge would be helpful. This [colab](https://gist.github.com/mattdangerw/b16c257973762a0b4ab9a34f6a932cc1) may be a helpful starting place.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py`
Content:
```
1 # Copyright 2022 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """DeBERTaV3 masked lm model."""
15
16 import copy
17
18 from tensorflow import keras
19
20 from keras_nlp.api_export import keras_nlp_export
21 from keras_nlp.layers.masked_lm_head import MaskedLMHead
22 from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone
23 from keras_nlp.models.deberta_v3.deberta_v3_backbone import (
24 deberta_kernel_initializer,
25 )
26 from keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import (
27 DebertaV3MaskedLMPreprocessor,
28 )
29 from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
30 from keras_nlp.models.task import Task
31 from keras_nlp.utils.python_utils import classproperty
32
33
34 @keras_nlp_export("keras_nlp.models.DebertaV3MaskedLM")
35 class DebertaV3MaskedLM(Task):
36 """An end-to-end DeBERTaV3 model for the masked language modeling task.
37
38 This model will train DeBERTaV3 on a masked language modeling task.
39 The model will predict labels for a number of masked tokens in the
40 input data. For usage of this model with pre-trained weights, see the
41 `from_preset()` method.
42
43 This model can optionally be configured with a `preprocessor` layer, in
44 which case inputs can be raw string features during `fit()`, `predict()`,
45 and `evaluate()`. Inputs will be tokenized and dynamically masked during
46 training and evaluation. This is done by default when creating the model
47 with `from_preset()`.
48
49 Disclaimer: Pre-trained models are provided on an "as is" basis, without
50 warranties or conditions of any kind. The underlying model is provided by a
51 third party and subject to a separate license, available
52 [here](https://github.com/microsoft/DeBERTa).
53
54 Args:
55 backbone: A `keras_nlp.models.DebertaV3Backbone` instance.
56 preprocessor: A `keras_nlp.models.DebertaV3MaskedLMPreprocessor` or
57 `None`. If `None`, this model will not apply preprocessing, and
58 inputs should be preprocessed before calling the model.
59
60 Example usage:
61
62 Raw string inputs and pretrained backbone.
63 ```python
64 # Create a dataset with raw string features. Labels are inferred.
65 features = ["The quick brown fox jumped.", "I forgot my homework."]
66
67 # Create a DebertaV3MaskedLM with a pretrained backbone and further train
68 # on an MLM task.
69 masked_lm = keras_nlp.models.DebertaV3MaskedLM.from_preset(
70 "deberta_v3_base_en",
71 )
72 masked_lm.compile(
73 loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
74 )
75 masked_lm.fit(x=features, batch_size=2)
76 ```
77
78 Preprocessed inputs and custom backbone.
79 ```python
80 # Create a preprocessed dataset where 0 is the mask token.
81 preprocessed_features = {
82 "token_ids": tf.constant(
83 [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)
84 ),
85 "padding_mask": tf.constant(
86 [[1, 1, 1, 1, 1, 1, 1, 1]] * 2, shape=(2, 8)
87 ),
88 "mask_positions": tf.constant([[2, 4]] * 2, shape=(2, 2))
89 }
90 # Labels are the original masked values.
91 labels = [[3, 5]] * 2
92
93 # Randomly initialize a DeBERTaV3 encoder
94 backbone = keras_nlp.models.DebertaV3Backbone(
95 vocabulary_size=50265,
96 num_layers=12,
97 num_heads=12,
98 hidden_dim=768,
99 intermediate_dim=3072,
100 max_sequence_length=12
101 )
102 # Create a DeBERTaV3 masked_lm and fit the data.
103 masked_lm = keras_nlp.models.DebertaV3MaskedLM(
104 backbone,
105 preprocessor=None,
106 )
107 masked_lm.compile(
108 loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
109 )
110 masked_lm.fit(x=preprocessed_features, y=labels, batch_size=2)
111 ```
112 """
113
114 def __init__(
115 self,
116 backbone,
117 preprocessor=None,
118 **kwargs,
119 ):
120 inputs = {
121 **backbone.input,
122 "mask_positions": keras.Input(
123 shape=(None,), dtype="int32", name="mask_positions"
124 ),
125 }
126 backbone_outputs = backbone(backbone.input)
127 outputs = MaskedLMHead(
128 vocabulary_size=backbone.vocabulary_size,
129 embedding_weights=backbone.token_embedding.embeddings,
130 intermediate_activation=lambda x: keras.activations.gelu(
131 x, approximate=False
132 ),
133 kernel_initializer=deberta_kernel_initializer(),
134 name="mlm_head",
135 )(backbone_outputs, inputs["mask_positions"])
136
137 # Instantiate using Functional API Model constructor
138 super().__init__(
139 inputs=inputs,
140 outputs=outputs,
141 include_preprocessing=preprocessor is not None,
142 **kwargs,
143 )
144 # All references to `self` below this line
145 self.backbone = backbone
146 self.preprocessor = preprocessor
147
148 @classproperty
149 def backbone_cls(cls):
150 return DebertaV3Backbone
151
152 @classproperty
153 def preprocessor_cls(cls):
154 return DebertaV3MaskedLMPreprocessor
155
156 @classproperty
157 def presets(cls):
158 return copy.deepcopy(backbone_presets)
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py
--- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py
@@ -28,6 +28,7 @@
)
from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
from keras_nlp.models.task import Task
+from keras_nlp.utils.keras_utils import is_xla_compatible
from keras_nlp.utils.python_utils import classproperty
@@ -145,6 +146,13 @@
self.backbone = backbone
self.preprocessor = preprocessor
+ self.compile(
+ loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+ optimizer=keras.optimizers.Adam(5e-5),
+ weighted_metrics=keras.metrics.SparseCategoricalAccuracy(),
+ jit_compile=is_xla_compatible(self),
+ )
+
@classproperty
def backbone_cls(cls):
return DebertaV3Backbone
|
{"golden_diff": "diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py\n--- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py\n+++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py\n@@ -28,6 +28,7 @@\n )\n from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets\n from keras_nlp.models.task import Task\n+from keras_nlp.utils.keras_utils import is_xla_compatible\n from keras_nlp.utils.python_utils import classproperty\n \n \n@@ -145,6 +146,13 @@\n self.backbone = backbone\n self.preprocessor = preprocessor\n \n+ self.compile(\n+ loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n+ optimizer=keras.optimizers.Adam(5e-5),\n+ weighted_metrics=keras.metrics.SparseCategoricalAccuracy(),\n+ jit_compile=is_xla_compatible(self),\n+ )\n+\n @classproperty\n def backbone_cls(cls):\n return DebertaV3Backbone\n", "issue": "Add compilation defaults for the MaskedLM task models\nIn https://github.com/keras-team/keras-nlp/issues/709 we added compilation defaults to all classification tasks. We should also do the same for all `XXMaskedLM` models. E.g. [bert](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bert/bert_masked_lm.py). Here's the full list of models.\r\n\r\n- [ ] `AlbertMaskedLM`\r\n- [ ] `BertMaskedLM`\r\n- [ ] `DebertV3MaskedLM`\r\n- [ ] `DistilBertMaskedLM`\r\n- [ ] `FNetMaskedLM`\r\n- [ ] `RobertaMaskedLM`\r\n\r\nhttps://github.com/keras-team/keras-nlp/pull/714 is a good template PR. We can probably just use Adam and `1e-5` or `2e-5` as a learning rate for now. Though a little experimentation for each model to make sure the task does converge would be helpful. This [colab](https://gist.github.com/mattdangerw/b16c257973762a0b4ab9a34f6a932cc1) may be a helpful starting place.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"DeBERTaV3 masked lm model.\"\"\"\n\nimport copy\n\nfrom tensorflow import keras\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.masked_lm_head import MaskedLMHead\nfrom keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone\nfrom keras_nlp.models.deberta_v3.deberta_v3_backbone import (\n deberta_kernel_initializer,\n)\nfrom keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import (\n DebertaV3MaskedLMPreprocessor,\n)\nfrom keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets\nfrom keras_nlp.models.task import Task\nfrom keras_nlp.utils.python_utils import classproperty\n\n\n@keras_nlp_export(\"keras_nlp.models.DebertaV3MaskedLM\")\nclass DebertaV3MaskedLM(Task):\n \"\"\"An end-to-end DeBERTaV3 model for the masked language modeling task.\n\n This model will train DeBERTaV3 on a masked language modeling task.\n The model will predict labels for a number of masked tokens in the\n input data. 
For usage of this model with pre-trained weights, see the\n `from_preset()` method.\n\n This model can optionally be configured with a `preprocessor` layer, in\n which case inputs can be raw string features during `fit()`, `predict()`,\n and `evaluate()`. Inputs will be tokenized and dynamically masked during\n training and evaluation. This is done by default when creating the model\n with `from_preset()`.\n\n Disclaimer: Pre-trained models are provided on an \"as is\" basis, without\n warranties or conditions of any kind. The underlying model is provided by a\n third party and subject to a separate license, available\n [here](https://github.com/microsoft/DeBERTa).\n\n Args:\n backbone: A `keras_nlp.models.DebertaV3Backbone` instance.\n preprocessor: A `keras_nlp.models.DebertaV3MaskedLMPreprocessor` or\n `None`. If `None`, this model will not apply preprocessing, and\n inputs should be preprocessed before calling the model.\n\n Example usage:\n\n Raw string inputs and pretrained backbone.\n ```python\n # Create a dataset with raw string features. Labels are inferred.\n features = [\"The quick brown fox jumped.\", \"I forgot my homework.\"]\n\n # Create a DebertaV3MaskedLM with a pretrained backbone and further train\n # on an MLM task.\n masked_lm = keras_nlp.models.DebertaV3MaskedLM.from_preset(\n \"deberta_v3_base_en\",\n )\n masked_lm.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n )\n masked_lm.fit(x=features, batch_size=2)\n ```\n\n Preprocessed inputs and custom backbone.\n ```python\n # Create a preprocessed dataset where 0 is the mask token.\n preprocessed_features = {\n \"token_ids\": tf.constant(\n [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)\n ),\n \"padding_mask\": tf.constant(\n [[1, 1, 1, 1, 1, 1, 1, 1]] * 2, shape=(2, 8)\n ),\n \"mask_positions\": tf.constant([[2, 4]] * 2, shape=(2, 2))\n }\n # Labels are the original masked values.\n labels = [[3, 5]] * 2\n\n # Randomly initialize a DeBERTaV3 encoder\n backbone = keras_nlp.models.DebertaV3Backbone(\n vocabulary_size=50265,\n num_layers=12,\n num_heads=12,\n hidden_dim=768,\n intermediate_dim=3072,\n max_sequence_length=12\n )\n # Create a DeBERTaV3 masked_lm and fit the data.\n masked_lm = keras_nlp.models.DebertaV3MaskedLM(\n backbone,\n preprocessor=None,\n )\n masked_lm.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n )\n masked_lm.fit(x=preprocessed_features, y=labels, batch_size=2)\n ```\n \"\"\"\n\n def __init__(\n self,\n backbone,\n preprocessor=None,\n **kwargs,\n ):\n inputs = {\n **backbone.input,\n \"mask_positions\": keras.Input(\n shape=(None,), dtype=\"int32\", name=\"mask_positions\"\n ),\n }\n backbone_outputs = backbone(backbone.input)\n outputs = MaskedLMHead(\n vocabulary_size=backbone.vocabulary_size,\n embedding_weights=backbone.token_embedding.embeddings,\n intermediate_activation=lambda x: keras.activations.gelu(\n x, approximate=False\n ),\n kernel_initializer=deberta_kernel_initializer(),\n name=\"mlm_head\",\n )(backbone_outputs, inputs[\"mask_positions\"])\n\n # Instantiate using Functional API Model constructor\n super().__init__(\n inputs=inputs,\n outputs=outputs,\n include_preprocessing=preprocessor is not None,\n **kwargs,\n )\n # All references to `self` below this line\n self.backbone = backbone\n self.preprocessor = preprocessor\n\n @classproperty\n def backbone_cls(cls):\n return DebertaV3Backbone\n\n @classproperty\n def preprocessor_cls(cls):\n return DebertaV3MaskedLMPreprocessor\n\n @classproperty\n def presets(cls):\n 
return copy.deepcopy(backbone_presets)\n", "path": "keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py"}], "after_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"DeBERTaV3 masked lm model.\"\"\"\n\nimport copy\n\nfrom tensorflow import keras\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.layers.masked_lm_head import MaskedLMHead\nfrom keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone\nfrom keras_nlp.models.deberta_v3.deberta_v3_backbone import (\n deberta_kernel_initializer,\n)\nfrom keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import (\n DebertaV3MaskedLMPreprocessor,\n)\nfrom keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets\nfrom keras_nlp.models.task import Task\nfrom keras_nlp.utils.keras_utils import is_xla_compatible\nfrom keras_nlp.utils.python_utils import classproperty\n\n\n@keras_nlp_export(\"keras_nlp.models.DebertaV3MaskedLM\")\nclass DebertaV3MaskedLM(Task):\n \"\"\"An end-to-end DeBERTaV3 model for the masked language modeling task.\n\n This model will train DeBERTaV3 on a masked language modeling task.\n The model will predict labels for a number of masked tokens in the\n input data. For usage of this model with pre-trained weights, see the\n `from_preset()` method.\n\n This model can optionally be configured with a `preprocessor` layer, in\n which case inputs can be raw string features during `fit()`, `predict()`,\n and `evaluate()`. Inputs will be tokenized and dynamically masked during\n training and evaluation. This is done by default when creating the model\n with `from_preset()`.\n\n Disclaimer: Pre-trained models are provided on an \"as is\" basis, without\n warranties or conditions of any kind. The underlying model is provided by a\n third party and subject to a separate license, available\n [here](https://github.com/microsoft/DeBERTa).\n\n Args:\n backbone: A `keras_nlp.models.DebertaV3Backbone` instance.\n preprocessor: A `keras_nlp.models.DebertaV3MaskedLMPreprocessor` or\n `None`. If `None`, this model will not apply preprocessing, and\n inputs should be preprocessed before calling the model.\n\n Example usage:\n\n Raw string inputs and pretrained backbone.\n ```python\n # Create a dataset with raw string features. 
Labels are inferred.\n features = [\"The quick brown fox jumped.\", \"I forgot my homework.\"]\n\n # Create a DebertaV3MaskedLM with a pretrained backbone and further train\n # on an MLM task.\n masked_lm = keras_nlp.models.DebertaV3MaskedLM.from_preset(\n \"deberta_v3_base_en\",\n )\n masked_lm.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n )\n masked_lm.fit(x=features, batch_size=2)\n ```\n\n Preprocessed inputs and custom backbone.\n ```python\n # Create a preprocessed dataset where 0 is the mask token.\n preprocessed_features = {\n \"token_ids\": tf.constant(\n [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)\n ),\n \"padding_mask\": tf.constant(\n [[1, 1, 1, 1, 1, 1, 1, 1]] * 2, shape=(2, 8)\n ),\n \"mask_positions\": tf.constant([[2, 4]] * 2, shape=(2, 2))\n }\n # Labels are the original masked values.\n labels = [[3, 5]] * 2\n\n # Randomly initialize a DeBERTaV3 encoder\n backbone = keras_nlp.models.DebertaV3Backbone(\n vocabulary_size=50265,\n num_layers=12,\n num_heads=12,\n hidden_dim=768,\n intermediate_dim=3072,\n max_sequence_length=12\n )\n # Create a DeBERTaV3 masked_lm and fit the data.\n masked_lm = keras_nlp.models.DebertaV3MaskedLM(\n backbone,\n preprocessor=None,\n )\n masked_lm.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n )\n masked_lm.fit(x=preprocessed_features, y=labels, batch_size=2)\n ```\n \"\"\"\n\n def __init__(\n self,\n backbone,\n preprocessor=None,\n **kwargs,\n ):\n inputs = {\n **backbone.input,\n \"mask_positions\": keras.Input(\n shape=(None,), dtype=\"int32\", name=\"mask_positions\"\n ),\n }\n backbone_outputs = backbone(backbone.input)\n outputs = MaskedLMHead(\n vocabulary_size=backbone.vocabulary_size,\n embedding_weights=backbone.token_embedding.embeddings,\n intermediate_activation=lambda x: keras.activations.gelu(\n x, approximate=False\n ),\n kernel_initializer=deberta_kernel_initializer(),\n name=\"mlm_head\",\n )(backbone_outputs, inputs[\"mask_positions\"])\n\n # Instantiate using Functional API Model constructor\n super().__init__(\n inputs=inputs,\n outputs=outputs,\n include_preprocessing=preprocessor is not None,\n **kwargs,\n )\n # All references to `self` below this line\n self.backbone = backbone\n self.preprocessor = preprocessor\n\n self.compile(\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=keras.optimizers.Adam(5e-5),\n weighted_metrics=keras.metrics.SparseCategoricalAccuracy(),\n jit_compile=is_xla_compatible(self),\n )\n\n @classproperty\n def backbone_cls(cls):\n return DebertaV3Backbone\n\n @classproperty\n def preprocessor_cls(cls):\n return DebertaV3MaskedLMPreprocessor\n\n @classproperty\n def presets(cls):\n return copy.deepcopy(backbone_presets)\n", "path": "keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py"}]}
| 2,335 | 281 |
gh_patches_debug_26342
|
rasdani/github-patches
|
git_diff
|
zalando__patroni-1535
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
features/steps/standby_cluster.py produces error under Windows
Trying to execute:
`>behave -i standby_cluster.feature`
will produce the following error:
```
2020-05-07 19:27:19,407 ERROR: Failed to execute ['c:userspashaappdatalocalprogramspythonpython37-32python.exe', 'features/callback.py', '5362', 'on_role_change', 'master', 'batman']
Traceback (most recent call last):
File "C:\Users\pasha\Code\zalando-patroni\patroni\postgresql\cancellable.py", line 28, in _start_process
self._process = psutil.Popen(cmd, *args, **kwargs)
File "c:\users\pasha\appdata\local\programs\python\python37-32\lib\site-packages\psutil\__init__.py", line 1431, in __init__
self.__subproc = subprocess.Popen(*args, **kwargs)
File "c:\users\pasha\appdata\local\programs\python\python37-32\lib\subprocess.py", line 775, in __init__
restore_signals, start_new_session)
File "c:\users\pasha\appdata\local\programs\python\python37-32\lib\subprocess.py", line 1178, in _execute_child
startupinfo)
FileNotFoundError: [WinError 2] The system cannot find the file specified
```
This error is produced by `postgresql/cancellable.py`:
```
def _start_process(self, cmd, *args, **kwargs):
"""This method must be executed only when the `_lock` is acquired"""
try:
self._process_children = []
self._process_cmd = cmd
self._process = psutil.Popen(cmd, *args, **kwargs)
^^^^^^^^^^^^^^^^^
except Exception:
return logger.exception('Failed to execute %s', cmd)
return True
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `features/steps/standby_cluster.py`
Content:
```
1 import os
2 import sys
3 import time
4
5 from behave import step
6
7
8 select_replication_query = """
9 SELECT * FROM pg_catalog.pg_stat_replication
10 WHERE application_name = '{0}'
11 """
12
13 callback = sys.executable + " features/callback2.py "
14
15
16 @step('I start {name:w} with callback configured')
17 def start_patroni_with_callbacks(context, name):
18 return context.pctl.start(name, custom_config={
19 "postgresql": {
20 "callbacks": {
21 "on_role_change": sys.executable + " features/callback.py"
22 }
23 }
24 })
25
26
27 @step('I start {name:w} in a cluster {cluster_name:w}')
28 def start_patroni(context, name, cluster_name):
29 return context.pctl.start(name, custom_config={
30 "scope": cluster_name,
31 "postgresql": {
32 "callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},
33 "backup_restore": {
34 "command": (sys.executable + " features/backup_restore.py --sourcedir=" +
35 os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}
36 }
37 })
38
39
40 @step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}')
41 def start_patroni_standby_cluster(context, name, cluster_name, name2):
42 # we need to remove patroni.dynamic.json in order to "bootstrap" standby cluster with existing PGDATA
43 os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json'))
44 port = context.pctl._processes[name2]._connkwargs.get('port')
45 context.pctl._processes[name].update_config({
46 "scope": cluster_name,
47 "bootstrap": {
48 "dcs": {
49 "ttl": 20,
50 "loop_wait": 2,
51 "retry_timeout": 5,
52 "standby_cluster": {
53 "host": "localhost",
54 "port": port,
55 "primary_slot_name": "pm_1",
56 "create_replica_methods": ["backup_restore", "basebackup"]
57 }
58 }
59 },
60 "postgresql": {
61 "callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')}
62 }
63 })
64 return context.pctl.start(name)
65
66
67 @step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds')
68 def check_replication_status(context, pg_name1, pg_name2, timeout):
69 bound_time = time.time() + timeout
70
71 while time.time() < bound_time:
72 cur = context.pctl.query(
73 pg_name2,
74 select_replication_query.format(pg_name1),
75 fail_ok=True
76 )
77
78 if cur and len(cur.fetchall()) != 0:
79 break
80
81 time.sleep(1)
82 else:
83 assert False, "{0} is not replicating from {1} after {2} seconds".format(pg_name1, pg_name2, timeout)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/features/steps/standby_cluster.py b/features/steps/standby_cluster.py
--- a/features/steps/standby_cluster.py
+++ b/features/steps/standby_cluster.py
@@ -10,7 +10,8 @@
WHERE application_name = '{0}'
"""
-callback = sys.executable + " features/callback2.py "
+executable = sys.executable if os.name != 'nt' else sys.executable.replace('\\', '/')
+callback = executable + " features/callback2.py "
@step('I start {name:w} with callback configured')
@@ -18,7 +19,7 @@
return context.pctl.start(name, custom_config={
"postgresql": {
"callbacks": {
- "on_role_change": sys.executable + " features/callback.py"
+ "on_role_change": executable + " features/callback.py"
}
}
})
@@ -31,7 +32,7 @@
"postgresql": {
"callbacks": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},
"backup_restore": {
- "command": (sys.executable + " features/backup_restore.py --sourcedir=" +
+ "command": (executable + " features/backup_restore.py --sourcedir=" +
os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}
}
})
|
{"golden_diff": "diff --git a/features/steps/standby_cluster.py b/features/steps/standby_cluster.py\n--- a/features/steps/standby_cluster.py\n+++ b/features/steps/standby_cluster.py\n@@ -10,7 +10,8 @@\n WHERE application_name = '{0}'\n \"\"\"\n \n-callback = sys.executable + \" features/callback2.py \"\n+executable = sys.executable if os.name != 'nt' else sys.executable.replace('\\\\', '/')\n+callback = executable + \" features/callback2.py \"\n \n \n @step('I start {name:w} with callback configured')\n@@ -18,7 +19,7 @@\n return context.pctl.start(name, custom_config={\n \"postgresql\": {\n \"callbacks\": {\n- \"on_role_change\": sys.executable + \" features/callback.py\"\n+ \"on_role_change\": executable + \" features/callback.py\"\n }\n }\n })\n@@ -31,7 +32,7 @@\n \"postgresql\": {\n \"callbacks\": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},\n \"backup_restore\": {\n- \"command\": (sys.executable + \" features/backup_restore.py --sourcedir=\" +\n+ \"command\": (executable + \" features/backup_restore.py --sourcedir=\" +\n os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}\n }\n })\n", "issue": "features/steps/standby_cluster.py produces error under Windows\nTrying to execute:\r\n`>behave -i standby_cluster.feature`\r\nwill produce error:\r\n```\r\n2020-05-07 19:27:19,407 ERROR: Failed to execute ['c:userspashaappdatalocalprogramspythonpython37-32python.exe', 'features/callback.py', '5362', 'on_role_change', 'master', 'batman']\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\pasha\\Code\\zalando-patroni\\patroni\\postgresql\\cancellable.py\", line 28, in _start_process\r\n self._process = psutil.Popen(cmd, *args, **kwargs)\r\n File \"c:\\users\\pasha\\appdata\\local\\programs\\python\\python37-32\\lib\\site-packages\\psutil\\__init__.py\", line 1431, in __init__\r\n self.__subproc = subprocess.Popen(*args, **kwargs)\r\n File \"c:\\users\\pasha\\appdata\\local\\programs\\python\\python37-32\\lib\\subprocess.py\", line 775, in __init__\r\n restore_signals, start_new_session)\r\n File \"c:\\users\\pasha\\appdata\\local\\programs\\python\\python37-32\\lib\\subprocess.py\", line 1178, in _execute_child\r\n startupinfo)\r\nFileNotFoundError: [WinError 2] The system cannot find the file specified\r\n```\r\n\r\nThis error produced by `postgresql/cancellable.py`: \r\n```\r\n def _start_process(self, cmd, *args, **kwargs):\r\n \"\"\"This method must be executed only when the `_lock` is acquired\"\"\"\r\n\r\n try:\r\n self._process_children = []\r\n self._process_cmd = cmd\r\n self._process = psutil.Popen(cmd, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^\r\n except Exception:\r\n return logger.exception('Failed to execute %s', cmd)\r\n return True\r\n```\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport time\n\nfrom behave import step\n\n\nselect_replication_query = \"\"\"\nSELECT * FROM pg_catalog.pg_stat_replication\nWHERE application_name = '{0}'\n\"\"\"\n\ncallback = sys.executable + \" features/callback2.py \"\n\n\n@step('I start {name:w} with callback configured')\ndef start_patroni_with_callbacks(context, name):\n return context.pctl.start(name, custom_config={\n \"postgresql\": {\n \"callbacks\": {\n \"on_role_change\": sys.executable + \" features/callback.py\"\n }\n }\n })\n\n\n@step('I start {name:w} in a cluster {cluster_name:w}')\ndef start_patroni(context, name, cluster_name):\n return context.pctl.start(name, custom_config={\n \"scope\": cluster_name,\n \"postgresql\": {\n \"callbacks\": {c: callback + 
name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},\n \"backup_restore\": {\n \"command\": (sys.executable + \" features/backup_restore.py --sourcedir=\" +\n os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}\n }\n })\n\n\n@step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}')\ndef start_patroni_standby_cluster(context, name, cluster_name, name2):\n # we need to remove patroni.dynamic.json in order to \"bootstrap\" standby cluster with existing PGDATA\n os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json'))\n port = context.pctl._processes[name2]._connkwargs.get('port')\n context.pctl._processes[name].update_config({\n \"scope\": cluster_name,\n \"bootstrap\": {\n \"dcs\": {\n \"ttl\": 20,\n \"loop_wait\": 2,\n \"retry_timeout\": 5,\n \"standby_cluster\": {\n \"host\": \"localhost\",\n \"port\": port,\n \"primary_slot_name\": \"pm_1\",\n \"create_replica_methods\": [\"backup_restore\", \"basebackup\"]\n }\n }\n },\n \"postgresql\": {\n \"callbacks\": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')}\n }\n })\n return context.pctl.start(name)\n\n\n@step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds')\ndef check_replication_status(context, pg_name1, pg_name2, timeout):\n bound_time = time.time() + timeout\n\n while time.time() < bound_time:\n cur = context.pctl.query(\n pg_name2,\n select_replication_query.format(pg_name1),\n fail_ok=True\n )\n\n if cur and len(cur.fetchall()) != 0:\n break\n\n time.sleep(1)\n else:\n assert False, \"{0} is not replicating from {1} after {2} seconds\".format(pg_name1, pg_name2, timeout)\n", "path": "features/steps/standby_cluster.py"}], "after_files": [{"content": "import os\nimport sys\nimport time\n\nfrom behave import step\n\n\nselect_replication_query = \"\"\"\nSELECT * FROM pg_catalog.pg_stat_replication\nWHERE application_name = '{0}'\n\"\"\"\n\nexecutable = sys.executable if os.name != 'nt' else sys.executable.replace('\\\\', '/')\ncallback = executable + \" features/callback2.py \"\n\n\n@step('I start {name:w} with callback configured')\ndef start_patroni_with_callbacks(context, name):\n return context.pctl.start(name, custom_config={\n \"postgresql\": {\n \"callbacks\": {\n \"on_role_change\": executable + \" features/callback.py\"\n }\n }\n })\n\n\n@step('I start {name:w} in a cluster {cluster_name:w}')\ndef start_patroni(context, name, cluster_name):\n return context.pctl.start(name, custom_config={\n \"scope\": cluster_name,\n \"postgresql\": {\n \"callbacks\": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')},\n \"backup_restore\": {\n \"command\": (executable + \" features/backup_restore.py --sourcedir=\" +\n os.path.join(context.pctl.patroni_path, 'data', 'basebackup'))}\n }\n })\n\n\n@step('I start {name:w} in a standby cluster {cluster_name:w} as a clone of {name2:w}')\ndef start_patroni_standby_cluster(context, name, cluster_name, name2):\n # we need to remove patroni.dynamic.json in order to \"bootstrap\" standby cluster with existing PGDATA\n os.unlink(os.path.join(context.pctl._processes[name]._data_dir, 'patroni.dynamic.json'))\n port = context.pctl._processes[name2]._connkwargs.get('port')\n context.pctl._processes[name].update_config({\n \"scope\": cluster_name,\n \"bootstrap\": {\n \"dcs\": {\n \"ttl\": 20,\n \"loop_wait\": 2,\n \"retry_timeout\": 5,\n \"standby_cluster\": {\n \"host\": \"localhost\",\n \"port\": port,\n 
\"primary_slot_name\": \"pm_1\",\n \"create_replica_methods\": [\"backup_restore\", \"basebackup\"]\n }\n }\n },\n \"postgresql\": {\n \"callbacks\": {c: callback + name for c in ('on_start', 'on_stop', 'on_restart', 'on_role_change')}\n }\n })\n return context.pctl.start(name)\n\n\n@step('{pg_name1:w} is replicating from {pg_name2:w} after {timeout:d} seconds')\ndef check_replication_status(context, pg_name1, pg_name2, timeout):\n bound_time = time.time() + timeout\n\n while time.time() < bound_time:\n cur = context.pctl.query(\n pg_name2,\n select_replication_query.format(pg_name1),\n fail_ok=True\n )\n\n if cur and len(cur.fetchall()) != 0:\n break\n\n time.sleep(1)\n else:\n assert False, \"{0} is not replicating from {1} after {2} seconds\".format(pg_name1, pg_name2, timeout)\n", "path": "features/steps/standby_cluster.py"}]}
| 1,555 | 313 |
gh_patches_debug_654
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-2123
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.133
On the docket:
+ [x] python<=3.8 symlink with a suffix (eg 3.7m) can create a venv without a pythonX.Y symlink which breaks pex assumptions that pythonX.Y is always available #2119
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.132"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.132"
+__version__ = "2.1.133"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.132\"\n+__version__ = \"2.1.133\"\n", "issue": "Release 2.1.133\nOn the docket:\r\n+ [x] python<=3.8 symlink with a suffix (eg 3.7m) can create a venv without a pythonX.Y symlink which breaks pex assumptions that pythonX.Y is always available #2119\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.132\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.133\"\n", "path": "pex/version.py"}]}
| 373 | 98 |
gh_patches_debug_8786
|
rasdani/github-patches
|
git_diff
|
Flexget__Flexget-1369
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Discover from iptorrents: 'NoneType' object has no attribute 'findAll'
The `iptorrents` plugin is unable to find and produce entries from searches.
### Expected behaviour:
Expected search plugin `iptorrents` to produce entries when used with `discover`.
### Actual behaviour:
A critical error was returned; see below:
```
2016-08-25 02:59 VERBOSE discover search_movies Searching for `The Dark Knight Rises (2012)` with plugin `iptorrents` (1 of 1)
2016-08-25 02:59 CRITICAL task search_movies BUG: Unhandled error in plugin discover: 'NoneType' object has no attribute 'findAll'
Traceback (most recent call last):
File "/usr/local/python/lib/python2.7/site-packages/flexget/task.py", line 472, in __run_plugin
return method(*args, **kwargs)
File "/usr/local/python/lib/python2.7/site-packages/flexget/event.py", line 23, in __call__
return self.func(*args, **kwargs)
File "/usr/local/python/lib/python2.7/site-packages/flexget/plugins/input/discover.py", line 290, in on_task_input
return self.execute_searches(config, entries, task)
File "/usr/local/python/lib/python2.7/site-packages/flexget/plugins/input/discover.py", line 155, in execute_searches
search_results = search.search(task=task, entry=entry, config=plugin_config)
File "/usr/local/python/lib/python2.7/site-packages/flexget/plugin.py", line 118, in wrapped_func
return func(*args, **kwargs)
File "/usr/local/python/lib/python2.7/site-packages/flexget/plugins/sites/iptorrents.py", line 146, in search
for torrent in torrents.findAll('a', href=re.compile('\.torrent$')):
AttributeError: 'NoneType' object has no attribute 'findAll'
```
#### Config:
```
...
search_movies:
discover:
what:
- movie_list: movies
from:
- iptorrents:
rss_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
uid: xxxxxxx
password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
category:
- Movie-HD-Bluray
...
```
### Additional information:
- Flexget Version: 2.3.5
- Python Version: 2.7.12
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/sites/iptorrents.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # pylint: disable=unused-import, redefined-builtin
3 from future.moves.urllib.parse import quote_plus
4
5 import re
6 import logging
7
8 from flexget import plugin
9 from flexget.config_schema import one_or_more
10 from flexget.entry import Entry
11 from flexget.event import event
12 from flexget.plugins.internal.urlrewriting import UrlRewritingError
13 from flexget.utils import requests
14 from flexget.utils.soup import get_soup
15 from flexget.utils.search import torrent_availability, normalize_unicode
16 from flexget.utils.tools import parse_filesize
17
18 log = logging.getLogger('iptorrents')
19
20 CATEGORIES = {
21
22 # Movies
23 'Movie-all': 72,
24 'Movie-3D': 87,
25 'Movie-480p': 77,
26 'Movie-BD-R': 89,
27 'Movie-BD-Rip': 90,
28 'Movie-DVD-R': 6,
29 'Movie-HD-Bluray': 48,
30 'Movie-Kids': 54,
31 'Movie-MP4': 62,
32 'Movie-Non-English': 38,
33 'Movie-Packs': 68,
34 'Movie-XviD': 17,
35
36 # TV
37 'TV-all': 73,
38 'TV-Sports': 55,
39 'TV-480p': 78,
40 'TV-MP4': 66,
41 'TV-Non-English': 82,
42 'TV-Packs': 65,
43 'TV-Packs-Non-English': 83,
44 'TV-SD-x264': 79,
45 'TV-x264': 5,
46 'TV-XVID': 4,
47 'TV-Web-DL': 22
48 }
49
50 BASE_URL = 'https://iptorrents.com'
51
52
53 class UrlRewriteIPTorrents(object):
54 """
55 IpTorrents urlrewriter and search plugin.
56
57 iptorrents:
58 rss_key: xxxxxxxxx (required)
59 uid: xxxxxxxx (required)
60 password: xxxxxxxx (required)
61 category: HD
62
63 Category is any combination of: all, Movie-3D, Movie-480p, Movie-3D,
64 Movie-480p, Movie-BD-R, Movie-BD-Rip, Movie-DVD-R,
65 Movie-HD-Bluray, Movie-Kids, Movie-MP4,
66 Movie-Non-English, Movie-Packs, Movie-XviD,
67
68 TV-all, TV-Sports, TV-480p, TV-MP4, TV-Non-English, TV-Packs,
69 TV-Packs-Non-English, TV-SD-x264, TV-x264, TV-XVID, TV-Web-DL
70 """
71
72 schema = {
73 'type': 'object',
74 'properties': {
75 'rss_key': {'type': 'string'},
76 'uid': {'oneOf': [
77 {'type': 'integer'},
78 {'type': 'string'}
79 ]},
80 'password': {'type': 'string'},
81 'category': one_or_more({
82 'oneOf': [
83 {'type': 'integer'},
84 {'type': 'string', 'enum': list(CATEGORIES)},
85 ]}),
86 },
87 'required': ['rss_key', 'uid', 'password'],
88 'additionalProperties': False
89 }
90
91 # urlrewriter API
92 def url_rewritable(self, task, entry):
93 url = entry['url']
94 if url.startswith(BASE_URL + '/download.php/'):
95 return False
96 if url.startswith(BASE_URL + '/'):
97 return True
98 return False
99
100 # urlrewriter API
101 def url_rewrite(self, task, entry):
102 if 'url' not in entry:
103 log.error("Didn't actually get a URL...")
104 else:
105 log.debug("Got the URL: %s" % entry['url'])
106 if entry['url'].startswith(BASE_URL + '/t?'):
107 # use search
108 results = self.search(task, entry)
109 if not results:
110 raise UrlRewritingError("No search results found")
111 # TODO: Search doesn't enforce close match to title, be more picky
112 entry['url'] = results[0]['url']
113
114 @plugin.internet(log)
115 def search(self, task, entry, config=None):
116 """
117 Search for name from iptorrents
118 """
119
120 categories = config.get('category', 'all')
121 # Make sure categories is a list
122 if not isinstance(categories, list):
123 categories = [categories]
124
125 # If there are any text categories, turn them into their id number
126 categories = [c if isinstance(c, int) else CATEGORIES[c]
127 for c in categories]
128 filter_url = '&'.join((str(c) + '=') for c in categories)
129
130 entries = set()
131
132 for search_string in entry.get('search_strings', [entry['title']]):
133 query = normalize_unicode(search_string)
134 query = quote_plus(query.encode('utf8'))
135
136 url = "{base_url}/t?{filter}&q={query}&qf=".format(base_url=BASE_URL, filter=filter_url, query=query)
137 log.debug('searching with url: %s' % url)
138 req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})
139
140 if '/u/' + str(config.get('uid')) not in req.content:
141 raise plugin.PluginError("Invalid cookies (user not logged in)...")
142
143 soup = get_soup(req.content, parser="html.parser")
144 torrents = soup.find('table', {'class': 'torrents'})
145
146 for torrent in torrents.findAll('a', href=re.compile('\.torrent$')):
147 entry = Entry()
148 entry['url'] = "{base}{link}?torrent_pass={key}".format(
149 base=BASE_URL, link=torrent['href'], key=config.get('rss_key'))
150 entry['title'] = torrent.findPrevious("a", attrs={'class': 't_title'}).text
151
152 seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text
153 leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text
154 entry['torrent_seeds'] = int(seeders)
155 entry['torrent_leeches'] = int(leechers)
156 entry['search_sort'] = torrent_availability(entry['torrent_seeds'],
157 entry['torrent_leeches'])
158
159 size = torrent.findNext(text=re.compile('^([\.\d]+) ([GMK]?)B$'))
160 size = re.search('^([\.\d]+) ([GMK]?)B$', size)
161
162 entry['content_size'] = parse_filesize(size.group(0))
163
164 entries.add(entry)
165
166 return entries
167
168
169 @event('plugin.register')
170 def register_plugin():
171 plugin.register(UrlRewriteIPTorrents, 'iptorrents', groups=['urlrewriter', 'search'], api_ver=2)
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flexget/plugins/sites/iptorrents.py b/flexget/plugins/sites/iptorrents.py
--- a/flexget/plugins/sites/iptorrents.py
+++ b/flexget/plugins/sites/iptorrents.py
@@ -141,7 +141,7 @@
raise plugin.PluginError("Invalid cookies (user not logged in)...")
soup = get_soup(req.content, parser="html.parser")
- torrents = soup.find('table', {'class': 'torrents'})
+ torrents = soup.find('table', {'id': 'torrents'})
for torrent in torrents.findAll('a', href=re.compile('\.torrent$')):
entry = Entry()
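A note on the patch above (not part of the dataset entry): it switches the table lookup from the `class` attribute to the `id` attribute, which suggests the IPTorrents results table is identified by `id="torrents"` rather than `class="torrents"`. A small self-contained sketch of the patched lookup with an extra `None` guard; the guard is defensive hardening, not part of the golden patch:

```python
import logging
import re

from flexget.utils.soup import get_soup

log = logging.getLogger('iptorrents')


def find_torrent_links(page_content):
    """Return .torrent links from an IPTorrents results page, or [] if none."""
    soup = get_soup(page_content, parser="html.parser")
    torrents = soup.find('table', {'id': 'torrents'})
    if torrents is None:
        # Empty search result or unexpected markup: return no entries
        # instead of raising AttributeError on NoneType.
        log.debug('no torrent table found on results page')
        return []
    return torrents.findAll('a', href=re.compile(r'\.torrent$'))
```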
|
{"golden_diff": "diff --git a/flexget/plugins/sites/iptorrents.py b/flexget/plugins/sites/iptorrents.py\n--- a/flexget/plugins/sites/iptorrents.py\n+++ b/flexget/plugins/sites/iptorrents.py\n@@ -141,7 +141,7 @@\n raise plugin.PluginError(\"Invalid cookies (user not logged in)...\")\n \n soup = get_soup(req.content, parser=\"html.parser\")\n- torrents = soup.find('table', {'class': 'torrents'})\n+ torrents = soup.find('table', {'id': 'torrents'})\n \n for torrent in torrents.findAll('a', href=re.compile('\\.torrent$')):\n entry = Entry()\n", "issue": "Discover from iptorrents: 'NoneType' object has no attribute 'findAll'\nThe `iptorrents` plugin is unable to find and produce entries from searches.\n### Expected behaviour:\n\nExpected search plugin `iptorrents` to produce entries when used with `discover`.\n### Actual behaviour:\n\nCritical error was returned, see below:\n\n```\n2016-08-25 02:59 VERBOSE discover search_movies Searching for `The Dark Knight Rises (2012)` with plugin `iptorrents` (1 of 1)\n2016-08-25 02:59 CRITICAL task search_movies BUG: Unhandled error in plugin discover: 'NoneType' object has no attribute 'findAll'\nTraceback (most recent call last):\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/task.py\", line 472, in __run_plugin\n return method(*args, **kwargs)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/event.py\", line 23, in __call__\n return self.func(*args, **kwargs)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugins/input/discover.py\", line 290, in on_task_input\n return self.execute_searches(config, entries, task)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugins/input/discover.py\", line 155, in execute_searches\n search_results = search.search(task=task, entry=entry, config=plugin_config)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugin.py\", line 118, in wrapped_func\n return func(*args, **kwargs)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugins/sites/iptorrents.py\", line 146, in search\n for torrent in torrents.findAll('a', href=re.compile('\\.torrent$')):\nAttributeError: 'NoneType' object has no attribute 'findAll'\n```\n#### Config:\n\n```\n...\n search_movies:\n discover:\n what:\n - movie_list: movies\n from:\n - iptorrents:\n rss_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n uid: xxxxxxx\n password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n category:\n - Movie-HD-Bluray\n... 
\n```\n### Additional information:\n- Flexget Version: 2.3.5\n- Python Version: 2.7.12\n\nDiscover from iptorrents: 'NoneType' object has no attribute 'findAll'\nThe `iptorrents` plugin is unable to find and produce entries from searches.\n### Expected behaviour:\n\nExpected search plugin `iptorrents` to produce entries when used with `discover`.\n### Actual behaviour:\n\nCritical error was returned, see below:\n\n```\n2016-08-25 02:59 VERBOSE discover search_movies Searching for `The Dark Knight Rises (2012)` with plugin `iptorrents` (1 of 1)\n2016-08-25 02:59 CRITICAL task search_movies BUG: Unhandled error in plugin discover: 'NoneType' object has no attribute 'findAll'\nTraceback (most recent call last):\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/task.py\", line 472, in __run_plugin\n return method(*args, **kwargs)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/event.py\", line 23, in __call__\n return self.func(*args, **kwargs)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugins/input/discover.py\", line 290, in on_task_input\n return self.execute_searches(config, entries, task)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugins/input/discover.py\", line 155, in execute_searches\n search_results = search.search(task=task, entry=entry, config=plugin_config)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugin.py\", line 118, in wrapped_func\n return func(*args, **kwargs)\n File \"/usr/local/python/lib/python2.7/site-packages/flexget/plugins/sites/iptorrents.py\", line 146, in search\n for torrent in torrents.findAll('a', href=re.compile('\\.torrent$')):\nAttributeError: 'NoneType' object has no attribute 'findAll'\n```\n#### Config:\n\n```\n...\n search_movies:\n discover:\n what:\n - movie_list: movies\n from:\n - iptorrents:\n rss_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n uid: xxxxxxx\n password: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n category:\n - Movie-HD-Bluray\n... 
\n```\n### Additional information:\n- Flexget Version: 2.3.5\n- Python Version: 2.7.12\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import quote_plus\n\nimport re\nimport logging\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.search import torrent_availability, normalize_unicode\nfrom flexget.utils.tools import parse_filesize\n\nlog = logging.getLogger('iptorrents')\n\nCATEGORIES = {\n\n # Movies\n 'Movie-all': 72,\n 'Movie-3D': 87,\n 'Movie-480p': 77,\n 'Movie-BD-R': 89,\n 'Movie-BD-Rip': 90,\n 'Movie-DVD-R': 6,\n 'Movie-HD-Bluray': 48,\n 'Movie-Kids': 54,\n 'Movie-MP4': 62,\n 'Movie-Non-English': 38,\n 'Movie-Packs': 68,\n 'Movie-XviD': 17,\n\n # TV\n 'TV-all': 73,\n 'TV-Sports': 55,\n 'TV-480p': 78,\n 'TV-MP4': 66,\n 'TV-Non-English': 82,\n 'TV-Packs': 65,\n 'TV-Packs-Non-English': 83,\n 'TV-SD-x264': 79,\n 'TV-x264': 5,\n 'TV-XVID': 4,\n 'TV-Web-DL': 22\n}\n\nBASE_URL = 'https://iptorrents.com'\n\n\nclass UrlRewriteIPTorrents(object):\n \"\"\"\n IpTorrents urlrewriter and search plugin.\n\n iptorrents:\n rss_key: xxxxxxxxx (required)\n uid: xxxxxxxx (required)\n password: xxxxxxxx (required)\n category: HD\n\n Category is any combination of: all, Movie-3D, Movie-480p, Movie-3D,\n Movie-480p, Movie-BD-R, Movie-BD-Rip, Movie-DVD-R,\n Movie-HD-Bluray, Movie-Kids, Movie-MP4,\n Movie-Non-English, Movie-Packs, Movie-XviD,\n\n TV-all, TV-Sports, TV-480p, TV-MP4, TV-Non-English, TV-Packs,\n TV-Packs-Non-English, TV-SD-x264, TV-x264, TV-XVID, TV-Web-DL\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': {'oneOf': [\n {'type': 'integer'},\n {'type': 'string'}\n ]},\n 'password': {'type': 'string'},\n 'category': one_or_more({\n 'oneOf': [\n {'type': 'integer'},\n {'type': 'string', 'enum': list(CATEGORIES)},\n ]}),\n },\n 'required': ['rss_key', 'uid', 'password'],\n 'additionalProperties': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.startswith(BASE_URL + '/download.php/'):\n return False\n if url.startswith(BASE_URL + '/'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n log.error(\"Didn't actually get a URL...\")\n else:\n log.debug(\"Got the URL: %s\" % entry['url'])\n if entry['url'].startswith(BASE_URL + '/t?'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError(\"No search results found\")\n # TODO: Search doesn't enforce close match to title, be more picky\n entry['url'] = results[0]['url']\n\n @plugin.internet(log)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from iptorrents\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c]\n for c in categories]\n filter_url = '&'.join((str(c) + '=') for c in categories)\n\n entries = set()\n\n for search_string in entry.get('search_strings', 
[entry['title']]):\n query = normalize_unicode(search_string)\n query = quote_plus(query.encode('utf8'))\n\n url = \"{base_url}/t?{filter}&q={query}&qf=\".format(base_url=BASE_URL, filter=filter_url, query=query)\n log.debug('searching with url: %s' % url)\n req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})\n\n if '/u/' + str(config.get('uid')) not in req.content:\n raise plugin.PluginError(\"Invalid cookies (user not logged in)...\")\n\n soup = get_soup(req.content, parser=\"html.parser\")\n torrents = soup.find('table', {'class': 'torrents'})\n\n for torrent in torrents.findAll('a', href=re.compile('\\.torrent$')):\n entry = Entry()\n entry['url'] = \"{base}{link}?torrent_pass={key}\".format(\n base=BASE_URL, link=torrent['href'], key=config.get('rss_key'))\n entry['title'] = torrent.findPrevious(\"a\", attrs={'class': 't_title'}).text\n\n seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text\n leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text\n entry['torrent_seeds'] = int(seeders)\n entry['torrent_leeches'] = int(leechers)\n entry['search_sort'] = torrent_availability(entry['torrent_seeds'],\n entry['torrent_leeches'])\n\n size = torrent.findNext(text=re.compile('^([\\.\\d]+) ([GMK]?)B$'))\n size = re.search('^([\\.\\d]+) ([GMK]?)B$', size)\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteIPTorrents, 'iptorrents', groups=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/iptorrents.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import quote_plus\n\nimport re\nimport logging\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.search import torrent_availability, normalize_unicode\nfrom flexget.utils.tools import parse_filesize\n\nlog = logging.getLogger('iptorrents')\n\nCATEGORIES = {\n\n # Movies\n 'Movie-all': 72,\n 'Movie-3D': 87,\n 'Movie-480p': 77,\n 'Movie-BD-R': 89,\n 'Movie-BD-Rip': 90,\n 'Movie-DVD-R': 6,\n 'Movie-HD-Bluray': 48,\n 'Movie-Kids': 54,\n 'Movie-MP4': 62,\n 'Movie-Non-English': 38,\n 'Movie-Packs': 68,\n 'Movie-XviD': 17,\n\n # TV\n 'TV-all': 73,\n 'TV-Sports': 55,\n 'TV-480p': 78,\n 'TV-MP4': 66,\n 'TV-Non-English': 82,\n 'TV-Packs': 65,\n 'TV-Packs-Non-English': 83,\n 'TV-SD-x264': 79,\n 'TV-x264': 5,\n 'TV-XVID': 4,\n 'TV-Web-DL': 22\n}\n\nBASE_URL = 'https://iptorrents.com'\n\n\nclass UrlRewriteIPTorrents(object):\n \"\"\"\n IpTorrents urlrewriter and search plugin.\n\n iptorrents:\n rss_key: xxxxxxxxx (required)\n uid: xxxxxxxx (required)\n password: xxxxxxxx (required)\n category: HD\n\n Category is any combination of: all, Movie-3D, Movie-480p, Movie-3D,\n Movie-480p, Movie-BD-R, Movie-BD-Rip, Movie-DVD-R,\n Movie-HD-Bluray, Movie-Kids, Movie-MP4,\n Movie-Non-English, Movie-Packs, Movie-XviD,\n\n TV-all, TV-Sports, TV-480p, TV-MP4, TV-Non-English, TV-Packs,\n TV-Packs-Non-English, TV-SD-x264, TV-x264, TV-XVID, TV-Web-DL\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': 
{'oneOf': [\n {'type': 'integer'},\n {'type': 'string'}\n ]},\n 'password': {'type': 'string'},\n 'category': one_or_more({\n 'oneOf': [\n {'type': 'integer'},\n {'type': 'string', 'enum': list(CATEGORIES)},\n ]}),\n },\n 'required': ['rss_key', 'uid', 'password'],\n 'additionalProperties': False\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.startswith(BASE_URL + '/download.php/'):\n return False\n if url.startswith(BASE_URL + '/'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n log.error(\"Didn't actually get a URL...\")\n else:\n log.debug(\"Got the URL: %s\" % entry['url'])\n if entry['url'].startswith(BASE_URL + '/t?'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError(\"No search results found\")\n # TODO: Search doesn't enforce close match to title, be more picky\n entry['url'] = results[0]['url']\n\n @plugin.internet(log)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from iptorrents\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c]\n for c in categories]\n filter_url = '&'.join((str(c) + '=') for c in categories)\n\n entries = set()\n\n for search_string in entry.get('search_strings', [entry['title']]):\n query = normalize_unicode(search_string)\n query = quote_plus(query.encode('utf8'))\n\n url = \"{base_url}/t?{filter}&q={query}&qf=\".format(base_url=BASE_URL, filter=filter_url, query=query)\n log.debug('searching with url: %s' % url)\n req = requests.get(url, cookies={'uid': str(config['uid']), 'pass': config['password']})\n\n if '/u/' + str(config.get('uid')) not in req.content:\n raise plugin.PluginError(\"Invalid cookies (user not logged in)...\")\n\n soup = get_soup(req.content, parser=\"html.parser\")\n torrents = soup.find('table', {'id': 'torrents'})\n\n for torrent in torrents.findAll('a', href=re.compile('\\.torrent$')):\n entry = Entry()\n entry['url'] = \"{base}{link}?torrent_pass={key}\".format(\n base=BASE_URL, link=torrent['href'], key=config.get('rss_key'))\n entry['title'] = torrent.findPrevious(\"a\", attrs={'class': 't_title'}).text\n\n seeders = torrent.findNext('td', {'class': 'ac t_seeders'}).text\n leechers = torrent.findNext('td', {'class': 'ac t_leechers'}).text\n entry['torrent_seeds'] = int(seeders)\n entry['torrent_leeches'] = int(leechers)\n entry['search_sort'] = torrent_availability(entry['torrent_seeds'],\n entry['torrent_leeches'])\n\n size = torrent.findNext(text=re.compile('^([\\.\\d]+) ([GMK]?)B$'))\n size = re.search('^([\\.\\d]+) ([GMK]?)B$', size)\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteIPTorrents, 'iptorrents', groups=['urlrewriter', 'search'], api_ver=2)\n", "path": "flexget/plugins/sites/iptorrents.py"}]}
| 3,332 | 150 |
gh_patches_debug_67097
|
rasdani/github-patches
|
git_diff
|
celery__celery-5356
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prepend to sys.path in the Django fixup instead of appending.
This makes sure that project modules have precedence over system ones.
Closes #5347.
## Description
This follows @Phyks's suggestion of a fix for #5347, by prepending instead of appending to the system path, to ensure that the project modules are not hidden by system-wide ones.
--- END ISSUE ---
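As a quick illustration of why the change matters (and why the `prepend` spelling cannot work): Python lists have no `prepend` method, so `sys.path.prepend(os.getcwd())` raises an `AttributeError`, while `sys.path.insert(0, os.getcwd())` puts the project directory in front of site-packages so that project modules win name clashes. A minimal, runnable sketch:

```python
import os
import sys

# sys.path.prepend(os.getcwd())  # AttributeError: 'list' object has no attribute 'prepend'
sys.path.insert(0, os.getcwd())  # project directory is searched first

# The current working directory now takes precedence over system-wide packages,
# so a module that exists in both places resolves to the project copy.
assert sys.path[0] == os.getcwd()
```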
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/fixups/django.py`
Content:
```
1 """Django-specific customization."""
2 from __future__ import absolute_import, unicode_literals
3
4 import os
5 import sys
6 import warnings
7 from datetime import datetime
8 from importlib import import_module
9
10 from kombu.utils.imports import symbol_by_name
11 from kombu.utils.objects import cached_property
12
13 from celery import _state, signals
14 from celery.exceptions import FixupWarning, ImproperlyConfigured
15
16 __all__ = ('DjangoFixup', 'fixup')
17
18 ERR_NOT_INSTALLED = """\
19 Environment variable DJANGO_SETTINGS_MODULE is defined
20 but Django isn't installed. Won't apply Django fix-ups!
21 """
22
23
24 def _maybe_close_fd(fh):
25 try:
26 os.close(fh.fileno())
27 except (AttributeError, OSError, TypeError):
28 # TypeError added for celery#962
29 pass
30
31
32 def _verify_django_version(django):
33 if django.VERSION < (1, 11):
34 raise ImproperlyConfigured('Celery 4.x requires Django 1.11 or later.')
35
36
37 def fixup(app, env='DJANGO_SETTINGS_MODULE'):
38 """Install Django fixup if settings module environment is set."""
39 SETTINGS_MODULE = os.environ.get(env)
40 if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():
41 try:
42 import django # noqa
43 except ImportError:
44 warnings.warn(FixupWarning(ERR_NOT_INSTALLED))
45 else:
46 _verify_django_version(django)
47 return DjangoFixup(app).install()
48
49
50 class DjangoFixup(object):
51 """Fixup installed when using Django."""
52
53 def __init__(self, app):
54 self.app = app
55 if _state.default_app is None:
56 self.app.set_default()
57 self._worker_fixup = None
58
59 def install(self):
60 # Need to add project directory to path.
61 # The project directory has precedence over system modules,
62 # so we prepend it to the path.
63 sys.path.prepend(os.getcwd())
64
65 self._settings = symbol_by_name('django.conf:settings')
66 self.app.loader.now = self.now
67
68 signals.import_modules.connect(self.on_import_modules)
69 signals.worker_init.connect(self.on_worker_init)
70 return self
71
72 @property
73 def worker_fixup(self):
74 if self._worker_fixup is None:
75 self._worker_fixup = DjangoWorkerFixup(self.app)
76 return self._worker_fixup
77
78 @worker_fixup.setter
79 def worker_fixup(self, value):
80 self._worker_fixup = value
81
82 def on_import_modules(self, **kwargs):
83 # call django.setup() before task modules are imported
84 self.worker_fixup.validate_models()
85
86 def on_worker_init(self, **kwargs):
87 self.worker_fixup.install()
88
89 def now(self, utc=False):
90 return datetime.utcnow() if utc else self._now()
91
92 def autodiscover_tasks(self):
93 from django.apps import apps
94 return [config.name for config in apps.get_app_configs()]
95
96 @cached_property
97 def _now(self):
98 return symbol_by_name('django.utils.timezone:now')
99
100
101 class DjangoWorkerFixup(object):
102 _db_recycles = 0
103
104 def __init__(self, app):
105 self.app = app
106 self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None)
107 self._db = import_module('django.db')
108 self._cache = import_module('django.core.cache')
109 self._settings = symbol_by_name('django.conf:settings')
110
111 self.interface_errors = (
112 symbol_by_name('django.db.utils.InterfaceError'),
113 )
114 self.DatabaseError = symbol_by_name('django.db:DatabaseError')
115
116 def django_setup(self):
117 import django
118 django.setup()
119
120 def validate_models(self):
121 from django.core.checks import run_checks
122 self.django_setup()
123 run_checks()
124
125 def install(self):
126 signals.beat_embedded_init.connect(self.close_database)
127 signals.worker_ready.connect(self.on_worker_ready)
128 signals.task_prerun.connect(self.on_task_prerun)
129 signals.task_postrun.connect(self.on_task_postrun)
130 signals.worker_process_init.connect(self.on_worker_process_init)
131 self.close_database()
132 self.close_cache()
133 return self
134
135 def on_worker_process_init(self, **kwargs):
136 # Child process must validate models again if on Windows,
137 # or if they were started using execv.
138 if os.environ.get('FORKED_BY_MULTIPROCESSING'):
139 self.validate_models()
140
141 # close connections:
142 # the parent process may have established these,
143 # so need to close them.
144
145 # calling db.close() on some DB connections will cause
146 # the inherited DB conn to also get broken in the parent
147 # process so we need to remove it without triggering any
148 # network IO that close() might cause.
149 for c in self._db.connections.all():
150 if c and c.connection:
151 self._maybe_close_db_fd(c.connection)
152
153 # use the _ version to avoid DB_REUSE preventing the conn.close() call
154 self._close_database()
155 self.close_cache()
156
157 def _maybe_close_db_fd(self, fd):
158 try:
159 _maybe_close_fd(fd)
160 except self.interface_errors:
161 pass
162
163 def on_task_prerun(self, sender, **kwargs):
164 """Called before every task."""
165 if not getattr(sender.request, 'is_eager', False):
166 self.close_database()
167
168 def on_task_postrun(self, sender, **kwargs):
169 # See https://groups.google.com/group/django-users/
170 # browse_thread/thread/78200863d0c07c6d/
171 if not getattr(sender.request, 'is_eager', False):
172 self.close_database()
173 self.close_cache()
174
175 def close_database(self, **kwargs):
176 if not self.db_reuse_max:
177 return self._close_database()
178 if self._db_recycles >= self.db_reuse_max * 2:
179 self._db_recycles = 0
180 self._close_database()
181 self._db_recycles += 1
182
183 def _close_database(self):
184 for conn in self._db.connections.all():
185 try:
186 conn.close_if_unusable_or_obsolete()
187 except self.interface_errors:
188 pass
189 except self.DatabaseError as exc:
190 str_exc = str(exc)
191 if 'closed' not in str_exc and 'not connected' not in str_exc:
192 raise
193
194 def close_cache(self):
195 try:
196 self._cache.close_caches()
197 except (TypeError, AttributeError):
198 pass
199
200 def on_worker_ready(self, **kwargs):
201 if self._settings.DEBUG:
202 warnings.warn('Using settings.DEBUG leads to a memory leak, never '
203 'use this setting in production environments!')
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/celery/fixups/django.py b/celery/fixups/django.py
--- a/celery/fixups/django.py
+++ b/celery/fixups/django.py
@@ -60,7 +60,7 @@
# Need to add project directory to path.
# The project directory has precedence over system modules,
# so we prepend it to the path.
- sys.path.prepend(os.getcwd())
+ sys.path.insert(0, os.getcwd())
self._settings = symbol_by_name('django.conf:settings')
self.app.loader.now = self.now
|
{"golden_diff": "diff --git a/celery/fixups/django.py b/celery/fixups/django.py\n--- a/celery/fixups/django.py\n+++ b/celery/fixups/django.py\n@@ -60,7 +60,7 @@\n # Need to add project directory to path.\n # The project directory has precedence over system modules,\n # so we prepend it to the path.\n- sys.path.prepend(os.getcwd())\n+ sys.path.insert(0, os.getcwd())\n \n self._settings = symbol_by_name('django.conf:settings')\n self.app.loader.now = self.now\n", "issue": "Prepend to sys.path in the Django fixup instead of appending.\nThis makes sure that project modules have precedence over system ones.\r\nCloses #5347.\r\n\r\n## Description\r\n\r\nThis follows @Phyks's suggestion of a fix for #5347, by prepending instead of appending to the system path, to ensure that the project modules are not hidden by system-wide ones.\r\n\n", "before_files": [{"content": "\"\"\"Django-specific customization.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nimport warnings\nfrom datetime import datetime\nfrom importlib import import_module\n\nfrom kombu.utils.imports import symbol_by_name\nfrom kombu.utils.objects import cached_property\n\nfrom celery import _state, signals\nfrom celery.exceptions import FixupWarning, ImproperlyConfigured\n\n__all__ = ('DjangoFixup', 'fixup')\n\nERR_NOT_INSTALLED = \"\"\"\\\nEnvironment variable DJANGO_SETTINGS_MODULE is defined\nbut Django isn't installed. Won't apply Django fix-ups!\n\"\"\"\n\n\ndef _maybe_close_fd(fh):\n try:\n os.close(fh.fileno())\n except (AttributeError, OSError, TypeError):\n # TypeError added for celery#962\n pass\n\n\ndef _verify_django_version(django):\n if django.VERSION < (1, 11):\n raise ImproperlyConfigured('Celery 4.x requires Django 1.11 or later.')\n\n\ndef fixup(app, env='DJANGO_SETTINGS_MODULE'):\n \"\"\"Install Django fixup if settings module environment is set.\"\"\"\n SETTINGS_MODULE = os.environ.get(env)\n if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():\n try:\n import django # noqa\n except ImportError:\n warnings.warn(FixupWarning(ERR_NOT_INSTALLED))\n else:\n _verify_django_version(django)\n return DjangoFixup(app).install()\n\n\nclass DjangoFixup(object):\n \"\"\"Fixup installed when using Django.\"\"\"\n\n def __init__(self, app):\n self.app = app\n if _state.default_app is None:\n self.app.set_default()\n self._worker_fixup = None\n\n def install(self):\n # Need to add project directory to path.\n # The project directory has precedence over system modules,\n # so we prepend it to the path.\n sys.path.prepend(os.getcwd())\n\n self._settings = symbol_by_name('django.conf:settings')\n self.app.loader.now = self.now\n\n signals.import_modules.connect(self.on_import_modules)\n signals.worker_init.connect(self.on_worker_init)\n return self\n\n @property\n def worker_fixup(self):\n if self._worker_fixup is None:\n self._worker_fixup = DjangoWorkerFixup(self.app)\n return self._worker_fixup\n\n @worker_fixup.setter\n def worker_fixup(self, value):\n self._worker_fixup = value\n\n def on_import_modules(self, **kwargs):\n # call django.setup() before task modules are imported\n self.worker_fixup.validate_models()\n\n def on_worker_init(self, **kwargs):\n self.worker_fixup.install()\n\n def now(self, utc=False):\n return datetime.utcnow() if utc else self._now()\n\n def autodiscover_tasks(self):\n from django.apps import apps\n return [config.name for config in apps.get_app_configs()]\n\n @cached_property\n def _now(self):\n return 
symbol_by_name('django.utils.timezone:now')\n\n\nclass DjangoWorkerFixup(object):\n _db_recycles = 0\n\n def __init__(self, app):\n self.app = app\n self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None)\n self._db = import_module('django.db')\n self._cache = import_module('django.core.cache')\n self._settings = symbol_by_name('django.conf:settings')\n\n self.interface_errors = (\n symbol_by_name('django.db.utils.InterfaceError'),\n )\n self.DatabaseError = symbol_by_name('django.db:DatabaseError')\n\n def django_setup(self):\n import django\n django.setup()\n\n def validate_models(self):\n from django.core.checks import run_checks\n self.django_setup()\n run_checks()\n\n def install(self):\n signals.beat_embedded_init.connect(self.close_database)\n signals.worker_ready.connect(self.on_worker_ready)\n signals.task_prerun.connect(self.on_task_prerun)\n signals.task_postrun.connect(self.on_task_postrun)\n signals.worker_process_init.connect(self.on_worker_process_init)\n self.close_database()\n self.close_cache()\n return self\n\n def on_worker_process_init(self, **kwargs):\n # Child process must validate models again if on Windows,\n # or if they were started using execv.\n if os.environ.get('FORKED_BY_MULTIPROCESSING'):\n self.validate_models()\n\n # close connections:\n # the parent process may have established these,\n # so need to close them.\n\n # calling db.close() on some DB connections will cause\n # the inherited DB conn to also get broken in the parent\n # process so we need to remove it without triggering any\n # network IO that close() might cause.\n for c in self._db.connections.all():\n if c and c.connection:\n self._maybe_close_db_fd(c.connection)\n\n # use the _ version to avoid DB_REUSE preventing the conn.close() call\n self._close_database()\n self.close_cache()\n\n def _maybe_close_db_fd(self, fd):\n try:\n _maybe_close_fd(fd)\n except self.interface_errors:\n pass\n\n def on_task_prerun(self, sender, **kwargs):\n \"\"\"Called before every task.\"\"\"\n if not getattr(sender.request, 'is_eager', False):\n self.close_database()\n\n def on_task_postrun(self, sender, **kwargs):\n # See https://groups.google.com/group/django-users/\n # browse_thread/thread/78200863d0c07c6d/\n if not getattr(sender.request, 'is_eager', False):\n self.close_database()\n self.close_cache()\n\n def close_database(self, **kwargs):\n if not self.db_reuse_max:\n return self._close_database()\n if self._db_recycles >= self.db_reuse_max * 2:\n self._db_recycles = 0\n self._close_database()\n self._db_recycles += 1\n\n def _close_database(self):\n for conn in self._db.connections.all():\n try:\n conn.close_if_unusable_or_obsolete()\n except self.interface_errors:\n pass\n except self.DatabaseError as exc:\n str_exc = str(exc)\n if 'closed' not in str_exc and 'not connected' not in str_exc:\n raise\n\n def close_cache(self):\n try:\n self._cache.close_caches()\n except (TypeError, AttributeError):\n pass\n\n def on_worker_ready(self, **kwargs):\n if self._settings.DEBUG:\n warnings.warn('Using settings.DEBUG leads to a memory leak, never '\n 'use this setting in production environments!')\n", "path": "celery/fixups/django.py"}], "after_files": [{"content": "\"\"\"Django-specific customization.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nimport warnings\nfrom datetime import datetime\nfrom importlib import import_module\n\nfrom kombu.utils.imports import symbol_by_name\nfrom kombu.utils.objects import cached_property\n\nfrom celery import _state, 
signals\nfrom celery.exceptions import FixupWarning, ImproperlyConfigured\n\n__all__ = ('DjangoFixup', 'fixup')\n\nERR_NOT_INSTALLED = \"\"\"\\\nEnvironment variable DJANGO_SETTINGS_MODULE is defined\nbut Django isn't installed. Won't apply Django fix-ups!\n\"\"\"\n\n\ndef _maybe_close_fd(fh):\n try:\n os.close(fh.fileno())\n except (AttributeError, OSError, TypeError):\n # TypeError added for celery#962\n pass\n\n\ndef _verify_django_version(django):\n if django.VERSION < (1, 11):\n raise ImproperlyConfigured('Celery 4.x requires Django 1.11 or later.')\n\n\ndef fixup(app, env='DJANGO_SETTINGS_MODULE'):\n \"\"\"Install Django fixup if settings module environment is set.\"\"\"\n SETTINGS_MODULE = os.environ.get(env)\n if SETTINGS_MODULE and 'django' not in app.loader_cls.lower():\n try:\n import django # noqa\n except ImportError:\n warnings.warn(FixupWarning(ERR_NOT_INSTALLED))\n else:\n _verify_django_version(django)\n return DjangoFixup(app).install()\n\n\nclass DjangoFixup(object):\n \"\"\"Fixup installed when using Django.\"\"\"\n\n def __init__(self, app):\n self.app = app\n if _state.default_app is None:\n self.app.set_default()\n self._worker_fixup = None\n\n def install(self):\n # Need to add project directory to path.\n # The project directory has precedence over system modules,\n # so we prepend it to the path.\n sys.path.insert(0, os.getcwd())\n\n self._settings = symbol_by_name('django.conf:settings')\n self.app.loader.now = self.now\n\n signals.import_modules.connect(self.on_import_modules)\n signals.worker_init.connect(self.on_worker_init)\n return self\n\n @property\n def worker_fixup(self):\n if self._worker_fixup is None:\n self._worker_fixup = DjangoWorkerFixup(self.app)\n return self._worker_fixup\n\n @worker_fixup.setter\n def worker_fixup(self, value):\n self._worker_fixup = value\n\n def on_import_modules(self, **kwargs):\n # call django.setup() before task modules are imported\n self.worker_fixup.validate_models()\n\n def on_worker_init(self, **kwargs):\n self.worker_fixup.install()\n\n def now(self, utc=False):\n return datetime.utcnow() if utc else self._now()\n\n def autodiscover_tasks(self):\n from django.apps import apps\n return [config.name for config in apps.get_app_configs()]\n\n @cached_property\n def _now(self):\n return symbol_by_name('django.utils.timezone:now')\n\n\nclass DjangoWorkerFixup(object):\n _db_recycles = 0\n\n def __init__(self, app):\n self.app = app\n self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None)\n self._db = import_module('django.db')\n self._cache = import_module('django.core.cache')\n self._settings = symbol_by_name('django.conf:settings')\n\n self.interface_errors = (\n symbol_by_name('django.db.utils.InterfaceError'),\n )\n self.DatabaseError = symbol_by_name('django.db:DatabaseError')\n\n def django_setup(self):\n import django\n django.setup()\n\n def validate_models(self):\n from django.core.checks import run_checks\n self.django_setup()\n run_checks()\n\n def install(self):\n signals.beat_embedded_init.connect(self.close_database)\n signals.worker_ready.connect(self.on_worker_ready)\n signals.task_prerun.connect(self.on_task_prerun)\n signals.task_postrun.connect(self.on_task_postrun)\n signals.worker_process_init.connect(self.on_worker_process_init)\n self.close_database()\n self.close_cache()\n return self\n\n def on_worker_process_init(self, **kwargs):\n # Child process must validate models again if on Windows,\n # or if they were started using execv.\n if os.environ.get('FORKED_BY_MULTIPROCESSING'):\n 
self.validate_models()\n\n # close connections:\n # the parent process may have established these,\n # so need to close them.\n\n # calling db.close() on some DB connections will cause\n # the inherited DB conn to also get broken in the parent\n # process so we need to remove it without triggering any\n # network IO that close() might cause.\n for c in self._db.connections.all():\n if c and c.connection:\n self._maybe_close_db_fd(c.connection)\n\n # use the _ version to avoid DB_REUSE preventing the conn.close() call\n self._close_database()\n self.close_cache()\n\n def _maybe_close_db_fd(self, fd):\n try:\n _maybe_close_fd(fd)\n except self.interface_errors:\n pass\n\n def on_task_prerun(self, sender, **kwargs):\n \"\"\"Called before every task.\"\"\"\n if not getattr(sender.request, 'is_eager', False):\n self.close_database()\n\n def on_task_postrun(self, sender, **kwargs):\n # See https://groups.google.com/group/django-users/\n # browse_thread/thread/78200863d0c07c6d/\n if not getattr(sender.request, 'is_eager', False):\n self.close_database()\n self.close_cache()\n\n def close_database(self, **kwargs):\n if not self.db_reuse_max:\n return self._close_database()\n if self._db_recycles >= self.db_reuse_max * 2:\n self._db_recycles = 0\n self._close_database()\n self._db_recycles += 1\n\n def _close_database(self):\n for conn in self._db.connections.all():\n try:\n conn.close_if_unusable_or_obsolete()\n except self.interface_errors:\n pass\n except self.DatabaseError as exc:\n str_exc = str(exc)\n if 'closed' not in str_exc and 'not connected' not in str_exc:\n raise\n\n def close_cache(self):\n try:\n self._cache.close_caches()\n except (TypeError, AttributeError):\n pass\n\n def on_worker_ready(self, **kwargs):\n if self._settings.DEBUG:\n warnings.warn('Using settings.DEBUG leads to a memory leak, never '\n 'use this setting in production environments!')\n", "path": "celery/fixups/django.py"}]}
| 2,310 | 134 |
gh_patches_debug_6070
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-848
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Accessing /buckets/default/collections/ raise a 500
Verified with Kinto 4.1.1 and 4.3.0
```
$ http https://kinto.dev.mozaws.net/v1/buckets/default/collections/ --auth toto:toot
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/tweens.py", line 22, in excview_tween
response = handler(request)
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid_tm/__init__.py", line 109, in tm_tween
reraise(*exc_info)
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid_tm/__init__.py", line 88, in tm_tween
response = handler(request)
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/router.py", line 158, in handle_request
view_name
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/view.py", line 547, in _call_view
response = view_callable(context, request)
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/viewderivers.py", line 413, in viewresult_to_response
result = view(context, request)
File "~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/viewderivers.py", line 147, in _requestonly_view
response = view(request)
File "~/venvs/kinto/local/lib/python2.7/site-packages/kinto/plugins/default_bucket/__init__.py", line 140, in default_bucket
create_collection(request, bucket_id)
File "~/venvs/kinto/local/lib/python2.7/site-packages/kinto/plugins/default_bucket/__init__.py", line 63, in create_collection
uri=collection_uri)
File "~/venvs/kinto/local/lib/python2.7/site-packages/kinto/plugins/default_bucket/__init__.py", line 74, in resource_create_object
resource_name, matchdict = view_lookup(request, uri)
File "~/venvs/kinto/local/lib/python2.7/site-packages/kinto/core/utils.py", line 415, in view_lookup
raise ValueError("URI has no route")
ValueError: URI has no route
```
--- END ISSUE ---
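The failure mode behind this traceback: with the trailing slash the matched `subpath` is `'collections/'`, so `subpath.split('/')[1]` is an empty string and the collection URI built from it has no matching route, hence the `ValueError`. The guard below mirrors the `rstrip('/')` change in the golden diff further down; the helper name and sample values are illustrative only:

```python
def targets_a_specific_collection(subpath):
    # "collections/" alone is the plural (list) endpoint and should be passed
    # through untouched; only "collections/<id>" names a collection to create.
    return bool(subpath) and subpath.rstrip('/').startswith('collections/')

assert targets_a_specific_collection('collections/articles') is True
assert targets_a_specific_collection('collections/') is False  # the 500 case above
assert targets_a_specific_collection('') is False
```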
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/default_bucket/__init__.py`
Content:
```
1 import uuid
2
3 import six
4 from pyramid import httpexceptions
5 from pyramid.settings import asbool
6 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
7
8 from kinto.core.errors import raise_invalid
9 from kinto.core.events import ACTIONS
10 from kinto.core.utils import (
11 build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)
12 from kinto.core.storage import exceptions as storage_exceptions
13
14 from kinto.authorization import RouteFactory
15 from kinto.views.buckets import Bucket
16 from kinto.views.collections import Collection
17
18
19 def create_bucket(request, bucket_id):
20 """Create a bucket if it doesn't exists."""
21 bucket_put = (request.method.lower() == 'put' and
22 request.path.endswith('buckets/default'))
23 # Do nothing if current request will already create the bucket.
24 if bucket_put:
25 return
26
27 # Do not intent to create multiple times per request (e.g. in batch).
28 already_created = request.bound_data.setdefault('buckets', {})
29 if bucket_id in already_created:
30 return
31
32 bucket_uri = instance_uri(request, 'bucket', id=bucket_id)
33 bucket = resource_create_object(request=request,
34 resource_cls=Bucket,
35 uri=bucket_uri)
36 already_created[bucket_id] = bucket
37
38
39 def create_collection(request, bucket_id):
40 # Do nothing if current request does not involve a collection.
41 subpath = request.matchdict.get('subpath')
42 if not (subpath and subpath.startswith('collections/')):
43 return
44
45 collection_id = subpath.split('/')[1]
46 collection_uri = instance_uri(request, 'collection',
47 bucket_id=bucket_id,
48 id=collection_id)
49
50 # Do not intent to create multiple times per request (e.g. in batch).
51 already_created = request.bound_data.setdefault('collections', {})
52 if collection_uri in already_created:
53 return
54
55 # Do nothing if current request will already create the collection.
56 collection_put = (request.method.lower() == 'put' and
57 request.path.endswith(collection_id))
58 if collection_put:
59 return
60
61 collection = resource_create_object(request=request,
62 resource_cls=Collection,
63 uri=collection_uri)
64 already_created[collection_uri] = collection
65
66
67 def resource_create_object(request, resource_cls, uri):
68 """In the default bucket, the bucket and collection are implicitly
69 created. This helper instantiate the resource and simulate a request
70 with its RootFactory on the instantiated resource.
71 :returns: the created object
72 :rtype: dict
73 """
74 resource_name, matchdict = view_lookup(request, uri)
75
76 # Build a fake request, mainly used to populate the create events that
77 # will be triggered by the resource.
78 fakerequest = build_request(request, {
79 'method': 'PUT',
80 'path': uri,
81 })
82 fakerequest.matchdict = matchdict
83 fakerequest.bound_data = request.bound_data
84 fakerequest.authn_type = request.authn_type
85 fakerequest.selected_userid = request.selected_userid
86 fakerequest.errors = request.errors
87 fakerequest.current_resource_name = resource_name
88
89 obj_id = matchdict['id']
90
91 # Fake context, required to instantiate a resource.
92 context = RouteFactory(fakerequest)
93 context.resource_name = resource_name
94 resource = resource_cls(fakerequest, context)
95
96 # Check that provided id is valid for this resource.
97 if not resource.model.id_generator.match(obj_id):
98 error_details = {
99 'location': 'path',
100 'description': "Invalid %s id" % resource_name
101 }
102 raise_invalid(resource.request, **error_details)
103
104 data = {'id': obj_id}
105 try:
106 obj = resource.model.create_record(data)
107 # Since the current request is not a resource (but a straight Service),
108 # we simulate a request on a resource.
109 # This will be used in the resource event payload.
110 resource.postprocess(data, action=ACTIONS.CREATE)
111 except storage_exceptions.UnicityError as e:
112 obj = e.record
113 return obj
114
115
116 def default_bucket(request):
117 if request.method.lower() == 'options':
118 path = request.path.replace('default', 'unknown')
119 subrequest = build_request(request, {
120 'method': 'OPTIONS',
121 'path': path
122 })
123 return request.invoke_subrequest(subrequest)
124
125 if Authenticated not in request.effective_principals:
126 # Pass through the forbidden_view_config
127 raise httpexceptions.HTTPForbidden()
128
129 settings = request.registry.settings
130
131 if asbool(settings['readonly']):
132 raise httpexceptions.HTTPMethodNotAllowed()
133
134 bucket_id = request.default_bucket_id
135
136 # Implicit object creations.
137 # Make sure bucket exists
138 create_bucket(request, bucket_id)
139 # Make sure the collection exists
140 create_collection(request, bucket_id)
141
142 path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)
143 querystring = request.url[(request.url.index(request.path) +
144 len(request.path)):]
145 try:
146 # If 'id' is provided as 'default', replace with actual bucket id.
147 body = request.json
148 body['data']['id'] = body['data']['id'].replace('default', bucket_id)
149 except:
150 body = request.body
151 subrequest = build_request(request, {
152 'method': request.method,
153 'path': path + querystring,
154 'body': body,
155 })
156 subrequest.bound_data = request.bound_data
157
158 try:
159 response = request.invoke_subrequest(subrequest)
160 except httpexceptions.HTTPException as error:
161 is_redirect = error.status_code < 400
162 if error.content_type == 'application/json' or is_redirect:
163 response = reapply_cors(subrequest, error)
164 else:
165 # Ask the upper level to format the error.
166 raise error
167 return response
168
169
170 def default_bucket_id(request):
171 settings = request.registry.settings
172 secret = settings['userid_hmac_secret']
173 # Build the user unguessable bucket_id UUID from its user_id
174 digest = hmac_digest(secret, request.prefixed_userid)
175 return six.text_type(uuid.UUID(digest[:32]))
176
177
178 def get_user_info(request):
179 user_info = {
180 'id': request.prefixed_userid,
181 'bucket': request.default_bucket_id
182 }
183 return user_info
184
185
186 def includeme(config):
187 # Redirect default to the right endpoint
188 config.add_view(default_bucket,
189 route_name='default_bucket',
190 permission=NO_PERMISSION_REQUIRED)
191 config.add_view(default_bucket,
192 route_name='default_bucket_collection',
193 permission=NO_PERMISSION_REQUIRED)
194
195 config.add_route('default_bucket_collection',
196 '/buckets/default/{subpath:.*}')
197 config.add_route('default_bucket', '/buckets/default')
198
199 # Provide helpers
200 config.add_request_method(default_bucket_id, reify=True)
201 # Override kinto.core default user info
202 config.add_request_method(get_user_info)
203
204 config.add_api_capability(
205 "default_bucket",
206 description="The default bucket is an alias for a personal"
207 " bucket where collections are created implicitly.",
208 url="https://kinto.readthedocs.io/en/latest/api/1.x/"
209 "buckets.html#personal-bucket-default")
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py
--- a/kinto/plugins/default_bucket/__init__.py
+++ b/kinto/plugins/default_bucket/__init__.py
@@ -39,7 +39,7 @@
def create_collection(request, bucket_id):
# Do nothing if current request does not involve a collection.
subpath = request.matchdict.get('subpath')
- if not (subpath and subpath.startswith('collections/')):
+ if not (subpath and subpath.rstrip('/').startswith('collections/')):
return
collection_id = subpath.split('/')[1]
|
{"golden_diff": "diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py\n--- a/kinto/plugins/default_bucket/__init__.py\n+++ b/kinto/plugins/default_bucket/__init__.py\n@@ -39,7 +39,7 @@\n def create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n- if not (subpath and subpath.startswith('collections/')):\n+ if not (subpath and subpath.rstrip('/').startswith('collections/')):\n return\n \n collection_id = subpath.split('/')[1]\n", "issue": "Accessing /buckets/default/collections/ raise a 500\nVerified with Kinto 4.1.1 and 4.3.0\n\n```\n$ http https://kinto.dev.mozaws.net/v1/buckets/default/collections/ --auth toto:toot\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/tweens.py\", line 22, in excview_tween\n response = handler(request)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid_tm/__init__.py\", line 109, in tm_tween\n reraise(*exc_info)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid_tm/__init__.py\", line 88, in tm_tween\n response = handler(request)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/router.py\", line 158, in handle_request\n view_name\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/view.py\", line 547, in _call_view\n response = view_callable(context, request)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/viewderivers.py\", line 413, in viewresult_to_response\n result = view(context, request)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/pyramid/viewderivers.py\", line 147, in _requestonly_view\n response = view(request)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/kinto/plugins/default_bucket/__init__.py\", line 140, in default_bucket\n create_collection(request, bucket_id)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/kinto/plugins/default_bucket/__init__.py\", line 63, in create_collection\n uri=collection_uri)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/kinto/plugins/default_bucket/__init__.py\", line 74, in resource_create_object\n resource_name, matchdict = view_lookup(request, uri)\n File \"~/venvs/kinto/local/lib/python2.7/site-packages/kinto/core/utils.py\", line 415, in view_lookup\n raise ValueError(\"URI has no route\")\nValueError: URI has no route\n```\n\n", "before_files": [{"content": "import uuid\n\nimport six\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom kinto.core.errors import raise_invalid\nfrom kinto.core.events import ACTIONS\nfrom kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\nfrom kinto.core.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. 
in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n bucket_uri = instance_uri(request, 'bucket', id=bucket_id)\n bucket = resource_create_object(request=request,\n resource_cls=Bucket,\n uri=bucket_uri)\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = instance_uri(request, 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n collection = resource_create_object(request=request,\n resource_cls=Collection,\n uri=collection_uri)\n already_created[collection_uri] = collection\n\n\ndef resource_create_object(request, resource_cls, uri):\n \"\"\"In the default bucket, the bucket and collection are implicitly\n created. This helper instantiate the resource and simulate a request\n with its RootFactory on the instantiated resource.\n :returns: the created object\n :rtype: dict\n \"\"\"\n resource_name, matchdict = view_lookup(request, uri)\n\n # Build a fake request, mainly used to populate the create events that\n # will be triggered by the resource.\n fakerequest = build_request(request, {\n 'method': 'PUT',\n 'path': uri,\n })\n fakerequest.matchdict = matchdict\n fakerequest.bound_data = request.bound_data\n fakerequest.authn_type = request.authn_type\n fakerequest.selected_userid = request.selected_userid\n fakerequest.errors = request.errors\n fakerequest.current_resource_name = resource_name\n\n obj_id = matchdict['id']\n\n # Fake context, required to instantiate a resource.\n context = RouteFactory(fakerequest)\n context.resource_name = resource_name\n resource = resource_cls(fakerequest, context)\n\n # Check that provided id is valid for this resource.\n if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid %s id\" % resource_name\n }\n raise_invalid(resource.request, **error_details)\n\n data = {'id': obj_id}\n try:\n obj = resource.model.create_record(data)\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n resource.postprocess(data, action=ACTIONS.CREATE)\n except storage_exceptions.UnicityError as e:\n obj = e.record\n return obj\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n\n # Implicit object creations.\n # Make sure bucket exists\n create_bucket(request, 
bucket_id)\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n try:\n # If 'id' is provided as 'default', replace with actual bucket id.\n body = request.json\n body['data']['id'] = body['data']['id'].replace('default', bucket_id)\n except:\n body = request.body\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': body,\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return six.text_type(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override kinto.core default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}], "after_files": [{"content": "import uuid\n\nimport six\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom kinto.core.errors import raise_invalid\nfrom kinto.core.events import ACTIONS\nfrom kinto.core.utils import (\n build_request, reapply_cors, hmac_digest, instance_uri, view_lookup)\nfrom kinto.core.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. 
in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n bucket_uri = instance_uri(request, 'bucket', id=bucket_id)\n bucket = resource_create_object(request=request,\n resource_cls=Bucket,\n uri=bucket_uri)\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.rstrip('/').startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = instance_uri(request, 'collection',\n bucket_id=bucket_id,\n id=collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n collection = resource_create_object(request=request,\n resource_cls=Collection,\n uri=collection_uri)\n already_created[collection_uri] = collection\n\n\ndef resource_create_object(request, resource_cls, uri):\n \"\"\"In the default bucket, the bucket and collection are implicitly\n created. This helper instantiate the resource and simulate a request\n with its RootFactory on the instantiated resource.\n :returns: the created object\n :rtype: dict\n \"\"\"\n resource_name, matchdict = view_lookup(request, uri)\n\n # Build a fake request, mainly used to populate the create events that\n # will be triggered by the resource.\n fakerequest = build_request(request, {\n 'method': 'PUT',\n 'path': uri,\n })\n fakerequest.matchdict = matchdict\n fakerequest.bound_data = request.bound_data\n fakerequest.authn_type = request.authn_type\n fakerequest.selected_userid = request.selected_userid\n fakerequest.errors = request.errors\n fakerequest.current_resource_name = resource_name\n\n obj_id = matchdict['id']\n\n # Fake context, required to instantiate a resource.\n context = RouteFactory(fakerequest)\n context.resource_name = resource_name\n resource = resource_cls(fakerequest, context)\n\n # Check that provided id is valid for this resource.\n if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid %s id\" % resource_name\n }\n raise_invalid(resource.request, **error_details)\n\n data = {'id': obj_id}\n try:\n obj = resource.model.create_record(data)\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n resource.postprocess(data, action=ACTIONS.CREATE)\n except storage_exceptions.UnicityError as e:\n obj = e.record\n return obj\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n\n # Implicit object creations.\n # Make sure bucket exists\n 
create_bucket(request, bucket_id)\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n try:\n # If 'id' is provided as 'default', replace with actual bucket id.\n body = request.json\n body['data']['id'] = body['data']['id'].replace('default', bucket_id)\n except:\n body = request.body\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': body,\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return six.text_type(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override kinto.core default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}]}
| 2,872 | 139 |
gh_patches_debug_24871
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-623
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
add test session to nox without installing any "extras"
https://github.com/googleapis/python-bigquery/pull/613 is making me a bit nervous that we might accidentally introduce a required dependency that we thought was optional. It wouldn't be the first time this has happened (https://github.com/googleapis/python-bigquery/issues/549), so I'd like at least a unit test session that runs without any extras.
--- END ISSUE ---
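A sketch of what such a session could look like (the name `unit_noextras` and the exact dependency list are assumptions, not the project's actual change); the point is simply to install the package with a bare `.` instead of `.[all]`, so that an accidental hard dependency on an optional extra surfaces as an import error in the unit tests. It assumes the imports and constants already defined in the `noxfile.py` shown below:

```python
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS[-1])
def unit_noextras(session):
    """Run the unit test suite without installing any optional extras."""
    session.install("mock", "pytest", "google-cloud-testutils", "pytest-cov", "freezegun")
    # Bare "." rather than ".[all]": no extras are pulled in.
    session.install("-e", ".")
    session.run(
        "py.test",
        "--quiet",
        os.path.join("tests", "unit"),
        *session.posargs,
    )
```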
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import pathlib
18 import os
19 import shutil
20
21 import nox
22
23
24 BLACK_VERSION = "black==19.10b0"
25 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
26
27 DEFAULT_PYTHON_VERSION = "3.8"
28 SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
29 UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
30 CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
31
32 # 'docfx' is excluded since it only needs to run in 'docs-presubmit'
33 nox.options.sessions = [
34 "unit",
35 "system",
36 "snippets",
37 "cover",
38 "lint",
39 "lint_setup_py",
40 "blacken",
41 "docs",
42 ]
43
44
45 def default(session):
46 """Default unit test session.
47
48 This is intended to be run **without** an interpreter set, so
49 that the current ``python`` (on the ``PATH``) or the version of
50 Python corresponding to the ``nox`` binary the ``PATH`` can
51 run the tests.
52 """
53 constraints_path = str(
54 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
55 )
56
57 # Install all test dependencies, then install local packages in-place.
58 session.install(
59 "mock",
60 "pytest",
61 "google-cloud-testutils",
62 "pytest-cov",
63 "freezegun",
64 "-c",
65 constraints_path,
66 )
67
68 session.install("-e", ".[all]", "-c", constraints_path)
69
70 session.install("ipython", "-c", constraints_path)
71
72 # Run py.test against the unit tests.
73 session.run(
74 "py.test",
75 "--quiet",
76 "--cov=google.cloud.bigquery",
77 "--cov=tests.unit",
78 "--cov-append",
79 "--cov-config=.coveragerc",
80 "--cov-report=",
81 "--cov-fail-under=0",
82 os.path.join("tests", "unit"),
83 *session.posargs,
84 )
85
86
87 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
88 def unit(session):
89 """Run the unit test suite."""
90 default(session)
91
92
93 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
94 def system(session):
95 """Run the system test suite."""
96
97 constraints_path = str(
98 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
99 )
100
101 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
102 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
103 session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
104
105 # Sanity check: Only run system tests if the environment variable is set.
106 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
107 session.skip("Credentials must be set via environment variable.")
108
109 # Use pre-release gRPC for system tests.
110 session.install("--pre", "grpcio", "-c", constraints_path)
111
112 # Install all test dependencies, then install local packages in place.
113 session.install(
114 "mock", "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path
115 )
116 if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") == "true":
117 # mTLS test requires pyopenssl and latest google-cloud-storage
118 session.install("google-cloud-storage", "pyopenssl")
119 else:
120 session.install("google-cloud-storage", "-c", constraints_path)
121
122 session.install("-e", ".[all]", "-c", constraints_path)
123 session.install("ipython", "-c", constraints_path)
124
125 # Run py.test against the system tests.
126 session.run("py.test", "--quiet", os.path.join("tests", "system"), *session.posargs)
127
128
129 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
130 def snippets(session):
131 """Run the snippets test suite."""
132
133 # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.
134 if os.environ.get("RUN_SNIPPETS_TESTS", "true") == "false":
135 session.skip("RUN_SNIPPETS_TESTS is set to false, skipping")
136
137 # Sanity check: Only run snippets tests if the environment variable is set.
138 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
139 session.skip("Credentials must be set via environment variable.")
140
141 constraints_path = str(
142 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
143 )
144
145 # Install all test dependencies, then install local packages in place.
146 session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
147 session.install("google-cloud-storage", "-c", constraints_path)
148 session.install("grpcio", "-c", constraints_path)
149
150 session.install("-e", ".[all]", "-c", constraints_path)
151
152 # Run py.test against the snippets tests.
153 # Skip tests in samples/snippets, as those are run in a different session
154 # using the nox config from that directory.
155 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
156 session.run(
157 "py.test",
158 "samples",
159 "--ignore=samples/snippets",
160 "--ignore=samples/geography",
161 *session.posargs,
162 )
163
164
165 @nox.session(python=DEFAULT_PYTHON_VERSION)
166 def cover(session):
167 """Run the final coverage report.
168
169 This outputs the coverage report aggregating coverage from the unit
170 test runs (not system test runs), and then erases coverage data.
171 """
172 session.install("coverage", "pytest-cov")
173 session.run("coverage", "report", "--show-missing", "--fail-under=100")
174 session.run("coverage", "erase")
175
176
177 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
178 def prerelease_deps(session):
179 """Run all tests with prerelease versions of dependencies installed.
180
181 https://github.com/googleapis/python-bigquery/issues/95
182 """
183 # PyArrow prerelease packages are published to an alternative PyPI host.
184 # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages
185 session.install(
186 "--extra-index-url", "https://pypi.fury.io/arrow-nightlies/", "--pre", "pyarrow"
187 )
188 session.install("--pre", "grpcio", "pandas")
189 session.install(
190 "freezegun",
191 "google-cloud-storage",
192 "google-cloud-testutils",
193 "IPython",
194 "mock",
195 "psutil",
196 "pytest",
197 "pytest-cov",
198 )
199 session.install("-e", ".[all]")
200
201 # Print out prerelease package versions.
202 session.run("python", "-c", "import grpc; print(grpc.__version__)")
203 session.run("python", "-c", "import pandas; print(pandas.__version__)")
204 session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")
205
206 # Run all tests, except a few samples tests which require extra dependencies.
207 session.run("py.test", "tests/unit")
208 session.run("py.test", "tests/system")
209 session.run("py.test", "samples/tests")
210
211
212 @nox.session(python=DEFAULT_PYTHON_VERSION)
213 def lint(session):
214 """Run linters.
215
216 Returns a failure if the linters find linting errors or sufficiently
217 serious code quality issues.
218 """
219
220 session.install("flake8", BLACK_VERSION)
221 session.install("-e", ".")
222 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
223 session.run("flake8", "tests")
224 session.run("flake8", os.path.join("docs", "samples"))
225 session.run("flake8", os.path.join("docs", "snippets.py"))
226 session.run("black", "--check", *BLACK_PATHS)
227
228
229 @nox.session(python=DEFAULT_PYTHON_VERSION)
230 def lint_setup_py(session):
231 """Verify that setup.py is valid (including RST check)."""
232
233 session.install("docutils", "Pygments")
234 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
235
236
237 @nox.session(python="3.6")
238 def blacken(session):
239 """Run black.
240 Format code to uniform standard.
241
242 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
243 That run uses an image that doesn't have 3.6 installed. Before updating this
244 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
245 """
246 session.install(BLACK_VERSION)
247 session.run("black", *BLACK_PATHS)
248
249
250 @nox.session(python=DEFAULT_PYTHON_VERSION)
251 def docs(session):
252 """Build the docs."""
253
254 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
255 session.install("google-cloud-storage")
256 session.install("-e", ".[all]")
257
258 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
259 session.run(
260 "sphinx-build",
261 "-W", # warnings as errors
262 "-T", # show full traceback on exception
263 "-N", # no colors
264 "-b",
265 "html",
266 "-d",
267 os.path.join("docs", "_build", "doctrees", ""),
268 os.path.join("docs", ""),
269 os.path.join("docs", "_build", "html", ""),
270 )
271
272
273 @nox.session(python=DEFAULT_PYTHON_VERSION)
274 def docfx(session):
275 """Build the docfx yaml files for this library."""
276
277 session.install("-e", ".")
278 session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
279
280 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
281 session.run(
282 "sphinx-build",
283 "-T", # show full traceback on exception
284 "-N", # no colors
285 "-D",
286 (
287 "extensions=sphinx.ext.autodoc,"
288 "sphinx.ext.autosummary,"
289 "docfx_yaml.extension,"
290 "sphinx.ext.intersphinx,"
291 "sphinx.ext.coverage,"
292 "sphinx.ext.napoleon,"
293 "sphinx.ext.todo,"
294 "sphinx.ext.viewcode,"
295 "recommonmark"
296 ),
297 "-b",
298 "html",
299 "-d",
300 os.path.join("docs", "_build", "doctrees", ""),
301 os.path.join("docs", ""),
302 os.path.join("docs", "_build", "html", ""),
303 )
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -31,6 +31,7 @@
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
+ "unit_noextras",
"unit",
"system",
"snippets",
@@ -42,7 +43,7 @@
]
-def default(session):
+def default(session, install_extras=True):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
@@ -65,7 +66,8 @@
constraints_path,
)
- session.install("-e", ".[all]", "-c", constraints_path)
+ install_target = ".[all]" if install_extras else "."
+ session.install("-e", install_target, "-c", constraints_path)
session.install("ipython", "-c", constraints_path)
@@ -90,6 +92,12 @@
default(session)
[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])
+def unit_noextras(session):
+ """Run the unit test suite."""
+ default(session, install_extras=False)
+
+
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
|
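Editorial note: the patch above threads an `install_extras` flag through the shared `default()` helper and registers a `unit_noextras` session, so the unit suite also runs against a bare install and catches accidental hard dependencies on optional extras. A minimal, self-contained sketch of the same pattern is shown below; the package layout, session names, and Python versions are illustrative assumptions, not taken verbatim from the repository.

```python
# noxfile.py -- illustrative sketch of running unit tests with and without extras.
import nox

PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]  # assumed test matrix


def run_unit(session, install_extras=True):
    # Install the package either with all optional extras or bare, so that a
    # missing-optional-dependency bug surfaces in CI instead of at a user's site.
    target = ".[all]" if install_extras else "."
    session.install("pytest")
    session.install("-e", target)
    session.run("pytest", "tests/unit", *session.posargs)


@nox.session(python=PYTHON_VERSIONS)
def unit(session):
    """Unit tests with every optional extra installed."""
    run_unit(session)


@nox.session(python=PYTHON_VERSIONS[-1])
def unit_noextras(session):
    """Unit tests against a bare install (no extras)."""
    run_unit(session, install_extras=False)
```

Run locally with `nox -s unit_noextras`, the session fails as soon as the core package imports something that only ships with an extra.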
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -31,6 +31,7 @@\n \n # 'docfx' is excluded since it only needs to run in 'docs-presubmit'\n nox.options.sessions = [\n+ \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n@@ -42,7 +43,7 @@\n ]\n \n \n-def default(session):\n+def default(session, install_extras=True):\n \"\"\"Default unit test session.\n \n This is intended to be run **without** an interpreter set, so\n@@ -65,7 +66,8 @@\n constraints_path,\n )\n \n- session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n+ install_target = \".[all]\" if install_extras else \".\"\n+ session.install(\"-e\", install_target, \"-c\", constraints_path)\n \n session.install(\"ipython\", \"-c\", constraints_path)\n \n@@ -90,6 +92,12 @@\n default(session)\n \n \[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\n+def unit_noextras(session):\n+ \"\"\"Run the unit test suite.\"\"\"\n+ default(session, install_extras=False)\n+\n+\n @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n def system(session):\n \"\"\"Run the system test suite.\"\"\"\n", "issue": "add test session to nox without installing any \"extras\"\nhttps://github.com/googleapis/python-bigquery/pull/613 is making me a bit nervous that we might accidentally introduce a required dependency that we thought was optional. It wouldn't be the first time this has happened (https://github.com/googleapis/python-bigquery/issues/549), so I'd like at least a unit test session that runs without any extras.\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"docs\",\n]\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test 
against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email 
protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n 
\"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email 
protected](python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
| 3,613 | 309 |
gh_patches_debug_5511
|
rasdani/github-patches
|
git_diff
|
napalm-automation__napalm-692
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip >= 10.0.0 incompatibility
I am not going to create one of these issues for every single napalm module... but I did for https://github.com/napalm-automation/napalm-ansible/issues/123, where I also saw this issue.
pip 10.x no longer provides `pip.req`, which setup.py imports; see
https://github.com/pypa/pip/issues/5156
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """setup.py file."""
2 import uuid
3
4 from setuptools import setup, find_packages
5
6 from pip.req import parse_requirements
7
8
9 install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
10 reqs = [str(ir.req) for ir in install_reqs]
11
12 __author__ = 'David Barroso <[email protected]>'
13
14 setup(
15 name="napalm",
16 version='2.3.0',
17 packages=find_packages(exclude=("test*", )),
18 test_suite='test_base',
19 author="David Barroso, Kirk Byers, Mircea Ulinic",
20 author_email="[email protected], [email protected], [email protected]",
21 description="Network Automation and Programmability Abstraction Layer with Multivendor support",
22 classifiers=[
23 'Topic :: Utilities',
24 'Programming Language :: Python',
25 'Programming Language :: Python :: 2',
26 'Programming Language :: Python :: 2.7',
27 'Programming Language :: Python :: 3',
28 'Programming Language :: Python :: 3.4',
29 'Programming Language :: Python :: 3.5',
30 'Programming Language :: Python :: 3.6',
31 'Operating System :: POSIX :: Linux',
32 'Operating System :: MacOS',
33 ],
34 url="https://github.com/napalm-automation/napalm",
35 include_package_data=True,
36 install_requires=reqs,
37 entry_points={
38 'console_scripts': [
39 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',
40 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',
41 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',
42 'napalm=napalm.base.clitools.cl_napalm:main',
43 ],
44 }
45 )
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,13 +1,9 @@
"""setup.py file."""
-import uuid
-
from setuptools import setup, find_packages
-from pip.req import parse_requirements
-
+with open("requirements.txt", "r") as fs:
+ reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith("#"))]
-install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())
-reqs = [str(ir.req) for ir in install_reqs]
__author__ = 'David Barroso <[email protected]>'
|
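Editorial note: the replacement above stops importing pip's private `pip.req` module (removed in pip 10) and reads `requirements.txt` directly. A hedged, standalone sketch of that approach follows; the helper name and package metadata are placeholders, not part of the napalm source.

```python
# Illustrative sketch: build install_requires without touching pip internals.
from setuptools import setup, find_packages


def read_requirements(path="requirements.txt"):
    """Return the non-empty, non-comment lines of a pip-style requirements file."""
    with open(path, "r") as handle:
        return [
            line.strip()
            for line in handle.read().splitlines()
            if line.strip() and not line.strip().startswith("#")
        ]


setup(
    name="example-package",      # placeholder
    version="0.0.1",             # placeholder
    packages=find_packages(),
    install_requires=read_requirements(),
)
```

Note that this simple parser ignores pip-only syntax such as `-r includes.txt` or editable `-e` lines; that matches the plain pinned-requirements case the patch targets.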
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,13 +1,9 @@\n \"\"\"setup.py file.\"\"\"\n-import uuid\n-\n from setuptools import setup, find_packages\n \n-from pip.req import parse_requirements\n-\n+with open(\"requirements.txt\", \"r\") as fs:\n+ reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith(\"#\"))]\n \n-install_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())\n-reqs = [str(ir.req) for ir in install_reqs]\n \n __author__ = 'David Barroso <[email protected]>'\n", "issue": "pip >= 10.0.0 incompatibility \nI am not going to create these for every single napalm module... but did for https://github.com/napalm-automation/napalm-ansible/issues/123 where I also saw this issue\r\n\r\npip 10.x no longer provides pip.req as needed in setup.py\r\nhttps://github.com/pypa/pip/issues/5156\r\n\n", "before_files": [{"content": "\"\"\"setup.py file.\"\"\"\nimport uuid\n\nfrom setuptools import setup, find_packages\n\nfrom pip.req import parse_requirements\n\n\ninstall_reqs = parse_requirements('requirements.txt', session=uuid.uuid1())\nreqs = [str(ir.req) for ir in install_reqs]\n\n__author__ = 'David Barroso <[email protected]>'\n\nsetup(\n name=\"napalm\",\n version='2.3.0',\n packages=find_packages(exclude=(\"test*\", )),\n test_suite='test_base',\n author=\"David Barroso, Kirk Byers, Mircea Ulinic\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Network Automation and Programmability Abstraction Layer with Multivendor support\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/napalm-automation/napalm\",\n include_package_data=True,\n install_requires=reqs,\n entry_points={\n 'console_scripts': [\n 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',\n 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',\n 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',\n 'napalm=napalm.base.clitools.cl_napalm:main',\n ],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"setup.py file.\"\"\"\nfrom setuptools import setup, find_packages\n\nwith open(\"requirements.txt\", \"r\") as fs:\n reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith(\"#\"))]\n\n\n__author__ = 'David Barroso <[email protected]>'\n\nsetup(\n name=\"napalm\",\n version='2.3.0',\n packages=find_packages(exclude=(\"test*\", )),\n test_suite='test_base',\n author=\"David Barroso, Kirk Byers, Mircea Ulinic\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Network Automation and Programmability Abstraction Layer with Multivendor support\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/napalm-automation/napalm\",\n 
include_package_data=True,\n install_requires=reqs,\n entry_points={\n 'console_scripts': [\n 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main',\n 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main',\n 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main',\n 'napalm=napalm.base.clitools.cl_napalm:main',\n ],\n }\n)\n", "path": "setup.py"}]}
| 839 | 155 |
gh_patches_debug_5832
|
rasdani/github-patches
|
git_diff
|
pretalx__pretalx-227
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test invite mechanisms
This is approximately the third time that I found critical issues in invite mechanisms (for the reviewer team, this time).
- [x] Test orga invitation
- [x] Test reviewing invitation
- [x] Test speaker invitation by orga
- [x] Test speaker invitation by other speakers
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretalx/event/models/event.py`
Content:
```
1 from datetime import datetime, time
2
3 import pytz
4 from django.conf import settings
5 from django.core.mail import get_connection
6 from django.core.mail.backends.base import BaseEmailBackend
7 from django.core.validators import RegexValidator
8 from django.db import models
9 from django.utils.functional import cached_property
10 from django.utils.timezone import make_aware
11 from django.utils.translation import ugettext_lazy as _
12 from i18nfield.fields import I18nCharField
13 from urlman import Urls
14
15 from pretalx.common.mixins import LogMixin
16 from pretalx.common.models.settings import settings_hierarkey
17
18 SLUG_CHARS = 'a-zA-Z0-9.-'
19
20
21 def event_css_path(instance, filename):
22 return f'{instance.slug}/css/{filename}'
23
24
25 def event_logo_path(instance, filename):
26 return f'{instance.slug}/img/{filename}'
27
28
29 @settings_hierarkey.add()
30 class Event(LogMixin, models.Model):
31 name = I18nCharField(
32 max_length=200,
33 verbose_name=_('Name'),
34 )
35 slug = models.SlugField(
36 max_length=50, db_index=True,
37 validators=[
38 RegexValidator(
39 regex=f"^[{SLUG_CHARS}]+$",
40 message=_('The slug may only contain letters, numbers, dots and dashes.'),
41 ),
42 ],
43 verbose_name=_("Short form"),
44 help_text=_('Should be short, only contain lowercase letters and numbers, and must be unique, as it is used in URLs.'),
45 )
46 subtitle = I18nCharField(
47 max_length=200,
48 null=True, blank=True,
49 verbose_name=_('Subtitle'),
50 help_text=_('A tagline, or motto, or description. Not mandatory.')
51 )
52 is_public = models.BooleanField(
53 default=False,
54 verbose_name=_('Event is public')
55 )
56 permitted = models.ManyToManyField(
57 to='person.User',
58 through='person.EventPermission',
59 related_name="events",
60 )
61 date_from = models.DateField(
62 verbose_name=_('Event start date'),
63 )
64 date_to = models.DateField(
65 verbose_name=_('Event end date'),
66 )
67 timezone = models.CharField(
68 choices=[(tz, tz) for tz in pytz.common_timezones],
69 max_length=30,
70 default='UTC',
71 )
72 email = models.EmailField(
73 verbose_name=_('Orga email address'),
74 help_text=_('Will be used as sender/reply-to in emails'),
75 )
76 primary_color = models.CharField(
77 max_length=7,
78 null=True, blank=True,
79 validators=[],
80 verbose_name=_('Main event color'),
81 help_text=_('Please provide a hex value like #00ff00 if you do not like pretalx colors.'),
82 )
83 custom_css = models.FileField(
84 upload_to=event_css_path,
85 null=True, blank=True,
86 verbose_name=_('Custom Event CSS'),
87 help_text=_('Upload a custom CSS file if changing the primary color is not sufficient for you.'),
88 )
89 logo = models.FileField(
90 upload_to=event_logo_path,
91 null=True, blank=True,
92 verbose_name=_('Logo'),
93 help_text=_('Upload your event\'s logo, if it is suitable to be displayed in the frontend\'s header.'),
94 )
95 locale_array = models.TextField(default=settings.LANGUAGE_CODE)
96 locale = models.CharField(
97 max_length=32,
98 default=settings.LANGUAGE_CODE,
99 choices=settings.LANGUAGES,
100 verbose_name=_('Default language'),
101 )
102 accept_template = models.ForeignKey(
103 to='mail.MailTemplate', on_delete=models.CASCADE,
104 related_name='+', null=True, blank=True,
105 )
106 ack_template = models.ForeignKey(
107 to='mail.MailTemplate', on_delete=models.CASCADE,
108 related_name='+', null=True, blank=True,
109 )
110 reject_template = models.ForeignKey(
111 to='mail.MailTemplate', on_delete=models.CASCADE,
112 related_name='+', null=True, blank=True,
113 )
114 update_template = models.ForeignKey(
115 to='mail.MailTemplate', on_delete=models.CASCADE,
116 related_name='+', null=True, blank=True,
117 )
118
119 class urls(Urls):
120 base = '/{self.slug}'
121 login = '{base}/login'
122 logout = '{base}/logout'
123 reset = '{base}/reset'
124 submit = '{base}/submit'
125 user = '{base}/me'
126 user_delete = '{base}/me/delete'
127 user_submissions = '{user}/submissions'
128 schedule = '{base}/schedule'
129 changelog = '{schedule}/changelog'
130 frab_xml = '{schedule}.xml'
131 frab_json = '{schedule}.json'
132 frab_xcal = '{schedule}.xcal'
133 ical = '{schedule}.ics'
134 feed = '{schedule}/feed.xml'
135 location = '{schedule}/location'
136
137 class orga_urls(Urls):
138 create = '/orga/event/new'
139 base = '/orga/event/{self.slug}'
140 cfp = '{base}/cfp'
141 users = '{base}/users'
142 mail = '{base}/mails'
143 send_mails = '{mail}/send'
144 mail_templates = '{mail}/templates'
145 new_template = '{mail_templates}/new'
146 outbox = '{mail}/outbox'
147 sent_mails = '{mail}/sent'
148 send_outbox = '{outbox}/send'
149 purge_outbox = '{outbox}/purge'
150 submissions = '{base}/submissions'
151 submission_cards = '{base}/submissions/cards/'
152 new_submission = '{submissions}/new'
153 speakers = '{base}/speakers'
154 settings = '{base}/settings'
155 edit_settings = '{settings}/edit'
156 mail_settings = '{settings}/mail'
157 edit_mail_settings = '{mail_settings}/edit'
158 team_settings = '{settings}/team'
159 invite = '{team_settings}/add'
160 room_settings = '{settings}/rooms'
161 review_settings = '{settings}/reviews'
162 new_room = '{room_settings}/new'
163 schedule = '{base}/schedule'
164 release_schedule = '{schedule}/release'
165 reset_schedule = '{schedule}/reset'
166 toggle_schedule = '{schedule}/toggle'
167 reviews = '{base}/reviews'
168
169 class api_urls(Urls):
170 base = '/orga/event/{self.slug}'
171 schedule = '{base}/schedule/api'
172 rooms = '{schedule}/rooms'
173 talks = '{schedule}/talks'
174
175 def __str__(self) -> str:
176 return str(self.name)
177
178 @property
179 def locales(self) -> list:
180 return self.locale_array.split(",")
181
182 @property
183 def named_locales(self) -> list:
184 enabled = set(self.locale_array.split(","))
185 return [a for a in settings.LANGUAGES_NATURAL_NAMES if a[0] in enabled]
186
187 def save(self, *args, **kwargs):
188 was_created = not bool(self.pk)
189 super().save(*args, **kwargs)
190
191 if was_created:
192 self._build_initial_data()
193
194 def _get_default_submission_type(self):
195 from pretalx.submission.models import Submission, SubmissionType
196 sub_type = Submission.objects.filter(event=self).first()
197 if not sub_type:
198 sub_type = SubmissionType.objects.create(event=self, name='Talk')
199 return sub_type
200
201 @cached_property
202 def fixed_templates(self):
203 return [self.accept_template, self.ack_template, self.reject_template, self.update_template]
204
205 def _build_initial_data(self):
206 from pretalx.mail.default_templates import ACCEPT_TEXT, ACK_TEXT, GENERIC_SUBJECT, REJECT_TEXT, UPDATE_TEXT
207 from pretalx.mail.models import MailTemplate
208
209 if not hasattr(self, 'cfp'):
210 from pretalx.submission.models import CfP
211 CfP.objects.create(event=self, default_type=self._get_default_submission_type())
212
213 if not self.schedules.filter(version__isnull=True).exists():
214 from pretalx.schedule.models import Schedule
215 Schedule.objects.create(event=self)
216
217 self.accept_template = self.accept_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=ACCEPT_TEXT)
218 self.ack_template = self.ack_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=ACK_TEXT)
219 self.reject_template = self.reject_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=REJECT_TEXT)
220 self.update_template = self.update_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=UPDATE_TEXT)
221 self.save()
222
223 @cached_property
224 def pending_mails(self):
225 return self.queued_mails.filter(sent__isnull=True).count()
226
227 @cached_property
228 def wip_schedule(self):
229 schedule, _ = self.schedules.get_or_create(version__isnull=True)
230 return schedule
231
232 @cached_property
233 def current_schedule(self):
234 return self.schedules.order_by('-published').filter(published__isnull=False).first()
235
236 @property
237 def duration(self):
238 return (self.date_to - self.date_from).days + 1
239
240 def get_mail_backend(self, force_custom: bool=False) -> BaseEmailBackend:
241 from pretalx.common.mail import CustomSMTPBackend
242
243 if self.settings.smtp_use_custom or force_custom:
244 return CustomSMTPBackend(host=self.settings.smtp_host,
245 port=self.settings.smtp_port,
246 username=self.settings.smtp_username,
247 password=self.settings.smtp_password,
248 use_tls=self.settings.smtp_use_tls,
249 use_ssl=self.settings.smtp_use_ssl,
250 fail_silently=False)
251 else:
252 return get_connection(fail_silently=False)
253
254 @property
255 def event(self):
256 return self
257
258 @property
259 def datetime_from(self):
260 return make_aware(datetime.combine(
261 self.date_from,
262 time(hour=0, minute=0, second=0)
263 ), pytz.timezone(self.timezone))
264
265 @property
266 def datetime_to(self):
267 return make_aware(datetime.combine(
268 self.date_to,
269 time(hour=23, minute=59, second=59)
270 ), pytz.timezone(self.timezone))
271
272 @property
273 def reviews(self):
274 from pretalx.submission.models import Review
275 return Review.objects.filter(submission__event=self)
276
277 def release_schedule(self, name, user=None):
278 self.wip_schedule.freeze(name=name, user=user)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pretalx/event/models/event.py b/src/pretalx/event/models/event.py
--- a/src/pretalx/event/models/event.py
+++ b/src/pretalx/event/models/event.py
@@ -159,6 +159,7 @@
invite = '{team_settings}/add'
room_settings = '{settings}/rooms'
review_settings = '{settings}/reviews'
+ invite_reviewer = '{review_settings}/add'
new_room = '{room_settings}/new'
schedule = '{base}/schedule'
release_schedule = '{schedule}/release'
|
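Editorial note: `orga_urls` is a urlman-style class of URL templates, so the added `invite_reviewer` entry expands through `review_settings` and `base` down to the event slug. A rough regression-test sketch for the reviewer invite is shown below; the `event` and `orga_client` fixtures and the final assertion are assumptions about the test setup, not code taken from pretalx.

```python
# Sketch of pytest-django tests around the new invite_reviewer URL (assumed fixtures).
import pytest


@pytest.mark.django_db
def test_invite_reviewer_url_expands_from_templates(event):
    # urlman resolves '{review_settings}/add' -> '{settings}/reviews/add'
    # -> '/orga/event/<slug>/settings/reviews/add'
    expected = f"/orga/event/{event.slug}/settings/reviews/add"
    assert str(event.orga_urls.invite_reviewer) == expected


@pytest.mark.django_db
def test_orga_can_open_reviewer_invite_form(orga_client, event):
    # A logged-in organiser should reach the invite form without a redirect loop.
    response = orga_client.get(event.orga_urls.invite_reviewer, follow=True)
    assert response.status_code == 200
```

This only pins down routing and access; the behavioural checks from the issue (token creation, e-mail sending, permission flags) would live in further tests against the actual invite views.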
{"golden_diff": "diff --git a/src/pretalx/event/models/event.py b/src/pretalx/event/models/event.py\n--- a/src/pretalx/event/models/event.py\n+++ b/src/pretalx/event/models/event.py\n@@ -159,6 +159,7 @@\n invite = '{team_settings}/add'\n room_settings = '{settings}/rooms'\n review_settings = '{settings}/reviews'\n+ invite_reviewer = '{review_settings}/add'\n new_room = '{room_settings}/new'\n schedule = '{base}/schedule'\n release_schedule = '{schedule}/release'\n", "issue": "Test invite mechanisms\nThis is approximately the third time that I found critical issues in invite mechanisms (for the reviewer team, this time).\r\n\r\n- [x] Test orga invitation\r\n- [x] Test reviewing invitation\r\n- [x] Test speaker invitation by orga\r\n- [x] Test speaker invitation by other speakers\n", "before_files": [{"content": "from datetime import datetime, time\n\nimport pytz\nfrom django.conf import settings\nfrom django.core.mail import get_connection\nfrom django.core.mail.backends.base import BaseEmailBackend\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import ugettext_lazy as _\nfrom i18nfield.fields import I18nCharField\nfrom urlman import Urls\n\nfrom pretalx.common.mixins import LogMixin\nfrom pretalx.common.models.settings import settings_hierarkey\n\nSLUG_CHARS = 'a-zA-Z0-9.-'\n\n\ndef event_css_path(instance, filename):\n return f'{instance.slug}/css/{filename}'\n\n\ndef event_logo_path(instance, filename):\n return f'{instance.slug}/img/{filename}'\n\n\n@settings_hierarkey.add()\nclass Event(LogMixin, models.Model):\n name = I18nCharField(\n max_length=200,\n verbose_name=_('Name'),\n )\n slug = models.SlugField(\n max_length=50, db_index=True,\n validators=[\n RegexValidator(\n regex=f\"^[{SLUG_CHARS}]+$\",\n message=_('The slug may only contain letters, numbers, dots and dashes.'),\n ),\n ],\n verbose_name=_(\"Short form\"),\n help_text=_('Should be short, only contain lowercase letters and numbers, and must be unique, as it is used in URLs.'),\n )\n subtitle = I18nCharField(\n max_length=200,\n null=True, blank=True,\n verbose_name=_('Subtitle'),\n help_text=_('A tagline, or motto, or description. 
Not mandatory.')\n )\n is_public = models.BooleanField(\n default=False,\n verbose_name=_('Event is public')\n )\n permitted = models.ManyToManyField(\n to='person.User',\n through='person.EventPermission',\n related_name=\"events\",\n )\n date_from = models.DateField(\n verbose_name=_('Event start date'),\n )\n date_to = models.DateField(\n verbose_name=_('Event end date'),\n )\n timezone = models.CharField(\n choices=[(tz, tz) for tz in pytz.common_timezones],\n max_length=30,\n default='UTC',\n )\n email = models.EmailField(\n verbose_name=_('Orga email address'),\n help_text=_('Will be used as sender/reply-to in emails'),\n )\n primary_color = models.CharField(\n max_length=7,\n null=True, blank=True,\n validators=[],\n verbose_name=_('Main event color'),\n help_text=_('Please provide a hex value like #00ff00 if you do not like pretalx colors.'),\n )\n custom_css = models.FileField(\n upload_to=event_css_path,\n null=True, blank=True,\n verbose_name=_('Custom Event CSS'),\n help_text=_('Upload a custom CSS file if changing the primary color is not sufficient for you.'),\n )\n logo = models.FileField(\n upload_to=event_logo_path,\n null=True, blank=True,\n verbose_name=_('Logo'),\n help_text=_('Upload your event\\'s logo, if it is suitable to be displayed in the frontend\\'s header.'),\n )\n locale_array = models.TextField(default=settings.LANGUAGE_CODE)\n locale = models.CharField(\n max_length=32,\n default=settings.LANGUAGE_CODE,\n choices=settings.LANGUAGES,\n verbose_name=_('Default language'),\n )\n accept_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n ack_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n reject_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n update_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n\n class urls(Urls):\n base = '/{self.slug}'\n login = '{base}/login'\n logout = '{base}/logout'\n reset = '{base}/reset'\n submit = '{base}/submit'\n user = '{base}/me'\n user_delete = '{base}/me/delete'\n user_submissions = '{user}/submissions'\n schedule = '{base}/schedule'\n changelog = '{schedule}/changelog'\n frab_xml = '{schedule}.xml'\n frab_json = '{schedule}.json'\n frab_xcal = '{schedule}.xcal'\n ical = '{schedule}.ics'\n feed = '{schedule}/feed.xml'\n location = '{schedule}/location'\n\n class orga_urls(Urls):\n create = '/orga/event/new'\n base = '/orga/event/{self.slug}'\n cfp = '{base}/cfp'\n users = '{base}/users'\n mail = '{base}/mails'\n send_mails = '{mail}/send'\n mail_templates = '{mail}/templates'\n new_template = '{mail_templates}/new'\n outbox = '{mail}/outbox'\n sent_mails = '{mail}/sent'\n send_outbox = '{outbox}/send'\n purge_outbox = '{outbox}/purge'\n submissions = '{base}/submissions'\n submission_cards = '{base}/submissions/cards/'\n new_submission = '{submissions}/new'\n speakers = '{base}/speakers'\n settings = '{base}/settings'\n edit_settings = '{settings}/edit'\n mail_settings = '{settings}/mail'\n edit_mail_settings = '{mail_settings}/edit'\n team_settings = '{settings}/team'\n invite = '{team_settings}/add'\n room_settings = '{settings}/rooms'\n review_settings = '{settings}/reviews'\n new_room = '{room_settings}/new'\n schedule = '{base}/schedule'\n release_schedule = '{schedule}/release'\n reset_schedule = 
'{schedule}/reset'\n toggle_schedule = '{schedule}/toggle'\n reviews = '{base}/reviews'\n\n class api_urls(Urls):\n base = '/orga/event/{self.slug}'\n schedule = '{base}/schedule/api'\n rooms = '{schedule}/rooms'\n talks = '{schedule}/talks'\n\n def __str__(self) -> str:\n return str(self.name)\n\n @property\n def locales(self) -> list:\n return self.locale_array.split(\",\")\n\n @property\n def named_locales(self) -> list:\n enabled = set(self.locale_array.split(\",\"))\n return [a for a in settings.LANGUAGES_NATURAL_NAMES if a[0] in enabled]\n\n def save(self, *args, **kwargs):\n was_created = not bool(self.pk)\n super().save(*args, **kwargs)\n\n if was_created:\n self._build_initial_data()\n\n def _get_default_submission_type(self):\n from pretalx.submission.models import Submission, SubmissionType\n sub_type = Submission.objects.filter(event=self).first()\n if not sub_type:\n sub_type = SubmissionType.objects.create(event=self, name='Talk')\n return sub_type\n\n @cached_property\n def fixed_templates(self):\n return [self.accept_template, self.ack_template, self.reject_template, self.update_template]\n\n def _build_initial_data(self):\n from pretalx.mail.default_templates import ACCEPT_TEXT, ACK_TEXT, GENERIC_SUBJECT, REJECT_TEXT, UPDATE_TEXT\n from pretalx.mail.models import MailTemplate\n\n if not hasattr(self, 'cfp'):\n from pretalx.submission.models import CfP\n CfP.objects.create(event=self, default_type=self._get_default_submission_type())\n\n if not self.schedules.filter(version__isnull=True).exists():\n from pretalx.schedule.models import Schedule\n Schedule.objects.create(event=self)\n\n self.accept_template = self.accept_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=ACCEPT_TEXT)\n self.ack_template = self.ack_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=ACK_TEXT)\n self.reject_template = self.reject_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=REJECT_TEXT)\n self.update_template = self.update_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=UPDATE_TEXT)\n self.save()\n\n @cached_property\n def pending_mails(self):\n return self.queued_mails.filter(sent__isnull=True).count()\n\n @cached_property\n def wip_schedule(self):\n schedule, _ = self.schedules.get_or_create(version__isnull=True)\n return schedule\n\n @cached_property\n def current_schedule(self):\n return self.schedules.order_by('-published').filter(published__isnull=False).first()\n\n @property\n def duration(self):\n return (self.date_to - self.date_from).days + 1\n\n def get_mail_backend(self, force_custom: bool=False) -> BaseEmailBackend:\n from pretalx.common.mail import CustomSMTPBackend\n\n if self.settings.smtp_use_custom or force_custom:\n return CustomSMTPBackend(host=self.settings.smtp_host,\n port=self.settings.smtp_port,\n username=self.settings.smtp_username,\n password=self.settings.smtp_password,\n use_tls=self.settings.smtp_use_tls,\n use_ssl=self.settings.smtp_use_ssl,\n fail_silently=False)\n else:\n return get_connection(fail_silently=False)\n\n @property\n def event(self):\n return self\n\n @property\n def datetime_from(self):\n return make_aware(datetime.combine(\n self.date_from,\n time(hour=0, minute=0, second=0)\n ), pytz.timezone(self.timezone))\n\n @property\n def datetime_to(self):\n return make_aware(datetime.combine(\n self.date_to,\n time(hour=23, minute=59, second=59)\n ), pytz.timezone(self.timezone))\n\n @property\n def reviews(self):\n from 
pretalx.submission.models import Review\n return Review.objects.filter(submission__event=self)\n\n def release_schedule(self, name, user=None):\n self.wip_schedule.freeze(name=name, user=user)\n", "path": "src/pretalx/event/models/event.py"}], "after_files": [{"content": "from datetime import datetime, time\n\nimport pytz\nfrom django.conf import settings\nfrom django.core.mail import get_connection\nfrom django.core.mail.backends.base import BaseEmailBackend\nfrom django.core.validators import RegexValidator\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import ugettext_lazy as _\nfrom i18nfield.fields import I18nCharField\nfrom urlman import Urls\n\nfrom pretalx.common.mixins import LogMixin\nfrom pretalx.common.models.settings import settings_hierarkey\n\nSLUG_CHARS = 'a-zA-Z0-9.-'\n\n\ndef event_css_path(instance, filename):\n return f'{instance.slug}/css/{filename}'\n\n\ndef event_logo_path(instance, filename):\n return f'{instance.slug}/img/{filename}'\n\n\n@settings_hierarkey.add()\nclass Event(LogMixin, models.Model):\n name = I18nCharField(\n max_length=200,\n verbose_name=_('Name'),\n )\n slug = models.SlugField(\n max_length=50, db_index=True,\n validators=[\n RegexValidator(\n regex=f\"^[{SLUG_CHARS}]+$\",\n message=_('The slug may only contain letters, numbers, dots and dashes.'),\n ),\n ],\n verbose_name=_(\"Short form\"),\n help_text=_('Should be short, only contain lowercase letters and numbers, and must be unique, as it is used in URLs.'),\n )\n subtitle = I18nCharField(\n max_length=200,\n null=True, blank=True,\n verbose_name=_('Subtitle'),\n help_text=_('A tagline, or motto, or description. Not mandatory.')\n )\n is_public = models.BooleanField(\n default=False,\n verbose_name=_('Event is public')\n )\n permitted = models.ManyToManyField(\n to='person.User',\n through='person.EventPermission',\n related_name=\"events\",\n )\n date_from = models.DateField(\n verbose_name=_('Event start date'),\n )\n date_to = models.DateField(\n verbose_name=_('Event end date'),\n )\n timezone = models.CharField(\n choices=[(tz, tz) for tz in pytz.common_timezones],\n max_length=30,\n default='UTC',\n )\n email = models.EmailField(\n verbose_name=_('Orga email address'),\n help_text=_('Will be used as sender/reply-to in emails'),\n )\n primary_color = models.CharField(\n max_length=7,\n null=True, blank=True,\n validators=[],\n verbose_name=_('Main event color'),\n help_text=_('Please provide a hex value like #00ff00 if you do not like pretalx colors.'),\n )\n custom_css = models.FileField(\n upload_to=event_css_path,\n null=True, blank=True,\n verbose_name=_('Custom Event CSS'),\n help_text=_('Upload a custom CSS file if changing the primary color is not sufficient for you.'),\n )\n logo = models.FileField(\n upload_to=event_logo_path,\n null=True, blank=True,\n verbose_name=_('Logo'),\n help_text=_('Upload your event\\'s logo, if it is suitable to be displayed in the frontend\\'s header.'),\n )\n locale_array = models.TextField(default=settings.LANGUAGE_CODE)\n locale = models.CharField(\n max_length=32,\n default=settings.LANGUAGE_CODE,\n choices=settings.LANGUAGES,\n verbose_name=_('Default language'),\n )\n accept_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n ack_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n 
)\n reject_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n update_template = models.ForeignKey(\n to='mail.MailTemplate', on_delete=models.CASCADE,\n related_name='+', null=True, blank=True,\n )\n\n class urls(Urls):\n base = '/{self.slug}'\n login = '{base}/login'\n logout = '{base}/logout'\n reset = '{base}/reset'\n submit = '{base}/submit'\n user = '{base}/me'\n user_delete = '{base}/me/delete'\n user_submissions = '{user}/submissions'\n schedule = '{base}/schedule'\n changelog = '{schedule}/changelog'\n frab_xml = '{schedule}.xml'\n frab_json = '{schedule}.json'\n frab_xcal = '{schedule}.xcal'\n ical = '{schedule}.ics'\n feed = '{schedule}/feed.xml'\n location = '{schedule}/location'\n\n class orga_urls(Urls):\n create = '/orga/event/new'\n base = '/orga/event/{self.slug}'\n cfp = '{base}/cfp'\n users = '{base}/users'\n mail = '{base}/mails'\n send_mails = '{mail}/send'\n mail_templates = '{mail}/templates'\n new_template = '{mail_templates}/new'\n outbox = '{mail}/outbox'\n sent_mails = '{mail}/sent'\n send_outbox = '{outbox}/send'\n purge_outbox = '{outbox}/purge'\n submissions = '{base}/submissions'\n submission_cards = '{base}/submissions/cards/'\n new_submission = '{submissions}/new'\n speakers = '{base}/speakers'\n settings = '{base}/settings'\n edit_settings = '{settings}/edit'\n mail_settings = '{settings}/mail'\n edit_mail_settings = '{mail_settings}/edit'\n team_settings = '{settings}/team'\n invite = '{team_settings}/add'\n room_settings = '{settings}/rooms'\n review_settings = '{settings}/reviews'\n invite_reviewer = '{review_settings}/add'\n new_room = '{room_settings}/new'\n schedule = '{base}/schedule'\n release_schedule = '{schedule}/release'\n reset_schedule = '{schedule}/reset'\n toggle_schedule = '{schedule}/toggle'\n reviews = '{base}/reviews'\n\n class api_urls(Urls):\n base = '/orga/event/{self.slug}'\n schedule = '{base}/schedule/api'\n rooms = '{schedule}/rooms'\n talks = '{schedule}/talks'\n\n def __str__(self) -> str:\n return str(self.name)\n\n @property\n def locales(self) -> list:\n return self.locale_array.split(\",\")\n\n @property\n def named_locales(self) -> list:\n enabled = set(self.locale_array.split(\",\"))\n return [a for a in settings.LANGUAGES_NATURAL_NAMES if a[0] in enabled]\n\n def save(self, *args, **kwargs):\n was_created = not bool(self.pk)\n super().save(*args, **kwargs)\n\n if was_created:\n self._build_initial_data()\n\n def _get_default_submission_type(self):\n from pretalx.submission.models import Submission, SubmissionType\n sub_type = Submission.objects.filter(event=self).first()\n if not sub_type:\n sub_type = SubmissionType.objects.create(event=self, name='Talk')\n return sub_type\n\n @cached_property\n def fixed_templates(self):\n return [self.accept_template, self.ack_template, self.reject_template, self.update_template]\n\n def _build_initial_data(self):\n from pretalx.mail.default_templates import ACCEPT_TEXT, ACK_TEXT, GENERIC_SUBJECT, REJECT_TEXT, UPDATE_TEXT\n from pretalx.mail.models import MailTemplate\n\n if not hasattr(self, 'cfp'):\n from pretalx.submission.models import CfP\n CfP.objects.create(event=self, default_type=self._get_default_submission_type())\n\n if not self.schedules.filter(version__isnull=True).exists():\n from pretalx.schedule.models import Schedule\n Schedule.objects.create(event=self)\n\n self.accept_template = self.accept_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=ACCEPT_TEXT)\n 
self.ack_template = self.ack_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=ACK_TEXT)\n self.reject_template = self.reject_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=REJECT_TEXT)\n self.update_template = self.update_template or MailTemplate.objects.create(event=self, subject=GENERIC_SUBJECT, text=UPDATE_TEXT)\n self.save()\n\n @cached_property\n def pending_mails(self):\n return self.queued_mails.filter(sent__isnull=True).count()\n\n @cached_property\n def wip_schedule(self):\n schedule, _ = self.schedules.get_or_create(version__isnull=True)\n return schedule\n\n @cached_property\n def current_schedule(self):\n return self.schedules.order_by('-published').filter(published__isnull=False).first()\n\n @property\n def duration(self):\n return (self.date_to - self.date_from).days + 1\n\n def get_mail_backend(self, force_custom: bool=False) -> BaseEmailBackend:\n from pretalx.common.mail import CustomSMTPBackend\n\n if self.settings.smtp_use_custom or force_custom:\n return CustomSMTPBackend(host=self.settings.smtp_host,\n port=self.settings.smtp_port,\n username=self.settings.smtp_username,\n password=self.settings.smtp_password,\n use_tls=self.settings.smtp_use_tls,\n use_ssl=self.settings.smtp_use_ssl,\n fail_silently=False)\n else:\n return get_connection(fail_silently=False)\n\n @property\n def event(self):\n return self\n\n @property\n def datetime_from(self):\n return make_aware(datetime.combine(\n self.date_from,\n time(hour=0, minute=0, second=0)\n ), pytz.timezone(self.timezone))\n\n @property\n def datetime_to(self):\n return make_aware(datetime.combine(\n self.date_to,\n time(hour=23, minute=59, second=59)\n ), pytz.timezone(self.timezone))\n\n @property\n def reviews(self):\n from pretalx.submission.models import Review\n return Review.objects.filter(submission__event=self)\n\n def release_schedule(self, name, user=None):\n self.wip_schedule.freeze(name=name, user=user)\n", "path": "src/pretalx/event/models/event.py"}]}
| 3,272 | 128 |
gh_patches_debug_33129
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1259
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
If id, schema or last_modified are marked as required, record can't be validated
Because we pop those fields before validating them with the collection schema.
We can either:
* prevent those fields to be mentioned as `required` when defining the collection schema
* or not pop them if they are present in the schema before validating
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/records.py`
Content:
```
1 import copy
2
3 import jsonschema
4 from kinto.core import resource, utils
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import object_exists_or_404
11
12
13 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
14
15
16 @resource.register(name='record',
17 collection_path=_parent_path + '/records',
18 record_path=_parent_path + '/records/{{id}}')
19 class Record(resource.ShareableResource):
20
21 schema_field = 'schema'
22
23 def __init__(self, request, **kwargs):
24 # Before all, first check that the parent collection exists.
25 # Check if already fetched before (in batch).
26 collections = request.bound_data.setdefault('collections', {})
27 collection_uri = self.get_parent_id(request)
28 if collection_uri not in collections:
29 # Unknown yet, fetch from storage.
30 collection_parent_id = utils.instance_uri(request, 'bucket',
31 id=self.bucket_id)
32 collection = object_exists_or_404(request,
33 collection_id='collection',
34 parent_id=collection_parent_id,
35 object_id=self.collection_id)
36 collections[collection_uri] = collection
37
38 super().__init__(request, **kwargs)
39 self._collection = collections[collection_uri]
40
41 def get_parent_id(self, request):
42 self.bucket_id = request.matchdict['bucket_id']
43 self.collection_id = request.matchdict['collection_id']
44 return utils.instance_uri(request, 'collection',
45 bucket_id=self.bucket_id,
46 id=self.collection_id)
47
48 def process_record(self, new, old=None):
49 """Validate records against collection schema, if any."""
50 new = super().process_record(new, old)
51
52 schema = self._collection.get('schema')
53 settings = self.request.registry.settings
54 schema_validation = 'experimental_collection_schema_validation'
55 if not schema or not asbool(settings.get(schema_validation)):
56 return new
57
58 collection_timestamp = self._collection[self.model.modified_field]
59
60 try:
61 stripped = copy.deepcopy(new)
62 stripped.pop(self.model.id_field, None)
63 stripped.pop(self.model.modified_field, None)
64 stripped.pop(self.model.permissions_field, None)
65 stripped.pop(self.schema_field, None)
66 jsonschema.validate(stripped, schema)
67 except jsonschema_exceptions.ValidationError as e:
68 if e.validator_value:
69 field = e.validator_value[-1]
70 else:
71 field = e.schema_path[-1]
72 raise_invalid(self.request, name=field, description=e.message)
73
74 new[self.schema_field] = collection_timestamp
75 return new
76
77 def collection_get(self):
78 result = super().collection_get()
79 self._handle_cache_expires(self.request.response)
80 return result
81
82 def get(self):
83 result = super().get()
84 self._handle_cache_expires(self.request.response)
85 return result
86
87 def _handle_cache_expires(self, response):
88 """If the parent collection defines a ``cache_expires`` attribute,
89 then cache-control response headers are sent.
90
91 .. note::
92
93 Those headers are also sent if the
94 ``kinto.record_cache_expires_seconds`` setting is defined.
95 """
96 is_anonymous = Authenticated not in self.request.effective_principals
97 if not is_anonymous:
98 return
99
100 cache_expires = self._collection.get('cache_expires')
101 if cache_expires is None:
102 by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)
103 by_collection = '{}_{}_record_cache_expires_seconds'.format(
104 self.bucket_id, self.collection_id)
105 settings = self.request.registry.settings
106 cache_expires = settings.get(by_collection,
107 settings.get(by_bucket))
108
109 if cache_expires is not None:
110 response.cache_expires(seconds=int(cache_expires))
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -1,5 +1,3 @@
-import copy
-
import jsonschema
from kinto.core import resource, utils
from kinto.core.errors import raise_invalid
@@ -55,15 +53,21 @@
if not schema or not asbool(settings.get(schema_validation)):
return new
- collection_timestamp = self._collection[self.model.modified_field]
-
+ # Remove internal and auto-assigned fields from schema and record.
+ internal_fields = (self.model.id_field,
+ self.model.modified_field,
+ self.schema_field,
+ self.model.permissions_field)
+ required_fields = [f for f in schema.get('required', []) if f not in internal_fields]
+ if required_fields:
+ schema = {**schema, 'required': required_fields}
+ else:
+ schema = {f: v for f, v in new.items() if f != 'required'}
+ data = {f: v for f, v in new.items() if f not in internal_fields}
+
+ # Validate or fail with 400.
try:
- stripped = copy.deepcopy(new)
- stripped.pop(self.model.id_field, None)
- stripped.pop(self.model.modified_field, None)
- stripped.pop(self.model.permissions_field, None)
- stripped.pop(self.schema_field, None)
- jsonschema.validate(stripped, schema)
+ jsonschema.validate(data, schema)
except jsonschema_exceptions.ValidationError as e:
if e.validator_value:
field = e.validator_value[-1]
@@ -71,7 +75,10 @@
field = e.schema_path[-1]
raise_invalid(self.request, name=field, description=e.message)
+ # Assign the schema version (collection object timestamp) to the record.
+ collection_timestamp = self._collection[self.model.modified_field]
new[self.schema_field] = collection_timestamp
+
return new
def collection_get(self):
|
{"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -1,5 +1,3 @@\n-import copy\n-\n import jsonschema\n from kinto.core import resource, utils\n from kinto.core.errors import raise_invalid\n@@ -55,15 +53,21 @@\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n \n- collection_timestamp = self._collection[self.model.modified_field]\n-\n+ # Remove internal and auto-assigned fields from schema and record.\n+ internal_fields = (self.model.id_field,\n+ self.model.modified_field,\n+ self.schema_field,\n+ self.model.permissions_field)\n+ required_fields = [f for f in schema.get('required', []) if f not in internal_fields]\n+ if required_fields:\n+ schema = {**schema, 'required': required_fields}\n+ else:\n+ schema = {f: v for f, v in new.items() if f != 'required'}\n+ data = {f: v for f, v in new.items() if f not in internal_fields}\n+\n+ # Validate or fail with 400.\n try:\n- stripped = copy.deepcopy(new)\n- stripped.pop(self.model.id_field, None)\n- stripped.pop(self.model.modified_field, None)\n- stripped.pop(self.model.permissions_field, None)\n- stripped.pop(self.schema_field, None)\n- jsonschema.validate(stripped, schema)\n+ jsonschema.validate(data, schema)\n except jsonschema_exceptions.ValidationError as e:\n if e.validator_value:\n field = e.validator_value[-1]\n@@ -71,7 +75,10 @@\n field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n \n+ # Assign the schema version (collection object timestamp) to the record.\n+ collection_timestamp = self._collection[self.model.modified_field]\n new[self.schema_field] = collection_timestamp\n+\n return new\n \n def collection_get(self):\n", "issue": "If id, schema or last_modified are marked as required, record can't be validated\nBecause we pop those fields before validating them with the collection schema.\r\n\r\nWe can either:\r\n* prevent those fields to be mentioned as `required` when defining the collection schema\r\n* or not pop them if they are present in the schema before validating\nIf id, schema or last_modified are marked as required, record can't be validated\nBecause we pop those fields before validating them with the collection schema.\r\n\r\nWe can either:\r\n* prevent those fields to be mentioned as `required` when defining the collection schema\r\n* or not pop them if they are present in the schema before validating\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = utils.instance_uri(request, 'bucket',\n id=self.bucket_id)\n collection = 
object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super().__init__(request, **kwargs)\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return utils.instance_uri(request, 'collection',\n bucket_id=self.bucket_id,\n id=self.collection_id)\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super().process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n if e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super().collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super().get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. 
note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)\n by_collection = '{}_{}_record_cache_expires_seconds'.format(\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import jsonschema\nfrom kinto.core import resource, utils\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = utils.instance_uri(request, 'bucket',\n id=self.bucket_id)\n collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super().__init__(request, **kwargs)\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return utils.instance_uri(request, 'collection',\n bucket_id=self.bucket_id,\n id=self.collection_id)\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super().process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n # Remove internal and auto-assigned fields from schema and record.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.schema_field,\n self.model.permissions_field)\n required_fields = [f for f in schema.get('required', []) if f not in internal_fields]\n if required_fields:\n schema = {**schema, 'required': required_fields}\n else:\n schema = {f: v for f, v in new.items() if f != 'required'}\n data = {f: v for f, v in new.items() if f not in internal_fields}\n\n # Validate or fail with 400.\n try:\n jsonschema.validate(data, schema)\n except jsonschema_exceptions.ValidationError as e:\n if e.validator_value:\n field = e.validator_value[-1]\n else:\n field = e.schema_path[-1]\n raise_invalid(self.request, name=field, description=e.message)\n\n # Assign the schema version (collection object timestamp) to the record.\n collection_timestamp = 
self._collection[self.model.modified_field]\n new[self.schema_field] = collection_timestamp\n\n return new\n\n def collection_get(self):\n result = super().collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super().get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '{}_record_cache_expires_seconds'.format(self.bucket_id)\n by_collection = '{}_{}_record_cache_expires_seconds'.format(\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]}
| 1,444 | 453 |
gh_patches_debug_7892
|
rasdani/github-patches
|
git_diff
|
pwr-Solaar__Solaar-23
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
no battery status on M510 mouse
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/logitech/unifying_receiver/hidpp10.py`
Content:
```
1 #
2 #
3 #
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 from logging import getLogger # , DEBUG as _DEBUG
8 _log = getLogger('LUR').getChild('hidpp10')
9 del getLogger
10
11 from .common import (strhex as _strhex,
12 NamedInts as _NamedInts,
13 FirmwareInfo as _FirmwareInfo)
14 from .hidpp20 import FIRMWARE_KIND
15
16 #
17 # constants
18 #
19
20 DEVICE_KIND = _NamedInts(
21 keyboard=0x01,
22 mouse=0x02,
23 numpad=0x03,
24 presenter=0x04,
25 trackball=0x08,
26 touchpad=0x09)
27
28 POWER_SWITCH_LOCATION = _NamedInts(
29 base=0x01,
30 top_case=0x02,
31 edge_of_top_right_corner=0x03,
32 top_left_corner=0x05,
33 bottom_left_corner=0x06,
34 top_right_corner=0x07,
35 bottom_right_corner=0x08,
36 top_edge=0x09,
37 right_edge=0x0A,
38 left_edge=0x0B,
39 bottom_edge=0x0C)
40
41 NOTIFICATION_FLAG = _NamedInts(
42 battery_status=0x100000,
43 wireless=0x000100,
44 software_present=0x0000800)
45
46 ERROR = _NamedInts(
47 invalid_SubID__command=0x01,
48 invalid_address=0x02,
49 invalid_value=0x03,
50 connection_request_failed=0x04,
51 too_many_devices=0x05,
52 already_exists=0x06,
53 busy=0x07,
54 unknown_device=0x08,
55 resource_error=0x09,
56 request_unavailable=0x0A,
57 unsupported_parameter_value=0x0B,
58 wrong_pin_code=0x0C)
59
60 PAIRING_ERRORS = _NamedInts(
61 device_timeout=0x01,
62 device_not_supported=0x02,
63 too_many_devices=0x03,
64 sequence_timeout=0x06)
65
66 #
67 # functions
68 #
69
70 def get_register(device, name, default_number=-1):
71 known_register = device.registers[name]
72 register = known_register or default_number
73 if register > 0:
74 reply = device.request(0x8100 + (register & 0xFF))
75 if reply:
76 return reply
77
78 if not known_register and device.ping():
79 _log.warn("%s: failed to read '%s' from default register 0x%02X, blacklisting", device, name, default_number)
80 device.registers[-default_number] = name
81
82
83 def get_battery(device):
84 """Reads a device's battery level, if provided by the HID++ 1.0 protocol."""
85 reply = get_register(device, 'battery', 0x0D)
86 if reply:
87 charge = ord(reply[:1])
88 status = ord(reply[2:3]) & 0xF0
89 status = ('discharging' if status == 0x30
90 else 'charging' if status == 0x50
91 else 'fully charged' if status == 0x90
92 else None)
93 return charge, status
94
95 reply = get_register(device, 'battery_status', 0x07)
96 if reply:
97 battery_status = ord(reply[:1])
98 _log.info("%s: battery status %02X", device, battery_status)
99
100
101 def get_serial(device):
102 if device.kind is None:
103 dev_id = 0x03
104 receiver = device
105 else:
106 dev_id = 0x30 + device.number - 1
107 receiver = device.receiver
108
109 serial = receiver.request(0x83B5, dev_id)
110 if serial:
111 return _strhex(serial[1:5])
112
113
114 def get_firmware(device):
115 firmware = []
116
117 reply = device.request(0x81F1, 0x01)
118 if reply:
119 fw_version = _strhex(reply[1:3])
120 fw_version = '%s.%s' % (fw_version[0:2], fw_version[2:4])
121 reply = device.request(0x81F1, 0x02)
122 if reply:
123 fw_version += '.B' + _strhex(reply[1:3])
124 fw = _FirmwareInfo(FIRMWARE_KIND.Firmware, '', fw_version, None)
125 firmware.append(fw)
126
127 reply = device.request(0x81F1, 0x04)
128 if reply:
129 bl_version = _strhex(reply[1:3])
130 bl_version = '%s.%s' % (bl_version[0:2], bl_version[2:4])
131 bl = _FirmwareInfo(FIRMWARE_KIND.Bootloader, '', bl_version, None)
132 firmware.append(bl)
133
134 return tuple(firmware)
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/logitech/unifying_receiver/hidpp10.py b/lib/logitech/unifying_receiver/hidpp10.py
--- a/lib/logitech/unifying_receiver/hidpp10.py
+++ b/lib/logitech/unifying_receiver/hidpp10.py
@@ -94,8 +94,16 @@
reply = get_register(device, 'battery_status', 0x07)
if reply:
- battery_status = ord(reply[:1])
- _log.info("%s: battery status %02X", device, battery_status)
+ level = ord(reply[:1])
+ battery_status = ord(reply[2:3])
+ charge = (90 if level == 7 # full
+ else 50 if level == 5 # good
+ else 20 if level == 3 # low
+ else 5 if level == 1 # critical
+ else 0 ) # wtf?
+ status = ('charging' if battery_status == 0x25
+ else 'discharging')
+ return charge, status
def get_serial(device):
|
{"golden_diff": "diff --git a/lib/logitech/unifying_receiver/hidpp10.py b/lib/logitech/unifying_receiver/hidpp10.py\n--- a/lib/logitech/unifying_receiver/hidpp10.py\n+++ b/lib/logitech/unifying_receiver/hidpp10.py\n@@ -94,8 +94,16 @@\n \n \treply = get_register(device, 'battery_status', 0x07)\n \tif reply:\n-\t\tbattery_status = ord(reply[:1])\n-\t\t_log.info(\"%s: battery status %02X\", device, battery_status)\n+\t\tlevel = ord(reply[:1])\n+\t\tbattery_status = ord(reply[2:3])\n+\t\tcharge = (90 if level == 7 # full\n+\t\t\telse 50 if level == 5 # good\n+\t\t\telse 20 if level == 3 # low\n+\t\t\telse 5 if level == 1 # critical\n+\t\t\telse 0 ) # wtf?\n+\t\tstatus = ('charging' if battery_status == 0x25\n+\t\t\telse 'discharging')\n+\t\treturn charge, status\n \n \n def get_serial(device):\n", "issue": "no battery status on M510 mouse\n\n", "before_files": [{"content": "#\n#\n#\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger # , DEBUG as _DEBUG\n_log = getLogger('LUR').getChild('hidpp10')\ndel getLogger\n\nfrom .common import (strhex as _strhex,\n\t\t\t\t\tNamedInts as _NamedInts,\n\t\t\t\t\tFirmwareInfo as _FirmwareInfo)\nfrom .hidpp20 import FIRMWARE_KIND\n\n#\n# constants\n#\n\nDEVICE_KIND = _NamedInts(\n\t\t\t\tkeyboard=0x01,\n\t\t\t\tmouse=0x02,\n\t\t\t\tnumpad=0x03,\n\t\t\t\tpresenter=0x04,\n\t\t\t\ttrackball=0x08,\n\t\t\t\ttouchpad=0x09)\n\nPOWER_SWITCH_LOCATION = _NamedInts(\n\t\t\t\tbase=0x01,\n\t\t\t\ttop_case=0x02,\n\t\t\t\tedge_of_top_right_corner=0x03,\n\t\t\t\ttop_left_corner=0x05,\n\t\t\t\tbottom_left_corner=0x06,\n\t\t\t\ttop_right_corner=0x07,\n\t\t\t\tbottom_right_corner=0x08,\n\t\t\t\ttop_edge=0x09,\n\t\t\t\tright_edge=0x0A,\n\t\t\t\tleft_edge=0x0B,\n\t\t\t\tbottom_edge=0x0C)\n\nNOTIFICATION_FLAG = _NamedInts(\n\t\t\t\tbattery_status=0x100000,\n\t\t\t\twireless=0x000100,\n\t\t\t\tsoftware_present=0x0000800)\n\nERROR = _NamedInts(\n\t\t\t\tinvalid_SubID__command=0x01,\n\t\t\t\tinvalid_address=0x02,\n\t\t\t\tinvalid_value=0x03,\n\t\t\t\tconnection_request_failed=0x04,\n\t\t\t\ttoo_many_devices=0x05,\n\t\t\t\talready_exists=0x06,\n\t\t\t\tbusy=0x07,\n\t\t\t\tunknown_device=0x08,\n\t\t\t\tresource_error=0x09,\n\t\t\t\trequest_unavailable=0x0A,\n\t\t\t\tunsupported_parameter_value=0x0B,\n\t\t\t\twrong_pin_code=0x0C)\n\nPAIRING_ERRORS = _NamedInts(\n\t\t\t\tdevice_timeout=0x01,\n\t\t\t\tdevice_not_supported=0x02,\n\t\t\t\ttoo_many_devices=0x03,\n\t\t\t\tsequence_timeout=0x06)\n\n#\n# functions\n#\n\ndef get_register(device, name, default_number=-1):\n\tknown_register = device.registers[name]\n\tregister = known_register or default_number\n\tif register > 0:\n\t\treply = device.request(0x8100 + (register & 0xFF))\n\t\tif reply:\n\t\t\treturn reply\n\n\t\tif not known_register and device.ping():\n\t\t\t_log.warn(\"%s: failed to read '%s' from default register 0x%02X, blacklisting\", device, name, default_number)\n\t\t\tdevice.registers[-default_number] = name\n\n\ndef get_battery(device):\n\t\"\"\"Reads a device's battery level, if provided by the HID++ 1.0 protocol.\"\"\"\n\treply = get_register(device, 'battery', 0x0D)\n\tif reply:\n\t\tcharge = ord(reply[:1])\n\t\tstatus = ord(reply[2:3]) & 0xF0\n\t\tstatus = ('discharging' if status == 0x30\n\t\t\t\telse 'charging' if status == 0x50\n\t\t\t\telse 'fully charged' if status == 0x90\n\t\t\t\telse None)\n\t\treturn charge, status\n\n\treply = get_register(device, 'battery_status', 0x07)\n\tif reply:\n\t\tbattery_status = ord(reply[:1])\n\t\t_log.info(\"%s: 
battery status %02X\", device, battery_status)\n\n\ndef get_serial(device):\n\tif device.kind is None:\n\t\tdev_id = 0x03\n\t\treceiver = device\n\telse:\n\t\tdev_id = 0x30 + device.number - 1\n\t\treceiver = device.receiver\n\n\tserial = receiver.request(0x83B5, dev_id)\n\tif serial:\n\t\treturn _strhex(serial[1:5])\n\n\ndef get_firmware(device):\n\tfirmware = []\n\n\treply = device.request(0x81F1, 0x01)\n\tif reply:\n\t\tfw_version = _strhex(reply[1:3])\n\t\tfw_version = '%s.%s' % (fw_version[0:2], fw_version[2:4])\n\t\treply = device.request(0x81F1, 0x02)\n\t\tif reply:\n\t\t\tfw_version += '.B' + _strhex(reply[1:3])\n\t\tfw = _FirmwareInfo(FIRMWARE_KIND.Firmware, '', fw_version, None)\n\t\tfirmware.append(fw)\n\n\treply = device.request(0x81F1, 0x04)\n\tif reply:\n\t\tbl_version = _strhex(reply[1:3])\n\t\tbl_version = '%s.%s' % (bl_version[0:2], bl_version[2:4])\n\t\tbl = _FirmwareInfo(FIRMWARE_KIND.Bootloader, '', bl_version, None)\n\t\tfirmware.append(bl)\n\n\treturn tuple(firmware)\n", "path": "lib/logitech/unifying_receiver/hidpp10.py"}], "after_files": [{"content": "#\n#\n#\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger # , DEBUG as _DEBUG\n_log = getLogger('LUR').getChild('hidpp10')\ndel getLogger\n\nfrom .common import (strhex as _strhex,\n\t\t\t\t\tNamedInts as _NamedInts,\n\t\t\t\t\tFirmwareInfo as _FirmwareInfo)\nfrom .hidpp20 import FIRMWARE_KIND\n\n#\n# constants\n#\n\nDEVICE_KIND = _NamedInts(\n\t\t\t\tkeyboard=0x01,\n\t\t\t\tmouse=0x02,\n\t\t\t\tnumpad=0x03,\n\t\t\t\tpresenter=0x04,\n\t\t\t\ttrackball=0x08,\n\t\t\t\ttouchpad=0x09)\n\nPOWER_SWITCH_LOCATION = _NamedInts(\n\t\t\t\tbase=0x01,\n\t\t\t\ttop_case=0x02,\n\t\t\t\tedge_of_top_right_corner=0x03,\n\t\t\t\ttop_left_corner=0x05,\n\t\t\t\tbottom_left_corner=0x06,\n\t\t\t\ttop_right_corner=0x07,\n\t\t\t\tbottom_right_corner=0x08,\n\t\t\t\ttop_edge=0x09,\n\t\t\t\tright_edge=0x0A,\n\t\t\t\tleft_edge=0x0B,\n\t\t\t\tbottom_edge=0x0C)\n\nNOTIFICATION_FLAG = _NamedInts(\n\t\t\t\tbattery_status=0x100000,\n\t\t\t\twireless=0x000100,\n\t\t\t\tsoftware_present=0x0000800)\n\nERROR = _NamedInts(\n\t\t\t\tinvalid_SubID__command=0x01,\n\t\t\t\tinvalid_address=0x02,\n\t\t\t\tinvalid_value=0x03,\n\t\t\t\tconnection_request_failed=0x04,\n\t\t\t\ttoo_many_devices=0x05,\n\t\t\t\talready_exists=0x06,\n\t\t\t\tbusy=0x07,\n\t\t\t\tunknown_device=0x08,\n\t\t\t\tresource_error=0x09,\n\t\t\t\trequest_unavailable=0x0A,\n\t\t\t\tunsupported_parameter_value=0x0B,\n\t\t\t\twrong_pin_code=0x0C)\n\nPAIRING_ERRORS = _NamedInts(\n\t\t\t\tdevice_timeout=0x01,\n\t\t\t\tdevice_not_supported=0x02,\n\t\t\t\ttoo_many_devices=0x03,\n\t\t\t\tsequence_timeout=0x06)\n\n#\n# functions\n#\n\ndef get_register(device, name, default_number=-1):\n\tknown_register = device.registers[name]\n\tregister = known_register or default_number\n\tif register > 0:\n\t\treply = device.request(0x8100 + (register & 0xFF))\n\t\tif reply:\n\t\t\treturn reply\n\n\t\tif not known_register and device.ping():\n\t\t\t_log.warn(\"%s: failed to read '%s' from default register 0x%02X, blacklisting\", device, name, default_number)\n\t\t\tdevice.registers[-default_number] = name\n\n\ndef get_battery(device):\n\t\"\"\"Reads a device's battery level, if provided by the HID++ 1.0 protocol.\"\"\"\n\treply = get_register(device, 'battery', 0x0D)\n\tif reply:\n\t\tcharge = ord(reply[:1])\n\t\tstatus = ord(reply[2:3]) & 0xF0\n\t\tstatus = ('discharging' if status == 0x30\n\t\t\t\telse 'charging' if status == 0x50\n\t\t\t\telse 
'fully charged' if status == 0x90\n\t\t\t\telse None)\n\t\treturn charge, status\n\n\treply = get_register(device, 'battery_status', 0x07)\n\tif reply:\n\t\tlevel = ord(reply[:1])\n\t\tbattery_status = ord(reply[2:3])\n\t\tcharge = (90 if level == 7 # full\n\t\t\telse 50 if level == 5 # good\n\t\t\telse 20 if level == 3 # low\n\t\t\telse 5 if level == 1 # critical\n\t\t\telse 0 ) # wtf?\n\t\tstatus = ('charging' if battery_status == 0x25\n\t\t\telse 'discharging')\n\t\treturn charge, status\n\n\ndef get_serial(device):\n\tif device.kind is None:\n\t\tdev_id = 0x03\n\t\treceiver = device\n\telse:\n\t\tdev_id = 0x30 + device.number - 1\n\t\treceiver = device.receiver\n\n\tserial = receiver.request(0x83B5, dev_id)\n\tif serial:\n\t\treturn _strhex(serial[1:5])\n\n\ndef get_firmware(device):\n\tfirmware = []\n\n\treply = device.request(0x81F1, 0x01)\n\tif reply:\n\t\tfw_version = _strhex(reply[1:3])\n\t\tfw_version = '%s.%s' % (fw_version[0:2], fw_version[2:4])\n\t\treply = device.request(0x81F1, 0x02)\n\t\tif reply:\n\t\t\tfw_version += '.B' + _strhex(reply[1:3])\n\t\tfw = _FirmwareInfo(FIRMWARE_KIND.Firmware, '', fw_version, None)\n\t\tfirmware.append(fw)\n\n\treply = device.request(0x81F1, 0x04)\n\tif reply:\n\t\tbl_version = _strhex(reply[1:3])\n\t\tbl_version = '%s.%s' % (bl_version[0:2], bl_version[2:4])\n\t\tbl = _FirmwareInfo(FIRMWARE_KIND.Bootloader, '', bl_version, None)\n\t\tfirmware.append(bl)\n\n\treturn tuple(firmware)\n", "path": "lib/logitech/unifying_receiver/hidpp10.py"}]}
| 1,733 | 252 |
gh_patches_debug_20883
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-3846
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document changed CrawlerProcess.crawl(spider) functionality in Release notes
Possible Regression. See explanation beneath spider.
MWE Testcode:
```python3
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
import logging
import scrapy
logger = logging.getLogger(__name__)
class Spider(scrapy.Spider):
name = 'Spidy'
def start_requests(self):
yield scrapy.Request('https://scrapy.org/')
def parse(self, response):
logger.info('Here I fetched %s for you. [%s]' % (response.url, response.status))
return {
'status': response.status,
'url': response.url,
'test': 'item',
}
class LogPipeline(object):
def process_item(self, item, spider):
logger.warning('HIT ME PLEASE')
logger.info('Got hit by:\n %r' % item)
return item
if __name__ == "__main__":
from scrapy.settings import Settings
from scrapy.crawler import CrawlerProcess
settings = Settings(values={
'TELNETCONSOLE_ENABLED': False, # necessary evil :(
'EXTENSIONS': {
'scrapy.extensions.telnet.TelnetConsole': None,
},
'ITEM_PIPELINES': {
'__main__.LogPipeline': 800,
},
})
spider = Spider()
process = CrawlerProcess(settings=settings)
process.crawl(spider)
process.start()
```
I just tried this functional (with Scrapy 1.5.1) example script on current master codebase and I got this error:
```
2019-07-12 13:54:16 [scrapy.utils.log] INFO: Scrapy 1.6.0 started (bot: scrapybot)
2019-07-12 13:54:16 [scrapy.utils.log] INFO: Versions: lxml 4.3.2.0, libxml2 2.9.4, cssselect 1.0.3, parsel 1.5.0, w3lib 1.20.0, Twisted 18.9.0, Python 3.7.3 (default, Apr 3 2019, 05:39:12) - [GCC 8.3.0], pyOpenSSL 19.0.0 (OpenSSL 1.1.1c 28 May 2019), cryptography 2.6.1, Platform Linux-4.9.0-8-amd64-x86_64-with-debian-10.0
Traceback (most recent call last):
File "./test.py", line 60, in <module>
process.crawl(spider)
File "[...]/scrapy.git/scrapy/crawler.py", line 180, in crawl
'The crawler_or_spidercls argument cannot be a spider object, '
ValueError: The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)
```
Looking at the codebase, blame blames this change: https://github.com/scrapy/scrapy/pull/3610
But that procedure (passing a spider instance as `process.crawl(spider)`) is taken pretty much verbatim from the (latest) docs, so it should continue to work, ~or first get deprecated~?: https://docs.scrapy.org/en/latest/topics/practices.html#run-scrapy-from-a-script
**edit:/** to clarify, I don't mind the functionality getting removed without deprecation, if it was never documented, as it seems it wasn't.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Scrapy documentation build configuration file, created by
4 # sphinx-quickstart on Mon Nov 24 12:02:52 2008.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # The contents of this file are pickled, so don't put values in the namespace
9 # that aren't pickleable (module imports are okay, they're removed automatically).
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys
15 from os import path
16
17 # If your extensions are in another directory, add it here. If the directory
18 # is relative to the documentation root, use os.path.abspath to make it
19 # absolute, like shown here.
20 sys.path.append(path.join(path.dirname(__file__), "_ext"))
21 sys.path.insert(0, path.dirname(path.dirname(__file__)))
22
23
24 # General configuration
25 # ---------------------
26
27 # Add any Sphinx extension module names here, as strings. They can be extensions
28 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
29 extensions = [
30 'scrapydocs',
31 'sphinx.ext.autodoc',
32 'sphinx.ext.coverage',
33 ]
34
35 # Add any paths that contain templates here, relative to this directory.
36 templates_path = ['_templates']
37
38 # The suffix of source filenames.
39 source_suffix = '.rst'
40
41 # The encoding of source files.
42 #source_encoding = 'utf-8'
43
44 # The master toctree document.
45 master_doc = 'index'
46
47 # General information about the project.
48 project = u'Scrapy'
49 copyright = u'2008–2018, Scrapy developers'
50
51 # The version info for the project you're documenting, acts as replacement for
52 # |version| and |release|, also used in various other places throughout the
53 # built documents.
54 #
55 # The short X.Y version.
56 try:
57 import scrapy
58 version = '.'.join(map(str, scrapy.version_info[:2]))
59 release = scrapy.__version__
60 except ImportError:
61 version = ''
62 release = ''
63
64 # The language for content autogenerated by Sphinx. Refer to documentation
65 # for a list of supported languages.
66 language = 'en'
67
68 # There are two options for replacing |today|: either, you set today to some
69 # non-false value, then it is used:
70 #today = ''
71 # Else, today_fmt is used as the format for a strftime call.
72 #today_fmt = '%B %d, %Y'
73
74 # List of documents that shouldn't be included in the build.
75 #unused_docs = []
76
77 # List of directories, relative to source directory, that shouldn't be searched
78 # for source files.
79 exclude_trees = ['.build']
80
81 # The reST default role (used for this markup: `text`) to use for all documents.
82 #default_role = None
83
84 # If true, '()' will be appended to :func: etc. cross-reference text.
85 #add_function_parentheses = True
86
87 # If true, the current module name will be prepended to all description
88 # unit titles (such as .. function::).
89 #add_module_names = True
90
91 # If true, sectionauthor and moduleauthor directives will be shown in the
92 # output. They are ignored by default.
93 #show_authors = False
94
95 # The name of the Pygments (syntax highlighting) style to use.
96 pygments_style = 'sphinx'
97
98
99 # Options for HTML output
100 # -----------------------
101
102 # The theme to use for HTML and HTML Help pages. See the documentation for
103 # a list of builtin themes.
104 html_theme = 'sphinx_rtd_theme'
105
106 # Theme options are theme-specific and customize the look and feel of a theme
107 # further. For a list of options available for each theme, see the
108 # documentation.
109 #html_theme_options = {}
110
111 # Add any paths that contain custom themes here, relative to this directory.
112 # Add path to the RTD explicitly to robustify builds (otherwise might
113 # fail in a clean Debian build env)
114 import sphinx_rtd_theme
115 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
116
117
118 # The style sheet to use for HTML and HTML Help pages. A file of that name
119 # must exist either in Sphinx' static/ path, or in one of the custom paths
120 # given in html_static_path.
121 # html_style = 'scrapydoc.css'
122
123 # The name for this set of Sphinx documents. If None, it defaults to
124 # "<project> v<release> documentation".
125 #html_title = None
126
127 # A shorter title for the navigation bar. Default is the same as html_title.
128 #html_short_title = None
129
130 # The name of an image file (relative to this directory) to place at the top
131 # of the sidebar.
132 #html_logo = None
133
134 # The name of an image file (within the static path) to use as favicon of the
135 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
136 # pixels large.
137 #html_favicon = None
138
139 # Add any paths that contain custom static files (such as style sheets) here,
140 # relative to this directory. They are copied after the builtin static files,
141 # so a file named "default.css" will overwrite the builtin "default.css".
142 html_static_path = ['_static']
143
144 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
145 # using the given strftime format.
146 html_last_updated_fmt = '%b %d, %Y'
147
148 # Custom sidebar templates, maps document names to template names.
149 #html_sidebars = {}
150
151 # Additional templates that should be rendered to pages, maps page names to
152 # template names.
153 #html_additional_pages = {}
154
155 # If false, no module index is generated.
156 #html_use_modindex = True
157
158 # If false, no index is generated.
159 #html_use_index = True
160
161 # If true, the index is split into individual pages for each letter.
162 #html_split_index = False
163
164 # If true, the reST sources are included in the HTML build as _sources/<name>.
165 html_copy_source = True
166
167 # If true, an OpenSearch description file will be output, and all pages will
168 # contain a <link> tag referring to it. The value of this option must be the
169 # base URL from which the finished HTML is served.
170 #html_use_opensearch = ''
171
172 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
173 #html_file_suffix = ''
174
175 # Output file base name for HTML help builder.
176 htmlhelp_basename = 'Scrapydoc'
177
178
179 # Options for LaTeX output
180 # ------------------------
181
182 # The paper size ('letter' or 'a4').
183 #latex_paper_size = 'letter'
184
185 # The font size ('10pt', '11pt' or '12pt').
186 #latex_font_size = '10pt'
187
188 # Grouping the document tree into LaTeX files. List of tuples
189 # (source start file, target name, title, author, document class [howto/manual]).
190 latex_documents = [
191 ('index', 'Scrapy.tex', u'Scrapy Documentation',
192 u'Scrapy developers', 'manual'),
193 ]
194
195 # The name of an image file (relative to this directory) to place at the top of
196 # the title page.
197 #latex_logo = None
198
199 # For "manual" documents, if this is true, then toplevel headings are parts,
200 # not chapters.
201 #latex_use_parts = False
202
203 # Additional stuff for the LaTeX preamble.
204 #latex_preamble = ''
205
206 # Documents to append as an appendix to all manuals.
207 #latex_appendices = []
208
209 # If false, no module index is generated.
210 #latex_use_modindex = True
211
212
213 # Options for the linkcheck builder
214 # ---------------------------------
215
216 # A list of regular expressions that match URIs that should not be checked when
217 # doing a linkcheck build.
218 linkcheck_ignore = [
219 'http://localhost:\d+', 'http://hg.scrapy.org',
220 'http://directory.google.com/'
221 ]
222
223
224 # Options for the Coverage extension
225 # ----------------------------------
226 coverage_ignore_pyobjects = [
227 # Contract’s add_pre_hook and add_post_hook are not documented because
228 # they should be transparent to contract developers, for whom pre_hook and
229 # post_hook should be the actual concern.
230 r'\bContract\.add_(pre|post)_hook$',
231
232 # ContractsManager is an internal class, developers are not expected to
233 # interact with it directly in any way.
234 r'\bContractsManager\b$',
235
236 # For default contracts we only want to document their general purpose in
237 # their constructor, the methods they reimplement to achieve that purpose
238 # should be irrelevant to developers using those contracts.
239 r'\w+Contract\.(adjust_request_args|(pre|post)_process)$',
240
241 # Methods of downloader middlewares are not documented, only the classes
242 # themselves, since downloader middlewares are controlled through Scrapy
243 # settings.
244 r'^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.',
245
246 # Base classes of downloader middlewares are implementation details that
247 # are not meant for users.
248 r'^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware',
249
250 # Private exception used by the command-line interface implementation.
251 r'^scrapy\.exceptions\.UsageError',
252 ]
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -30,6 +30,7 @@
'scrapydocs',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
+ 'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
@@ -74,6 +75,8 @@
# List of documents that shouldn't be included in the build.
#unused_docs = []
+exclude_patterns = ['build']
+
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
@@ -250,3 +253,11 @@
# Private exception used by the command-line interface implementation.
r'^scrapy\.exceptions\.UsageError',
]
+
+
+# Options for the InterSphinx extension
+# -------------------------------------
+
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3', None),
+}
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -30,6 +30,7 @@\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n+ 'sphinx.ext.intersphinx',\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n@@ -74,6 +75,8 @@\n # List of documents that shouldn't be included in the build.\n #unused_docs = []\n \n+exclude_patterns = ['build']\n+\n # List of directories, relative to source directory, that shouldn't be searched\n # for source files.\n exclude_trees = ['.build']\n@@ -250,3 +253,11 @@\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n ]\n+\n+\n+# Options for the InterSphinx extension\n+# -------------------------------------\n+\n+intersphinx_mapping = {\n+ 'python': ('https://docs.python.org/3', None),\n+}\n", "issue": "Document changed CrawlerProcess.crawl(spider) functionality in Release notes\nPossible Regression. See explanation beneath spider.\r\n\r\nMWE Testcode:\r\n\r\n```python3\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n#\r\nimport logging\r\nimport scrapy\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass Spider(scrapy.Spider):\r\n\r\n name = 'Spidy'\r\n\r\n def start_requests(self):\r\n yield scrapy.Request('https://scrapy.org/')\r\n\r\n def parse(self, response):\r\n logger.info('Here I fetched %s for you. [%s]' % (response.url, response.status))\r\n return {\r\n 'status': response.status,\r\n 'url': response.url,\r\n 'test': 'item',\r\n }\r\n\r\n\r\nclass LogPipeline(object):\r\n\r\n def process_item(self, item, spider):\r\n logger.warning('HIT ME PLEASE')\r\n logger.info('Got hit by:\\n %r' % item)\r\n return item\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from scrapy.settings import Settings\r\n from scrapy.crawler import CrawlerProcess\r\n\r\n settings = Settings(values={\r\n 'TELNETCONSOLE_ENABLED': False, # necessary evil :(\r\n 'EXTENSIONS': {\r\n 'scrapy.extensions.telnet.TelnetConsole': None,\r\n },\r\n 'ITEM_PIPELINES': {\r\n '__main__.LogPipeline': 800,\r\n },\r\n })\r\n\r\n spider = Spider()\r\n\r\n process = CrawlerProcess(settings=settings)\r\n process.crawl(spider)\r\n process.start()\r\n```\r\n\r\nI just tried this functional (with Scrapy 1.5.1) example script on current master codebase and I got this error:\r\n```\r\n2019-07-12 13:54:16 [scrapy.utils.log] INFO: Scrapy 1.6.0 started (bot: scrapybot)\r\n2019-07-12 13:54:16 [scrapy.utils.log] INFO: Versions: lxml 4.3.2.0, libxml2 2.9.4, cssselect 1.0.3, parsel 1.5.0, w3lib 1.20.0, Twisted 18.9.0, Python 3.7.3 (default, Apr 3 2019, 05:39:12) - [GCC 8.3.0], pyOpenSSL 19.0.0 (OpenSSL 1.1.1c 28 May 2019), cryptography 2.6.1, Platform Linux-4.9.0-8-amd64-x86_64-with-debian-10.0\r\nTraceback (most recent call last):\r\n File \"./test.py\", line 60, in <module>\r\n process.crawl(spider)\r\n File \"[...]/scrapy.git/scrapy/crawler.py\", line 180, in crawl\r\n 'The crawler_or_spidercls argument cannot be a spider object, '\r\nValueError: The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)\r\n```\r\n\r\nLooking at the codebase, blame blames this change: https://github.com/scrapy/scrapy/pull/3610\r\n\r\nBut that procedure (passing a spider instance as `process.crawl(spider)`) is taken pretty much verbatim from the (latest) docs, so it should continue to work, ~or first get deprecated~?: https://docs.scrapy.org/en/latest/topics/practices.html#run-scrapy-from-a-script\r\n\r\n**edit:/** to 
clarify, I don't mind the functionality getting removed without deprecation, if it was never documented, as it seems it wasn't.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Scrapy'\ncopyright = u'2008\u20132018, Scrapy developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', u'Scrapy Documentation',\n u'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their constructor, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Scrapy'\ncopyright = u'2008\u20132018, Scrapy developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', u'Scrapy Documentation',\n u'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their constructor, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader 
middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n", "path": "docs/conf.py"}]}
| 3,769 | 229 |
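A minimal sketch of the behaviour change described by the `ValueError` traceback quoted in the row above: per that traceback, on Scrapy 1.6+ `CrawlerProcess.crawl()` accepts a spider class (or a `Crawler` object), not a spider instance. The trimmed-down `Spidy` spider below is an assumed stand-in for the MWE quoted in the issue, not part of the dataset row itself.

```python
import scrapy
from scrapy.crawler import CrawlerProcess


class Spider(scrapy.Spider):
    # Trimmed-down version of the spider from the issue's MWE (assumed for illustration).
    name = "Spidy"
    start_urls = ["https://scrapy.org/"]

    def parse(self, response):
        yield {"status": response.status, "url": response.url}


process = CrawlerProcess()
# process.crawl(Spider())  # raises ValueError: must be a spider class (or a Crawler object)
process.crawl(Spider)      # pass the class itself; Scrapy instantiates it internally
process.start()
```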
gh_patches_debug_12366 | rasdani/github-patches | git_diff | airctic__icevision-883 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add kwargs to EfficientDet model() method
Add kwargs to EfficientDet model() method. This will allow passing the `pretrained_backbone` argument to the EfficientDet `create_model_from_config()` method. That will prevent loading pretrained weights if the user wishes to do so.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/ross/efficientdet/model.py`
Content:
```
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5 from icevision.models.ross.efficientdet.utils import *
6 from icevision.models.ross.efficientdet.backbones import *
7 from effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench
8 from effdet import create_model_from_config
9 from effdet.efficientdet import HeadNet
10
11
12 def model(
13 backbone: EfficientDetBackboneConfig,
14 num_classes: int,
15 img_size: int,
16 ) -> nn.Module:
17 """Creates the efficientdet model specified by `model_name`.
18
19 The model implementation is by Ross Wightman, original repo
20 [here](https://github.com/rwightman/efficientdet-pytorch).
21
22 # Arguments
23 backbone: Specifies the backbone to use create the model. For pretrained models, check
24 [this](https://github.com/rwightman/efficientdet-pytorch#models) table.
25 num_classes: Number of classes of your dataset (including background).
26 img_size: Image size that will be fed to the model. Must be squared and
27 divisible by 128.
28
29 # Returns
30 A PyTorch model.
31 """
32 model_name = backbone.model_name
33 config = get_efficientdet_config(model_name=model_name)
34 config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size
35
36 model_bench = create_model_from_config(
37 config,
38 bench_task="train",
39 bench_labeler=True,
40 num_classes=num_classes - 1,
41 pretrained=backbone.pretrained,
42 )
43
44 # TODO: Break down param groups for backbone
45 def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:
46 unwrapped = unwrap_bench(model)
47
48 layers = [
49 unwrapped.backbone,
50 unwrapped.fpn,
51 nn.Sequential(unwrapped.class_net, unwrapped.box_net),
52 ]
53 param_groups = [list(layer.parameters()) for layer in layers]
54 check_all_model_params_in_groups2(model, param_groups)
55
56 return param_groups
57
58 model_bench.param_groups = MethodType(param_groups_fn, model_bench)
59
60 return model_bench
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/ross/efficientdet/model.py b/icevision/models/ross/efficientdet/model.py
--- a/icevision/models/ross/efficientdet/model.py
+++ b/icevision/models/ross/efficientdet/model.py
@@ -13,6 +13,7 @@
backbone: EfficientDetBackboneConfig,
num_classes: int,
img_size: int,
+ **kwargs,
) -> nn.Module:
"""Creates the efficientdet model specified by `model_name`.
@@ -39,6 +40,7 @@
bench_labeler=True,
num_classes=num_classes - 1,
pretrained=backbone.pretrained,
+ **kwargs,
)
# TODO: Break down param groups for backbone
|
{"golden_diff": "diff --git a/icevision/models/ross/efficientdet/model.py b/icevision/models/ross/efficientdet/model.py\n--- a/icevision/models/ross/efficientdet/model.py\n+++ b/icevision/models/ross/efficientdet/model.py\n@@ -13,6 +13,7 @@\n backbone: EfficientDetBackboneConfig,\n num_classes: int,\n img_size: int,\n+ **kwargs,\n ) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n \n@@ -39,6 +40,7 @@\n bench_labeler=True,\n num_classes=num_classes - 1,\n pretrained=backbone.pretrained,\n+ **kwargs,\n )\n \n # TODO: Break down param groups for backbone\n", "issue": "Add kwargs to EfficientDet model() method\nAdd kwargs to EfficientDet model() method. This will allow to pass `pretrained_backbone` argument to EfficientDet `create_model_from_config()` method. That will prevent loading pretrained weights if the user wish to do so\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.models.ross.efficientdet.utils import *\nfrom icevision.models.ross.efficientdet.backbones import *\nfrom effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench\nfrom effdet import create_model_from_config\nfrom effdet.efficientdet import HeadNet\n\n\ndef model(\n backbone: EfficientDetBackboneConfig,\n num_classes: int,\n img_size: int,\n) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n\n The model implementation is by Ross Wightman, original repo\n [here](https://github.com/rwightman/efficientdet-pytorch).\n\n # Arguments\n backbone: Specifies the backbone to use create the model. For pretrained models, check\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. 
Must be squared and\n divisible by 128.\n\n # Returns\n A PyTorch model.\n \"\"\"\n model_name = backbone.model_name\n config = get_efficientdet_config(model_name=model_name)\n config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size\n\n model_bench = create_model_from_config(\n config,\n bench_task=\"train\",\n bench_labeler=True,\n num_classes=num_classes - 1,\n pretrained=backbone.pretrained,\n )\n\n # TODO: Break down param groups for backbone\n def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:\n unwrapped = unwrap_bench(model)\n\n layers = [\n unwrapped.backbone,\n unwrapped.fpn,\n nn.Sequential(unwrapped.class_net, unwrapped.box_net),\n ]\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n model_bench.param_groups = MethodType(param_groups_fn, model_bench)\n\n return model_bench\n", "path": "icevision/models/ross/efficientdet/model.py"}], "after_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.models.ross.efficientdet.utils import *\nfrom icevision.models.ross.efficientdet.backbones import *\nfrom effdet import get_efficientdet_config, EfficientDet, DetBenchTrain, unwrap_bench\nfrom effdet import create_model_from_config\nfrom effdet.efficientdet import HeadNet\n\n\ndef model(\n backbone: EfficientDetBackboneConfig,\n num_classes: int,\n img_size: int,\n **kwargs,\n) -> nn.Module:\n \"\"\"Creates the efficientdet model specified by `model_name`.\n\n The model implementation is by Ross Wightman, original repo\n [here](https://github.com/rwightman/efficientdet-pytorch).\n\n # Arguments\n backbone: Specifies the backbone to use create the model. For pretrained models, check\n [this](https://github.com/rwightman/efficientdet-pytorch#models) table.\n num_classes: Number of classes of your dataset (including background).\n img_size: Image size that will be fed to the model. Must be squared and\n divisible by 128.\n\n # Returns\n A PyTorch model.\n \"\"\"\n model_name = backbone.model_name\n config = get_efficientdet_config(model_name=model_name)\n config.image_size = (img_size, img_size) if isinstance(img_size, int) else img_size\n\n model_bench = create_model_from_config(\n config,\n bench_task=\"train\",\n bench_labeler=True,\n num_classes=num_classes - 1,\n pretrained=backbone.pretrained,\n **kwargs,\n )\n\n # TODO: Break down param groups for backbone\n def param_groups_fn(model: nn.Module) -> List[List[nn.Parameter]]:\n unwrapped = unwrap_bench(model)\n\n layers = [\n unwrapped.backbone,\n unwrapped.fpn,\n nn.Sequential(unwrapped.class_net, unwrapped.box_net),\n ]\n param_groups = [list(layer.parameters()) for layer in layers]\n check_all_model_params_in_groups2(model, param_groups)\n\n return param_groups\n\n model_bench.param_groups = MethodType(param_groups_fn, model_bench)\n\n return model_bench\n", "path": "icevision/models/ross/efficientdet/model.py"}]}
| 917 | 166 |
gh_patches_debug_1082
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-2093
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WarpedVRT context exit doesn't set the dataset as closed
It's me again with a WarpedVRT bug (I'm sorry).
Basically I wanted to know the state of the WarpedVRT dataset after I exited the context manager, and it seems that the WarpedVRT is not set to `closed`, but if I try to call `vrt.read()` rasterio will error with `RasterioIOError: Dataset is closed: WarpedVRT(tests/fixtures/cog_gcps.tif)`
```python
with rasterio.open("tests/fixtures/cog.tif") as src:
with WarpedVRT(src) as vrt:
assert not src.closed
assert not vrt.closed # <open WarpedVRT name='WarpedVRT(tests/fixtures/cog.tif)' mode='r'>
assert vrt.closed # <--- AssertionError | <open WarpedVRT name='WarpedVRT(tests/fixtures/cog.tif)' mode='r'>
assert src.closed
assert vrt.closed  # <-- still not closed here either
```
System:
- Mac Os
- rasterio: '1.2b4'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/vrt.py`
Content:
```
1 """rasterio.vrt: a module concerned with GDAL VRTs"""
2
3 import xml.etree.ElementTree as ET
4
5 import rasterio
6 from rasterio._warp import WarpedVRTReaderBase
7 from rasterio.dtypes import _gdal_typename
8 from rasterio.enums import MaskFlags
9 from rasterio.env import env_ctx_if_needed
10 from rasterio.path import parse_path
11 from rasterio.transform import TransformMethodsMixin
12 from rasterio.windows import WindowMethodsMixin
13
14
15 class WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin,
16 TransformMethodsMixin):
17 """A virtual warped dataset.
18
19 Abstracts the details of raster warping and allows access to data
20 that is reprojected when read.
21
22 This class is backed by an in-memory GDAL VRTWarpedDataset VRT file.
23
24 Parameters
25 ----------
26 src_dataset : dataset object
27 The warp source.
28 src_crs : CRS or str, optional
29 Overrides the coordinate reference system of `src_dataset`.
30 src_transfrom : Affine, optional
31 Overrides the transform of `src_dataset`.
32 src_nodata : float, optional
33 Overrides the nodata value of `src_dataset`, which is the
34 default.
35 crs : CRS or str, optional
36 The coordinate reference system at the end of the warp
37 operation. Default: the crs of `src_dataset`. dst_crs is
38 a deprecated alias for this parameter.
39 transform : Affine, optional
40 The transform for the virtual dataset. Default: will be
41 computed from the attributes of `src_dataset`. dst_transform
42 is a deprecated alias for this parameter.
43 height, width: int, optional
44 The dimensions of the virtual dataset. Defaults: will be
45 computed from the attributes of `src_dataset`. dst_height
46 and dst_width are deprecated alias for these parameters.
47 nodata : float, optional
48 Nodata value for the virtual dataset. Default: the nodata
49 value of `src_dataset` or 0.0. dst_nodata is a deprecated
50 alias for this parameter.
51 resampling : Resampling, optional
52 Warp resampling algorithm. Default: `Resampling.nearest`.
53 tolerance : float, optional
54 The maximum error tolerance in input pixels when
55 approximating the warp transformation. Default: 0.125,
56 or one-eigth of a pixel.
57 src_alpha : int, optional
58 Index of a source band to use as an alpha band for warping.
59 add_alpha : bool, optional
60 Whether to add an alpha masking band to the virtual dataset.
61 Default: False. This option will cause deletion of the VRT
62 nodata value.
63 init_dest_nodata : bool, optional
64 Whether or not to initialize output to `nodata`. Default:
65 True.
66 warp_mem_limit : int, optional
67 The warp operation's memory limit in MB. The default (0)
68 means 64 MB with GDAL 2.2.
69 dtype : str, optional
70 The working data type for warp operation and output.
71 warp_extras : dict
72 GDAL extra warp options. See
73 https://gdal.org/doxygen/structGDALWarpOptions.html.
74
75 Attributes
76 ----------
77 src_dataset : dataset
78 The dataset object to be virtually warped.
79 resampling : int
80 One of the values from rasterio.enums.Resampling. The default is
81 `Resampling.nearest`.
82 tolerance : float
83 The maximum error tolerance in input pixels when approximating
84 the warp transformation. The default is 0.125.
85 src_nodata: int or float, optional
86 The source nodata value. Pixels with this value will not be
87 used for interpolation. If not set, it will be default to the
88 nodata value of the source image, if available.
89 dst_nodata: int or float, optional
90 The nodata value used to initialize the destination; it will
91 remain in all areas not covered by the reprojected source.
92 Defaults to the value of src_nodata, or 0 (gdal default).
93 working_dtype : str, optional
94 The working data type for warp operation and output.
95 warp_extras : dict
96 GDAL extra warp options. See
97 https://gdal.org/doxygen/structGDALWarpOptions.html.
98
99 Examples
100 --------
101
102 >>> with rasterio.open('tests/data/RGB.byte.tif') as src:
103 ... with WarpedVRT(src, crs='EPSG:3857') as vrt:
104 ... data = vrt.read()
105
106 """
107
108 def __repr__(self):
109 return "<{} WarpedVRT name='{}' mode='{}'>".format(
110 self.closed and 'closed' or 'open', self.name, self.mode)
111
112 def __enter__(self):
113 self._env = env_ctx_if_needed()
114 self._env.__enter__()
115 self.start()
116 return self
117
118 def __exit__(self, *args, **kwargs):
119 self._env.__exit__()
120 self.close()
121
122 def __del__(self):
123 self.close()
124
125 def close(self):
126 self.stop()
127
128
129 def _boundless_vrt_doc(
130 src_dataset, nodata=None, background=None, hidenodata=False,
131 width=None, height=None, transform=None, masked=False):
132 """Make a VRT XML document.
133
134 Parameters
135 ----------
136 src_dataset : Dataset
137 The dataset to wrap.
138 background : int or float, optional
139 The background fill value for the boundless VRT.
140 masked : bool
141 If True, the src_dataset is replaced by its valid data mask.
142
143 Returns
144 -------
145 str
146 An XML text string.
147 """
148
149 nodata = nodata or src_dataset.nodata
150 width = width or src_dataset.width
151 height = height or src_dataset.height
152 transform = transform or src_dataset.transform
153
154 vrtdataset = ET.Element('VRTDataset')
155 vrtdataset.attrib['rasterYSize'] = str(height)
156 vrtdataset.attrib['rasterXSize'] = str(width)
157 srs = ET.SubElement(vrtdataset, 'SRS')
158 srs.text = src_dataset.crs.wkt if src_dataset.crs else ""
159 geotransform = ET.SubElement(vrtdataset, 'GeoTransform')
160 geotransform.text = ','.join([str(v) for v in transform.to_gdal()])
161
162 for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes):
163 vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')
164 vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)
165 vrtrasterband.attrib['band'] = str(bidx)
166
167 if background is not None or nodata is not None:
168 nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')
169 nodatavalue.text = str(background or nodata)
170
171 if hidenodata:
172 hidenodatavalue = ET.SubElement(vrtrasterband, 'HideNoDataValue')
173 hidenodatavalue.text = "1"
174
175 colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')
176 colorinterp.text = ci.name.capitalize()
177
178 complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')
179 sourcefilename = ET.SubElement(complexsource, 'SourceFilename')
180 sourcefilename.attrib['relativeToVRT'] = "0"
181 sourcefilename.attrib["shared"] = "0"
182 sourcefilename.text = parse_path(src_dataset.name).as_vsi()
183 sourceband = ET.SubElement(complexsource, 'SourceBand')
184 sourceband.text = str(bidx)
185 sourceproperties = ET.SubElement(complexsource, 'SourceProperties')
186 sourceproperties.attrib['RasterXSize'] = str(width)
187 sourceproperties.attrib['RasterYSize'] = str(height)
188 sourceproperties.attrib['dataType'] = _gdal_typename(dtype)
189 sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
190 sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
191 srcrect = ET.SubElement(complexsource, 'SrcRect')
192 srcrect.attrib['xOff'] = '0'
193 srcrect.attrib['yOff'] = '0'
194 srcrect.attrib['xSize'] = str(src_dataset.width)
195 srcrect.attrib['ySize'] = str(src_dataset.height)
196 dstrect = ET.SubElement(complexsource, 'DstRect')
197 dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)
198 dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)
199 dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a)
200 dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e)
201
202 if src_dataset.nodata is not None:
203 nodata_elem = ET.SubElement(complexsource, 'NODATA')
204 nodata_elem.text = str(src_dataset.nodata)
205
206 if src_dataset.options is not None:
207 openoptions = ET.SubElement(complexsource, 'OpenOptions')
208 for ookey, oovalue in src_dataset.options.items():
209 ooi = ET.SubElement(openoptions, 'OOI')
210 ooi.attrib['key'] = str(ookey)
211 ooi.text = str(oovalue)
212
213 # Effectively replaces all values of the source dataset with
214 # 255. Due to GDAL optimizations, the source dataset will not
215 # be read, so we get a performance improvement.
216 if masked:
217 scaleratio = ET.SubElement(complexsource, 'ScaleRatio')
218 scaleratio.text = '0'
219 scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')
220 scaleoffset.text = '255'
221
222 if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):
223 maskband = ET.SubElement(vrtdataset, 'MaskBand')
224 vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')
225 vrtrasterband.attrib['dataType'] = 'Byte'
226
227 simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')
228 sourcefilename = ET.SubElement(simplesource, 'SourceFilename')
229 sourcefilename.attrib['relativeToVRT'] = "0"
230 sourcefilename.attrib["shared"] = "0"
231 sourcefilename.text = parse_path(src_dataset.name).as_vsi()
232
233 sourceband = ET.SubElement(simplesource, 'SourceBand')
234 sourceband.text = 'mask,1'
235 sourceproperties = ET.SubElement(simplesource, 'SourceProperties')
236 sourceproperties.attrib['RasterXSize'] = str(width)
237 sourceproperties.attrib['RasterYSize'] = str(height)
238 sourceproperties.attrib['dataType'] = 'Byte'
239 sourceproperties.attrib['BlockYSize'] = str(block_shape[0])
240 sourceproperties.attrib['BlockXSize'] = str(block_shape[1])
241 srcrect = ET.SubElement(simplesource, 'SrcRect')
242 srcrect.attrib['xOff'] = '0'
243 srcrect.attrib['yOff'] = '0'
244 srcrect.attrib['xSize'] = str(src_dataset.width)
245 srcrect.attrib['ySize'] = str(src_dataset.height)
246 dstrect = ET.SubElement(simplesource, 'DstRect')
247 dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)
248 dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)
249 dstrect.attrib['xSize'] = str(src_dataset.width)
250 dstrect.attrib['ySize'] = str(src_dataset.height)
251
252 return ET.tostring(vrtdataset).decode('ascii')
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rasterio/vrt.py b/rasterio/vrt.py
--- a/rasterio/vrt.py
+++ b/rasterio/vrt.py
@@ -122,9 +122,6 @@
def __del__(self):
self.close()
- def close(self):
- self.stop()
-
def _boundless_vrt_doc(
src_dataset, nodata=None, background=None, hidenodata=False,
|
{"golden_diff": "diff --git a/rasterio/vrt.py b/rasterio/vrt.py\n--- a/rasterio/vrt.py\n+++ b/rasterio/vrt.py\n@@ -122,9 +122,6 @@\n def __del__(self):\n self.close()\n \n- def close(self):\n- self.stop()\n-\n \n def _boundless_vrt_doc(\n src_dataset, nodata=None, background=None, hidenodata=False,\n", "issue": "WarpedVRT context exit doesn't not set the dataset as closed\nIt's me again with a WarpedVRT bug (I'm sorry). \r\n\r\nBasically I wanted to know the state of the WarpedVRT dataset after I exited the context manager, and it seems that the WarpedVRT is not set to `closed` but if I try to to `vrt.read()` rasterio will error with `RasterioIOError: Dataset is closed: WarpedVRT(tests/fixtures/cog_gcps.tif)`\r\n\r\n```python\r\nwith rasterio.open(\"tests/fixtures/cog.tif\") as src:\r\n with WarpedVRT(src) as vrt:\r\n assert not src.closed\r\n assert not vrt.closed # <open WarpedVRT name='WarpedVRT(tests/fixtures/cog.tif)' mode='r'>\r\n assert vrt.closed # <--- AssertionError | <open WarpedVRT name='WarpedVRT(tests/fixtures/cog.tif)' mode='r'>\r\nassert src.closed\r\nassert vrt.closed. # <-- still not closed here either\r\n```\r\n\r\nSystem: \r\n- Mac Os \r\n- rasterio: '1.2b4'`\n", "before_files": [{"content": "\"\"\"rasterio.vrt: a module concerned with GDAL VRTs\"\"\"\n\nimport xml.etree.ElementTree as ET\n\nimport rasterio\nfrom rasterio._warp import WarpedVRTReaderBase\nfrom rasterio.dtypes import _gdal_typename\nfrom rasterio.enums import MaskFlags\nfrom rasterio.env import env_ctx_if_needed\nfrom rasterio.path import parse_path\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio.windows import WindowMethodsMixin\n\n\nclass WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"A virtual warped dataset.\n\n Abstracts the details of raster warping and allows access to data\n that is reprojected when read.\n\n This class is backed by an in-memory GDAL VRTWarpedDataset VRT file.\n\n Parameters\n ----------\n src_dataset : dataset object\n The warp source.\n src_crs : CRS or str, optional\n Overrides the coordinate reference system of `src_dataset`.\n src_transfrom : Affine, optional\n Overrides the transform of `src_dataset`.\n src_nodata : float, optional\n Overrides the nodata value of `src_dataset`, which is the\n default.\n crs : CRS or str, optional\n The coordinate reference system at the end of the warp\n operation. Default: the crs of `src_dataset`. dst_crs is\n a deprecated alias for this parameter.\n transform : Affine, optional\n The transform for the virtual dataset. Default: will be\n computed from the attributes of `src_dataset`. dst_transform\n is a deprecated alias for this parameter.\n height, width: int, optional\n The dimensions of the virtual dataset. Defaults: will be\n computed from the attributes of `src_dataset`. dst_height\n and dst_width are deprecated alias for these parameters.\n nodata : float, optional\n Nodata value for the virtual dataset. Default: the nodata\n value of `src_dataset` or 0.0. dst_nodata is a deprecated\n alias for this parameter.\n resampling : Resampling, optional\n Warp resampling algorithm. Default: `Resampling.nearest`.\n tolerance : float, optional\n The maximum error tolerance in input pixels when\n approximating the warp transformation. Default: 0.125,\n or one-eigth of a pixel.\n src_alpha : int, optional\n Index of a source band to use as an alpha band for warping.\n add_alpha : bool, optional\n Whether to add an alpha masking band to the virtual dataset.\n Default: False. 
This option will cause deletion of the VRT\n nodata value.\n init_dest_nodata : bool, optional\n Whether or not to initialize output to `nodata`. Default:\n True.\n warp_mem_limit : int, optional\n The warp operation's memory limit in MB. The default (0)\n means 64 MB with GDAL 2.2.\n dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Attributes\n ----------\n src_dataset : dataset\n The dataset object to be virtually warped.\n resampling : int\n One of the values from rasterio.enums.Resampling. The default is\n `Resampling.nearest`.\n tolerance : float\n The maximum error tolerance in input pixels when approximating\n the warp transformation. The default is 0.125.\n src_nodata: int or float, optional\n The source nodata value. Pixels with this value will not be\n used for interpolation. If not set, it will be default to the\n nodata value of the source image, if available.\n dst_nodata: int or float, optional\n The nodata value used to initialize the destination; it will\n remain in all areas not covered by the reprojected source.\n Defaults to the value of src_nodata, or 0 (gdal default).\n working_dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Examples\n --------\n\n >>> with rasterio.open('tests/data/RGB.byte.tif') as src:\n ... with WarpedVRT(src, crs='EPSG:3857') as vrt:\n ... data = vrt.read()\n\n \"\"\"\n\n def __repr__(self):\n return \"<{} WarpedVRT name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n def __enter__(self):\n self._env = env_ctx_if_needed()\n self._env.__enter__()\n self.start()\n return self\n\n def __exit__(self, *args, **kwargs):\n self._env.__exit__()\n self.close()\n\n def __del__(self):\n self.close()\n\n def close(self):\n self.stop()\n\n\ndef _boundless_vrt_doc(\n src_dataset, nodata=None, background=None, hidenodata=False,\n width=None, height=None, transform=None, masked=False):\n \"\"\"Make a VRT XML document.\n\n Parameters\n ----------\n src_dataset : Dataset\n The dataset to wrap.\n background : int or float, optional\n The background fill value for the boundless VRT.\n masked : bool\n If True, the src_dataset is replaced by its valid data mask.\n\n Returns\n -------\n str\n An XML text string.\n \"\"\"\n\n nodata = nodata or src_dataset.nodata\n width = width or src_dataset.width\n height = height or src_dataset.height\n transform = transform or src_dataset.transform\n\n vrtdataset = ET.Element('VRTDataset')\n vrtdataset.attrib['rasterYSize'] = str(height)\n vrtdataset.attrib['rasterXSize'] = str(width)\n srs = ET.SubElement(vrtdataset, 'SRS')\n srs.text = src_dataset.crs.wkt if src_dataset.crs else \"\"\n geotransform = ET.SubElement(vrtdataset, 'GeoTransform')\n geotransform.text = ','.join([str(v) for v in transform.to_gdal()])\n\n for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes):\n vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)\n vrtrasterband.attrib['band'] = str(bidx)\n\n if background is not None or nodata is not None:\n nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')\n nodatavalue.text = str(background or nodata)\n\n if hidenodata:\n hidenodatavalue = 
ET.SubElement(vrtrasterband, 'HideNoDataValue')\n hidenodatavalue.text = \"1\"\n\n colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')\n colorinterp.text = ci.name.capitalize()\n\n complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')\n sourcefilename = ET.SubElement(complexsource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = parse_path(src_dataset.name).as_vsi()\n sourceband = ET.SubElement(complexsource, 'SourceBand')\n sourceband.text = str(bidx)\n sourceproperties = ET.SubElement(complexsource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = _gdal_typename(dtype)\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(complexsource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(complexsource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a)\n dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e)\n\n if src_dataset.nodata is not None:\n nodata_elem = ET.SubElement(complexsource, 'NODATA')\n nodata_elem.text = str(src_dataset.nodata)\n\n if src_dataset.options is not None:\n openoptions = ET.SubElement(complexsource, 'OpenOptions')\n for ookey, oovalue in src_dataset.options.items():\n ooi = ET.SubElement(openoptions, 'OOI')\n ooi.attrib['key'] = str(ookey)\n ooi.text = str(oovalue)\n\n # Effectively replaces all values of the source dataset with\n # 255. 
Due to GDAL optimizations, the source dataset will not\n # be read, so we get a performance improvement.\n if masked:\n scaleratio = ET.SubElement(complexsource, 'ScaleRatio')\n scaleratio.text = '0'\n scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')\n scaleoffset.text = '255'\n\n if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):\n maskband = ET.SubElement(vrtdataset, 'MaskBand')\n vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = 'Byte'\n\n simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')\n sourcefilename = ET.SubElement(simplesource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = parse_path(src_dataset.name).as_vsi()\n\n sourceband = ET.SubElement(simplesource, 'SourceBand')\n sourceband.text = 'mask,1'\n sourceproperties = ET.SubElement(simplesource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = 'Byte'\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(simplesource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(simplesource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width)\n dstrect.attrib['ySize'] = str(src_dataset.height)\n\n return ET.tostring(vrtdataset).decode('ascii')\n", "path": "rasterio/vrt.py"}], "after_files": [{"content": "\"\"\"rasterio.vrt: a module concerned with GDAL VRTs\"\"\"\n\nimport xml.etree.ElementTree as ET\n\nimport rasterio\nfrom rasterio._warp import WarpedVRTReaderBase\nfrom rasterio.dtypes import _gdal_typename\nfrom rasterio.enums import MaskFlags\nfrom rasterio.env import env_ctx_if_needed\nfrom rasterio.path import parse_path\nfrom rasterio.transform import TransformMethodsMixin\nfrom rasterio.windows import WindowMethodsMixin\n\n\nclass WarpedVRT(WarpedVRTReaderBase, WindowMethodsMixin,\n TransformMethodsMixin):\n \"\"\"A virtual warped dataset.\n\n Abstracts the details of raster warping and allows access to data\n that is reprojected when read.\n\n This class is backed by an in-memory GDAL VRTWarpedDataset VRT file.\n\n Parameters\n ----------\n src_dataset : dataset object\n The warp source.\n src_crs : CRS or str, optional\n Overrides the coordinate reference system of `src_dataset`.\n src_transfrom : Affine, optional\n Overrides the transform of `src_dataset`.\n src_nodata : float, optional\n Overrides the nodata value of `src_dataset`, which is the\n default.\n crs : CRS or str, optional\n The coordinate reference system at the end of the warp\n operation. Default: the crs of `src_dataset`. dst_crs is\n a deprecated alias for this parameter.\n transform : Affine, optional\n The transform for the virtual dataset. Default: will be\n computed from the attributes of `src_dataset`. dst_transform\n is a deprecated alias for this parameter.\n height, width: int, optional\n The dimensions of the virtual dataset. Defaults: will be\n computed from the attributes of `src_dataset`. 
dst_height\n and dst_width are deprecated alias for these parameters.\n nodata : float, optional\n Nodata value for the virtual dataset. Default: the nodata\n value of `src_dataset` or 0.0. dst_nodata is a deprecated\n alias for this parameter.\n resampling : Resampling, optional\n Warp resampling algorithm. Default: `Resampling.nearest`.\n tolerance : float, optional\n The maximum error tolerance in input pixels when\n approximating the warp transformation. Default: 0.125,\n or one-eigth of a pixel.\n src_alpha : int, optional\n Index of a source band to use as an alpha band for warping.\n add_alpha : bool, optional\n Whether to add an alpha masking band to the virtual dataset.\n Default: False. This option will cause deletion of the VRT\n nodata value.\n init_dest_nodata : bool, optional\n Whether or not to initialize output to `nodata`. Default:\n True.\n warp_mem_limit : int, optional\n The warp operation's memory limit in MB. The default (0)\n means 64 MB with GDAL 2.2.\n dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Attributes\n ----------\n src_dataset : dataset\n The dataset object to be virtually warped.\n resampling : int\n One of the values from rasterio.enums.Resampling. The default is\n `Resampling.nearest`.\n tolerance : float\n The maximum error tolerance in input pixels when approximating\n the warp transformation. The default is 0.125.\n src_nodata: int or float, optional\n The source nodata value. Pixels with this value will not be\n used for interpolation. If not set, it will be default to the\n nodata value of the source image, if available.\n dst_nodata: int or float, optional\n The nodata value used to initialize the destination; it will\n remain in all areas not covered by the reprojected source.\n Defaults to the value of src_nodata, or 0 (gdal default).\n working_dtype : str, optional\n The working data type for warp operation and output.\n warp_extras : dict\n GDAL extra warp options. See\n https://gdal.org/doxygen/structGDALWarpOptions.html.\n\n Examples\n --------\n\n >>> with rasterio.open('tests/data/RGB.byte.tif') as src:\n ... with WarpedVRT(src, crs='EPSG:3857') as vrt:\n ... 
data = vrt.read()\n\n \"\"\"\n\n def __repr__(self):\n return \"<{} WarpedVRT name='{}' mode='{}'>\".format(\n self.closed and 'closed' or 'open', self.name, self.mode)\n\n def __enter__(self):\n self._env = env_ctx_if_needed()\n self._env.__enter__()\n self.start()\n return self\n\n def __exit__(self, *args, **kwargs):\n self._env.__exit__()\n self.close()\n\n def __del__(self):\n self.close()\n\n\ndef _boundless_vrt_doc(\n src_dataset, nodata=None, background=None, hidenodata=False,\n width=None, height=None, transform=None, masked=False):\n \"\"\"Make a VRT XML document.\n\n Parameters\n ----------\n src_dataset : Dataset\n The dataset to wrap.\n background : int or float, optional\n The background fill value for the boundless VRT.\n masked : bool\n If True, the src_dataset is replaced by its valid data mask.\n\n Returns\n -------\n str\n An XML text string.\n \"\"\"\n\n nodata = nodata or src_dataset.nodata\n width = width or src_dataset.width\n height = height or src_dataset.height\n transform = transform or src_dataset.transform\n\n vrtdataset = ET.Element('VRTDataset')\n vrtdataset.attrib['rasterYSize'] = str(height)\n vrtdataset.attrib['rasterXSize'] = str(width)\n srs = ET.SubElement(vrtdataset, 'SRS')\n srs.text = src_dataset.crs.wkt if src_dataset.crs else \"\"\n geotransform = ET.SubElement(vrtdataset, 'GeoTransform')\n geotransform.text = ','.join([str(v) for v in transform.to_gdal()])\n\n for bidx, ci, block_shape, dtype in zip(src_dataset.indexes, src_dataset.colorinterp, src_dataset.block_shapes, src_dataset.dtypes):\n vrtrasterband = ET.SubElement(vrtdataset, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = _gdal_typename(dtype)\n vrtrasterband.attrib['band'] = str(bidx)\n\n if background is not None or nodata is not None:\n nodatavalue = ET.SubElement(vrtrasterband, 'NoDataValue')\n nodatavalue.text = str(background or nodata)\n\n if hidenodata:\n hidenodatavalue = ET.SubElement(vrtrasterband, 'HideNoDataValue')\n hidenodatavalue.text = \"1\"\n\n colorinterp = ET.SubElement(vrtrasterband, 'ColorInterp')\n colorinterp.text = ci.name.capitalize()\n\n complexsource = ET.SubElement(vrtrasterband, 'ComplexSource')\n sourcefilename = ET.SubElement(complexsource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = parse_path(src_dataset.name).as_vsi()\n sourceband = ET.SubElement(complexsource, 'SourceBand')\n sourceband.text = str(bidx)\n sourceproperties = ET.SubElement(complexsource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = _gdal_typename(dtype)\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(complexsource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(complexsource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width * src_dataset.transform.a / transform.a)\n dstrect.attrib['ySize'] = str(src_dataset.height * src_dataset.transform.e / transform.e)\n\n if src_dataset.nodata is not None:\n nodata_elem = 
ET.SubElement(complexsource, 'NODATA')\n nodata_elem.text = str(src_dataset.nodata)\n\n if src_dataset.options is not None:\n openoptions = ET.SubElement(complexsource, 'OpenOptions')\n for ookey, oovalue in src_dataset.options.items():\n ooi = ET.SubElement(openoptions, 'OOI')\n ooi.attrib['key'] = str(ookey)\n ooi.text = str(oovalue)\n\n # Effectively replaces all values of the source dataset with\n # 255. Due to GDAL optimizations, the source dataset will not\n # be read, so we get a performance improvement.\n if masked:\n scaleratio = ET.SubElement(complexsource, 'ScaleRatio')\n scaleratio.text = '0'\n scaleoffset = ET.SubElement(complexsource, 'ScaleOffset')\n scaleoffset.text = '255'\n\n if all(MaskFlags.per_dataset in flags for flags in src_dataset.mask_flag_enums):\n maskband = ET.SubElement(vrtdataset, 'MaskBand')\n vrtrasterband = ET.SubElement(maskband, 'VRTRasterBand')\n vrtrasterband.attrib['dataType'] = 'Byte'\n\n simplesource = ET.SubElement(vrtrasterband, 'SimpleSource')\n sourcefilename = ET.SubElement(simplesource, 'SourceFilename')\n sourcefilename.attrib['relativeToVRT'] = \"0\"\n sourcefilename.attrib[\"shared\"] = \"0\"\n sourcefilename.text = parse_path(src_dataset.name).as_vsi()\n\n sourceband = ET.SubElement(simplesource, 'SourceBand')\n sourceband.text = 'mask,1'\n sourceproperties = ET.SubElement(simplesource, 'SourceProperties')\n sourceproperties.attrib['RasterXSize'] = str(width)\n sourceproperties.attrib['RasterYSize'] = str(height)\n sourceproperties.attrib['dataType'] = 'Byte'\n sourceproperties.attrib['BlockYSize'] = str(block_shape[0])\n sourceproperties.attrib['BlockXSize'] = str(block_shape[1])\n srcrect = ET.SubElement(simplesource, 'SrcRect')\n srcrect.attrib['xOff'] = '0'\n srcrect.attrib['yOff'] = '0'\n srcrect.attrib['xSize'] = str(src_dataset.width)\n srcrect.attrib['ySize'] = str(src_dataset.height)\n dstrect = ET.SubElement(simplesource, 'DstRect')\n dstrect.attrib['xOff'] = str((src_dataset.transform.xoff - transform.xoff) / transform.a)\n dstrect.attrib['yOff'] = str((src_dataset.transform.yoff - transform.yoff) / transform.e)\n dstrect.attrib['xSize'] = str(src_dataset.width)\n dstrect.attrib['ySize'] = str(src_dataset.height)\n\n return ET.tostring(vrtdataset).decode('ascii')\n", "path": "rasterio/vrt.py"}]}
| 3,739 | 99 |
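A sketch of the assertions from the issue in the row above, re-run as the patch intends them to behave: with the `close()` override (which only called `stop()`) removed, closing is presumably handled by the base dataset class, so `closed` flips to `True` once the context manager exits. The fixture path is the one quoted in the issue and is assumed to exist locally.

```python
import rasterio
from rasterio.vrt import WarpedVRT

with rasterio.open("tests/fixtures/cog.tif") as src:
    with WarpedVRT(src) as vrt:
        assert not src.closed
        assert not vrt.closed
    assert vrt.closed   # previously failed: __exit__ -> close() only called stop()
assert src.closed
assert vrt.closed       # remains closed after the source dataset is closed too
```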
gh_patches_debug_8463 | rasdani/github-patches | git_diff | ESMCI__cime-3272 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DAE.ww3a.ADWAV test hangs on anlworkstation using PIO2
This is a confirmed regression caused by PR #3101, which turns off USE_MALLOC in the PIO2 build.
While we will try to fix this issue in PIO2, it is recommended to turn on USE_MALLOC by default.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/lib/CIME/case/case_submit.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 case.submit - Submit a cesm workflow to the queueing system or run it
5 if there is no queueing system. A cesm workflow may include multiple
6 jobs.
7 submit, check_case and check_da_settings are members of class Case in file case.py
8 """
9 from six.moves import configparser
10 from CIME.XML.standard_module_setup import *
11 from CIME.utils import expect, run_and_log_case_status, verbatim_success_msg, CIMEError
12 from CIME.locked_files import unlock_file, lock_file
13 from CIME.test_status import *
14
15 import socket
16
17 logger = logging.getLogger(__name__)
18
19 def _build_prereq_str(case, prev_job_ids):
20 delimiter = case.get_value("depend_separator")
21 prereq_str = ""
22 for job_id in prev_job_ids.values():
23 prereq_str += str(job_id) + delimiter
24 return prereq_str[:-1]
25
26 def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False,
27 resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None,
28 batch_args=None, workflow=True):
29 if job is None:
30 job = case.get_first_job()
31
32 # Check if CONTINUE_RUN value makes sense
33 if job != "case.test" and case.get_value("CONTINUE_RUN"):
34 rundir = case.get_value("RUNDIR")
35 expect(os.path.isdir(rundir),
36 "CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
37 # only checks for the first instance in a multidriver case
38 if case.get_value("COMP_INTERFACE") == "nuopc":
39 rpointer = "rpointer.med"
40 elif case.get_value("MULTI_DRIVER"):
41 rpointer = "rpointer.drv_0001"
42 else:
43 rpointer = "rpointer.drv"
44 expect(os.path.exists(os.path.join(rundir,rpointer)),
45 "CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}".format(rundir,rpointer))
46 # Finally we open the rpointer file and check that it's correct
47 casename = case.get_value("CASE")
48 with open(os.path.join(rundir,rpointer), "r") as fd:
49 ncfile = fd.readline().strip()
50 expect(ncfile.startswith(casename) and
51 os.path.exists(os.path.join(rundir,ncfile)),
52 "File {ncfile} not present or does not match case {casename}".
53 format(ncfile=os.path.join(rundir,ncfile),casename=casename))
54
55 # if case.submit is called with the no_batch flag then we assume that this
56 # flag will stay in effect for the duration of the RESUBMITs
57 env_batch = case.get_env("batch")
58 external_workflow = case.get_value("EXTERNAL_WORKFLOW")
59 if resubmit and env_batch.get_batch_system_type() == "none" or external_workflow:
60 no_batch = True
61 if no_batch:
62 batch_system = "none"
63 else:
64 batch_system = env_batch.get_batch_system_type()
65 unlock_file(os.path.basename(env_batch.filename))
66 case.set_value("BATCH_SYSTEM", batch_system)
67
68 env_batch_has_changed = False
69 if not external_workflow:
70 try:
71 case.check_lockedfile(os.path.basename(env_batch.filename))
72 except:
73 env_batch_has_changed = True
74
75 if batch_system != "none" and env_batch_has_changed and not external_workflow:
76 # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)
77 logger.warning(\
78 """
79 env_batch.xml appears to have changed, regenerating batch scripts
80 manual edits to these file will be lost!
81 """)
82 env_batch.make_all_batch_files(case)
83 case.flush()
84 lock_file(os.path.basename(env_batch.filename))
85
86 if resubmit:
87 # This is a resubmission, do not reinitialize test values
88 if job == "case.test":
89 case.set_value("IS_FIRST_RUN", False)
90
91 resub = case.get_value("RESUBMIT")
92 logger.info("Submitting job '{}', resubmit={:d}".format(job, resub))
93 case.set_value("RESUBMIT", resub-1)
94 if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"):
95 case.set_value("CONTINUE_RUN", True)
96
97 else:
98 if job == "case.test":
99 case.set_value("IS_FIRST_RUN", True)
100
101 if no_batch:
102 batch_system = "none"
103 else:
104 batch_system = env_batch.get_batch_system_type()
105
106 case.set_value("BATCH_SYSTEM", batch_system)
107
108 env_batch_has_changed = False
109 try:
110 case.check_lockedfile(os.path.basename(env_batch.filename))
111 except CIMEError:
112 env_batch_has_changed = True
113
114 if env_batch.get_batch_system_type() != "none" and env_batch_has_changed:
115 # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)
116 logger.warning(\
117 """
118 env_batch.xml appears to have changed, regenerating batch scripts
119 manual edits to these file will be lost!
120 """)
121 env_batch.make_all_batch_files(case)
122
123 unlock_file(os.path.basename(env_batch.filename))
124 lock_file(os.path.basename(env_batch.filename))
125
126 if job == case.get_primary_job():
127 case.check_case()
128 case.check_DA_settings()
129 if case.get_value("MACH") == "mira":
130 with open(".original_host", "w") as fd:
131 fd.write( socket.gethostname())
132
133 #Load Modules
134 case.load_env()
135
136 case.flush()
137
138 logger.warning("submit_jobs {}".format(job))
139 job_ids = case.submit_jobs(no_batch=no_batch, job=job, prereq=prereq,
140 skip_pnl=skip_pnl, resubmit_immediate=resubmit_immediate,
141 allow_fail=allow_fail, mail_user=mail_user,
142 mail_type=mail_type, batch_args=batch_args, workflow=workflow)
143
144 xml_jobids = []
145 for jobname, jobid in job_ids.items():
146 logger.info("Submitted job {} with id {}".format(jobname, jobid))
147 if jobid:
148 xml_jobids.append("{}:{}".format(jobname, jobid))
149
150 xml_jobid_text = ", ".join(xml_jobids)
151 if xml_jobid_text:
152 case.set_value("JOB_IDS", xml_jobid_text)
153
154 return xml_jobid_text
155
156 def submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False,
157 resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None,
158 batch_args=None, workflow=True):
159 if resubmit_immediate and self.get_value("MACH") in ['mira', 'cetus']:
160 logger.warning("resubmit_immediate does not work on Mira/Cetus, submitting normally")
161 resubmit_immediate = False
162
163 caseroot = self.get_value("CASEROOT")
164 if self.get_value("TEST"):
165 casebaseid = self.get_value("CASEBASEID")
166 # This should take care of the race condition where the submitted job
167 # begins immediately and tries to set RUN phase. We proactively assume
168 # a passed SUBMIT phase. If this state is already PASS, don't set it again
169 # because then we'll lose RUN phase info if it's there. This info is important
170 # for system_tests_common to know if it needs to reinitialize the test or not.
171 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
172 phase_status = ts.get_status(SUBMIT_PHASE)
173 if phase_status != TEST_PASS_STATUS:
174 ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
175
176 # If this is a resubmit check the hidden file .submit_options for
177 # any submit options used on the original submit and use them again
178 submit_options = os.path.join(caseroot, ".submit_options")
179 if resubmit and os.path.exists(submit_options):
180 config = configparser.RawConfigParser()
181 config.read(submit_options)
182 if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'):
183 skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')
184 if mail_user is None and config.has_option('SubmitOptions', 'mail_user'):
185 mail_user = config.get('SubmitOptions', 'mail_user')
186 if mail_type is None and config.has_option('SubmitOptions', 'mail_type'):
187 mail_type = str(config.get('SubmitOptions', 'mail_type')).split(',')
188 if batch_args is None and config.has_option('SubmitOptions', 'batch_args'):
189 batch_args = config.get('SubmitOptions', 'batch_args')
190
191 try:
192 functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,
193 allow_fail=allow_fail, resubmit=resubmit,
194 resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl,
195 mail_user=mail_user, mail_type=mail_type,
196 batch_args=batch_args, workflow=workflow)
197 run_and_log_case_status(functor, "case.submit", caseroot=caseroot,
198 custom_success_msg_functor=verbatim_success_msg)
199 except BaseException: # Want to catch KeyboardInterrupt too
200 # If something failed in the batch system, make sure to mark
201 # the test as failed if we are running a test.
202 if self.get_value("TEST"):
203 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
204 ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)
205
206 raise
207
208 def check_case(self):
209 self.check_lockedfiles()
210 self.create_namelists() # Must be called before check_all_input_data
211 logger.info("Checking that inputdata is available as part of case submission")
212 self.check_all_input_data()
213
214 if self.get_value('COMP_WAV') == 'ww':
215 # the ww3 buildnml has dependancies on inputdata so we must run it again
216 self.create_namelists(component='WAV')
217
218 expect(self.get_value("BUILD_COMPLETE"), "Build complete is "
219 "not True please rebuild the model by calling case.build")
220 logger.info("Check case OK")
221
222 def check_DA_settings(self):
223 script = self.get_value("DATA_ASSIMILATION_SCRIPT")
224 cycles = self.get_value("DATA_ASSIMILATION_CYCLES")
225 if len(script) > 0 and os.path.isfile(script) and cycles > 0:
226 logger.info("Data Assimilation enabled using script {} with {:d} cycles".format(script,
227 cycles))
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py
--- a/scripts/lib/CIME/case/case_submit.py
+++ b/scripts/lib/CIME/case/case_submit.py
@@ -56,8 +56,9 @@
# flag will stay in effect for the duration of the RESUBMITs
env_batch = case.get_env("batch")
external_workflow = case.get_value("EXTERNAL_WORKFLOW")
- if resubmit and env_batch.get_batch_system_type() == "none" or external_workflow:
+ if env_batch.get_batch_system_type() == "none" or resubmit and external_workflow:
no_batch = True
+
if no_batch:
batch_system = "none"
else:
|
{"golden_diff": "diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py\n--- a/scripts/lib/CIME/case/case_submit.py\n+++ b/scripts/lib/CIME/case/case_submit.py\n@@ -56,8 +56,9 @@\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n external_workflow = case.get_value(\"EXTERNAL_WORKFLOW\")\n- if resubmit and env_batch.get_batch_system_type() == \"none\" or external_workflow:\n+ if env_batch.get_batch_system_type() == \"none\" or resubmit and external_workflow:\n no_batch = True\n+\n if no_batch:\n batch_system = \"none\"\n else:\n", "issue": "DAE.ww3a.ADWAV test hangs on anlworkstation using PIO2\nThis is a confirmed regression caused by PR #3101, which turns off USE_MALLOC in PIO2 build.\r\nWhile we will try to fix this issue in PIO2, it is recommended to turn on USE_MALLOC by default.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. A cesm workflow may include multiple\njobs.\nsubmit, check_case and check_da_settings are members of class Case in file case.py\n\"\"\"\nfrom six.moves import configparser\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status, verbatim_success_msg, CIMEError\nfrom CIME.locked_files import unlock_file, lock_file\nfrom CIME.test_status import *\n\nimport socket\n\nlogger = logging.getLogger(__name__)\n\ndef _build_prereq_str(case, prev_job_ids):\n delimiter = case.get_value(\"depend_separator\")\n prereq_str = \"\"\n for job_id in prev_job_ids.values():\n prereq_str += str(job_id) + delimiter\n return prereq_str[:-1]\n\ndef _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False,\n resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None,\n batch_args=None, workflow=True):\n if job is None:\n job = case.get_first_job()\n\n # Check if CONTINUE_RUN value makes sense\n if job != \"case.test\" and case.get_value(\"CONTINUE_RUN\"):\n rundir = case.get_value(\"RUNDIR\")\n expect(os.path.isdir(rundir),\n \"CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n # only checks for the first instance in a multidriver case\n if case.get_value(\"COMP_INTERFACE\") == \"nuopc\":\n rpointer = \"rpointer.med\"\n elif case.get_value(\"MULTI_DRIVER\"):\n rpointer = \"rpointer.drv_0001\"\n else:\n rpointer = \"rpointer.drv\"\n expect(os.path.exists(os.path.join(rundir,rpointer)),\n \"CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}\".format(rundir,rpointer))\n # Finally we open the rpointer file and check that it's correct\n casename = case.get_value(\"CASE\")\n with open(os.path.join(rundir,rpointer), \"r\") as fd:\n ncfile = fd.readline().strip()\n expect(ncfile.startswith(casename) and\n os.path.exists(os.path.join(rundir,ncfile)),\n \"File {ncfile} not present or does not match case {casename}\".\n format(ncfile=os.path.join(rundir,ncfile),casename=casename))\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n external_workflow = case.get_value(\"EXTERNAL_WORKFLOW\")\n if resubmit and env_batch.get_batch_system_type() == \"none\" or external_workflow:\n no_batch = True\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n 
unlock_file(os.path.basename(env_batch.filename))\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n if not external_workflow:\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except:\n env_batch_has_changed = True\n\n if batch_system != \"none\" and env_batch_has_changed and not external_workflow:\n # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n case.flush()\n lock_file(os.path.basename(env_batch.filename))\n\n if resubmit:\n # This is a resubmission, do not reinitialize test values\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", False)\n\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '{}', resubmit={:d}\".format(job, resub))\n case.set_value(\"RESUBMIT\", resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n\n else:\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", True)\n\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except CIMEError:\n env_batch_has_changed = True\n\n if env_batch.get_batch_system_type() != \"none\" and env_batch_has_changed:\n # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n\n unlock_file(os.path.basename(env_batch.filename))\n lock_file(os.path.basename(env_batch.filename))\n\n if job == case.get_primary_job():\n case.check_case()\n case.check_DA_settings()\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\", \"w\") as fd:\n fd.write( socket.gethostname())\n\n #Load Modules\n case.load_env()\n\n case.flush()\n\n logger.warning(\"submit_jobs {}\".format(job))\n job_ids = case.submit_jobs(no_batch=no_batch, job=job, prereq=prereq,\n skip_pnl=skip_pnl, resubmit_immediate=resubmit_immediate,\n allow_fail=allow_fail, mail_user=mail_user,\n mail_type=mail_type, batch_args=batch_args, workflow=workflow)\n\n xml_jobids = []\n for jobname, jobid in job_ids.items():\n logger.info(\"Submitted job {} with id {}\".format(jobname, jobid))\n if jobid:\n xml_jobids.append(\"{}:{}\".format(jobname, jobid))\n\n xml_jobid_text = \", \".join(xml_jobids)\n if xml_jobid_text:\n case.set_value(\"JOB_IDS\", xml_jobid_text)\n\n return xml_jobid_text\n\ndef submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False,\n resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None,\n batch_args=None, workflow=True):\n if resubmit_immediate and self.get_value(\"MACH\") in ['mira', 'cetus']:\n logger.warning(\"resubmit_immediate does not work on Mira/Cetus, submitting normally\")\n resubmit_immediate = False\n\n caseroot = self.get_value(\"CASEROOT\")\n if self.get_value(\"TEST\"):\n casebaseid = self.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase. 
If this state is already PASS, don't set it again\n # because then we'll lose RUN phase info if it's there. This info is important\n # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n phase_status = ts.get_status(SUBMIT_PHASE)\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n # If this is a resubmit check the hidden file .submit_options for\n # any submit options used on the original submit and use them again\n submit_options = os.path.join(caseroot, \".submit_options\")\n if resubmit and os.path.exists(submit_options):\n config = configparser.RawConfigParser()\n config.read(submit_options)\n if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'):\n skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')\n if mail_user is None and config.has_option('SubmitOptions', 'mail_user'):\n mail_user = config.get('SubmitOptions', 'mail_user')\n if mail_type is None and config.has_option('SubmitOptions', 'mail_type'):\n mail_type = str(config.get('SubmitOptions', 'mail_type')).split(',')\n if batch_args is None and config.has_option('SubmitOptions', 'batch_args'):\n batch_args = config.get('SubmitOptions', 'batch_args')\n\n try:\n functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,\n allow_fail=allow_fail, resubmit=resubmit,\n resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl,\n mail_user=mail_user, mail_type=mail_type,\n batch_args=batch_args, workflow=workflow)\n run_and_log_case_status(functor, \"case.submit\", caseroot=caseroot,\n custom_success_msg_functor=verbatim_success_msg)\n except BaseException: # Want to catch KeyboardInterrupt too\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if self.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(self):\n self.check_lockedfiles()\n self.create_namelists() # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n self.check_all_input_data()\n\n if self.get_value('COMP_WAV') == 'ww':\n # the ww3 buildnml has dependancies on inputdata so we must run it again\n self.create_namelists(component='WAV')\n\n expect(self.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(self):\n script = self.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = self.get_value(\"DATA_ASSIMILATION_CYCLES\")\n if len(script) > 0 and os.path.isfile(script) and cycles > 0:\n logger.info(\"Data Assimilation enabled using script {} with {:d} cycles\".format(script,\n cycles))\n", "path": "scripts/lib/CIME/case/case_submit.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. 
A cesm workflow may include multiple\njobs.\nsubmit, check_case and check_da_settings are members of class Case in file case.py\n\"\"\"\nfrom six.moves import configparser\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status, verbatim_success_msg, CIMEError\nfrom CIME.locked_files import unlock_file, lock_file\nfrom CIME.test_status import *\n\nimport socket\n\nlogger = logging.getLogger(__name__)\n\ndef _build_prereq_str(case, prev_job_ids):\n delimiter = case.get_value(\"depend_separator\")\n prereq_str = \"\"\n for job_id in prev_job_ids.values():\n prereq_str += str(job_id) + delimiter\n return prereq_str[:-1]\n\ndef _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False,\n resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None,\n batch_args=None, workflow=True):\n if job is None:\n job = case.get_first_job()\n\n # Check if CONTINUE_RUN value makes sense\n if job != \"case.test\" and case.get_value(\"CONTINUE_RUN\"):\n rundir = case.get_value(\"RUNDIR\")\n expect(os.path.isdir(rundir),\n \"CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n # only checks for the first instance in a multidriver case\n if case.get_value(\"COMP_INTERFACE\") == \"nuopc\":\n rpointer = \"rpointer.med\"\n elif case.get_value(\"MULTI_DRIVER\"):\n rpointer = \"rpointer.drv_0001\"\n else:\n rpointer = \"rpointer.drv\"\n expect(os.path.exists(os.path.join(rundir,rpointer)),\n \"CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}\".format(rundir,rpointer))\n # Finally we open the rpointer file and check that it's correct\n casename = case.get_value(\"CASE\")\n with open(os.path.join(rundir,rpointer), \"r\") as fd:\n ncfile = fd.readline().strip()\n expect(ncfile.startswith(casename) and\n os.path.exists(os.path.join(rundir,ncfile)),\n \"File {ncfile} not present or does not match case {casename}\".\n format(ncfile=os.path.join(rundir,ncfile),casename=casename))\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n external_workflow = case.get_value(\"EXTERNAL_WORKFLOW\")\n if env_batch.get_batch_system_type() == \"none\" or resubmit and external_workflow:\n no_batch = True\n\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n unlock_file(os.path.basename(env_batch.filename))\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n if not external_workflow:\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except:\n env_batch_has_changed = True\n\n if batch_system != \"none\" and env_batch_has_changed and not external_workflow:\n # May need to regen batch files if user made batch setting changes (e.g. 
walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n case.flush()\n lock_file(os.path.basename(env_batch.filename))\n\n if resubmit:\n # This is a resubmission, do not reinitialize test values\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", False)\n\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '{}', resubmit={:d}\".format(job, resub))\n case.set_value(\"RESUBMIT\", resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n\n else:\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", True)\n\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except CIMEError:\n env_batch_has_changed = True\n\n if env_batch.get_batch_system_type() != \"none\" and env_batch_has_changed:\n # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n\n unlock_file(os.path.basename(env_batch.filename))\n lock_file(os.path.basename(env_batch.filename))\n\n if job == case.get_primary_job():\n case.check_case()\n case.check_DA_settings()\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\", \"w\") as fd:\n fd.write( socket.gethostname())\n\n #Load Modules\n case.load_env()\n\n case.flush()\n\n logger.warning(\"submit_jobs {}\".format(job))\n job_ids = case.submit_jobs(no_batch=no_batch, job=job, prereq=prereq,\n skip_pnl=skip_pnl, resubmit_immediate=resubmit_immediate,\n allow_fail=allow_fail, mail_user=mail_user,\n mail_type=mail_type, batch_args=batch_args, workflow=workflow)\n\n xml_jobids = []\n for jobname, jobid in job_ids.items():\n logger.info(\"Submitted job {} with id {}\".format(jobname, jobid))\n if jobid:\n xml_jobids.append(\"{}:{}\".format(jobname, jobid))\n\n xml_jobid_text = \", \".join(xml_jobids)\n if xml_jobid_text:\n case.set_value(\"JOB_IDS\", xml_jobid_text)\n\n return xml_jobid_text\n\ndef submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False,\n resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None,\n batch_args=None, workflow=True):\n if resubmit_immediate and self.get_value(\"MACH\") in ['mira', 'cetus']:\n logger.warning(\"resubmit_immediate does not work on Mira/Cetus, submitting normally\")\n resubmit_immediate = False\n\n caseroot = self.get_value(\"CASEROOT\")\n if self.get_value(\"TEST\"):\n casebaseid = self.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase. If this state is already PASS, don't set it again\n # because then we'll lose RUN phase info if it's there. 
This info is important\n # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n phase_status = ts.get_status(SUBMIT_PHASE)\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n # If this is a resubmit check the hidden file .submit_options for\n # any submit options used on the original submit and use them again\n submit_options = os.path.join(caseroot, \".submit_options\")\n if resubmit and os.path.exists(submit_options):\n config = configparser.RawConfigParser()\n config.read(submit_options)\n if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'):\n skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl')\n if mail_user is None and config.has_option('SubmitOptions', 'mail_user'):\n mail_user = config.get('SubmitOptions', 'mail_user')\n if mail_type is None and config.has_option('SubmitOptions', 'mail_type'):\n mail_type = str(config.get('SubmitOptions', 'mail_type')).split(',')\n if batch_args is None and config.has_option('SubmitOptions', 'batch_args'):\n batch_args = config.get('SubmitOptions', 'batch_args')\n\n try:\n functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,\n allow_fail=allow_fail, resubmit=resubmit,\n resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl,\n mail_user=mail_user, mail_type=mail_type,\n batch_args=batch_args, workflow=workflow)\n run_and_log_case_status(functor, \"case.submit\", caseroot=caseroot,\n custom_success_msg_functor=verbatim_success_msg)\n except BaseException: # Want to catch KeyboardInterrupt too\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if self.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(self):\n self.check_lockedfiles()\n self.create_namelists() # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n self.check_all_input_data()\n\n if self.get_value('COMP_WAV') == 'ww':\n # the ww3 buildnml has dependancies on inputdata so we must run it again\n self.create_namelists(component='WAV')\n\n expect(self.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(self):\n script = self.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = self.get_value(\"DATA_ASSIMILATION_CYCLES\")\n if len(script) > 0 and os.path.isfile(script) and cycles > 0:\n logger.info(\"Data Assimilation enabled using script {} with {:d} cycles\".format(script,\n cycles))\n", "path": "scripts/lib/CIME/case/case_submit.py"}]}
| 3,239 | 169 |
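A note on the one-line CIME change above: Python's `and` binds tighter than `or`, and that grouping is the entire behavioural difference between the old and the patched condition in `case_submit.py`. The sketch below is illustrative only — the function names and arguments are invented here, not CIME code — it simply evaluates the two groupings side by side.

```python
# Illustrative only; these functions are not CIME code. They mirror how Python
# groups the old and the patched condition ("and" binds tighter than "or").
def old_condition(resubmit, batch_type, external_workflow):
    # original: resubmit and batch_type == "none" or external_workflow
    return (resubmit and batch_type == "none") or external_workflow

def new_condition(resubmit, batch_type, external_workflow):
    # patched: batch_type == "none" or resubmit and external_workflow
    return batch_type == "none" or (resubmit and external_workflow)

# First submission (resubmit=False) on a machine with no batch system:
print(old_condition(False, "none", False))  # False -> no_batch is not forced
print(new_condition(False, "none", False))  # True  -> no_batch is forced on
```

In other words, after the patch a missing batch system forces `no_batch` on its own, while a resubmit only does so together with an external workflow.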
gh_patches_debug_1
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-866
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Only one organisation can be created per account
It seems that only one organisation can be created from a login account. The folks at Camptocamp have two separate organisations (companies) and are unable to create the second organisation from their login.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/core/settings/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django_project/core/settings/__init__.py b/django_project/core/settings/__init__.py
--- a/django_project/core/settings/__init__.py
+++ b/django_project/core/settings/__init__.py
@@ -0,0 +1 @@
+# coding=utf-8
|
{"golden_diff": "diff --git a/django_project/core/settings/__init__.py b/django_project/core/settings/__init__.py\n--- a/django_project/core/settings/__init__.py\n+++ b/django_project/core/settings/__init__.py\n@@ -0,0 +1 @@\n+# coding=utf-8\n", "issue": "Only one organisation can be created per account \nIt seems that only one organisation can be created from a login account. The folks at Camptocamp have two separate organisations (companies) and are unable to create the second organisation from their login.\r\n\n", "before_files": [{"content": "", "path": "django_project/core/settings/__init__.py"}], "after_files": [{"content": "# coding=utf-8\n", "path": "django_project/core/settings/__init__.py"}]}
| 313 | 62 |
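For context on the diff above: `# coding=utf-8` is a PEP 263 source-encoding declaration. It matters mainly under Python 2, whose default source encoding is ASCII; Python 3 already assumes UTF-8, where the line is redundant but harmless. A minimal file using it might look like the sketch below (the string content is just an example, not project code).

```python
# coding=utf-8
# With the declaration above, a Python 2 interpreter accepts the non-ASCII
# literal in this file; under Python 3 the declaration changes nothing.
ORGANISATION_NAME = "Sociedad Española de Ornitología"
print(ORGANISATION_NAME)
```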
gh_patches_debug_15596
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-1272
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`fetch` errors when run after doing a flat file install in the same session
Hello,
Following https://github.com/weecology/retriever/issues/1262#issuecomment-469062884
```python
In [1]: import retriever as rt
In [2]: rt.install_csv("wine-composition")
Installing ./wine_composition_WineComposition.csv: 100%|████████████████████████████████████████████████████████████| 178/178 [00:00<00:00, 6408.68rows/s]
Out[2]: <retriever.engines.csvengine.engine at 0x114eeac18>
In [3]: wine_data = rt.fetch("wine-composition")
Installing wine_composition_WineComposition: 50%|█████████████████████████████████ | 178/356 [00:00<00:00, 4464.16rows/s]
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)
1377 else:
-> 1378 cur.execute(*args)
1379 return cur
OperationalError: near ".": syntax error
During handling of the above exception, another exception occurred:
DatabaseError Traceback (most recent call last)
<ipython-input-3-5b818d622ba9> in <module>
----> 1 wine_data = rt.fetch("wine-composition")
~/anaconda3/lib/python3.6/site-packages/retriever/lib/fetch.py in fetch(dataset, file, table_name)
12 sqlite = engine()
13 sqlite.opts = {"file": file, "table_name": table_name}
---> 14 df = sqlite.fetch_tables(dataset, db_table_names)
15 return df
~/anaconda3/lib/python3.6/site-packages/retriever/engines/sqlite.py in fetch_tables(self, dataset, table_names)
46 "FROM {};".format(table),
47 connection)
---> 48 for table in table_names}
49 return data
50
~/anaconda3/lib/python3.6/site-packages/retriever/engines/sqlite.py in <dictcomp>(.0)
46 "FROM {};".format(table),
47 connection)
---> 48 for table in table_names}
49 return data
50
~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in read_sql_query(sql, con, index_col, coerce_float, params, parse_dates, chunksize)
312 return pandas_sql.read_query(
313 sql, index_col=index_col, params=params, coerce_float=coerce_float,
--> 314 parse_dates=parse_dates, chunksize=chunksize)
315
316
~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in read_query(self, sql, index_col, coerce_float, params, parse_dates, chunksize)
1411
1412 args = _convert_params(sql, params)
-> 1413 cursor = self.execute(*args)
1414 columns = [col_desc[0] for col_desc in cursor.description]
1415
~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)
1388 ex = DatabaseError(
1389 "Execution failed on sql '%s': %s" % (args[0], exc))
-> 1390 raise_with_traceback(ex)
1391
1392 @staticmethod
~/anaconda3/lib/python3.6/site-packages/pandas/compat/__init__.py in raise_with_traceback(exc, traceback)
402 if traceback == Ellipsis:
403 _, _, traceback = sys.exc_info()
--> 404 raise exc.with_traceback(traceback)
405 else:
406 # this version of raise is a syntax error in Python 3
~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)
1376 cur.execute(*args, **kwargs)
1377 else:
-> 1378 cur.execute(*args)
1379 return cur
1380 except Exception as exc:
DatabaseError: Execution failed on sql 'SELECT * FROM ./wine_composition_WineComposition.csv;': near ".": syntax error
```
Maybe a message should warn the user that `fetch` can't be used after installing a dataset as a CSV file.
Kind regards
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `retriever/lib/install.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import os
5
6 from retriever.engines import choose_engine
7 from retriever.lib.defaults import DATA_DIR, SCRIPT_WRITE_PATH
8 from retriever.lib.scripts import SCRIPT_LIST
9 from retriever.lib.engine_tools import name_matches
10 from retriever.lib.repository import check_for_updates
11
12
13 def _install(args, use_cache, debug):
14 """Install datasets for retriever."""
15 engine = choose_engine(args)
16 engine.use_cache = use_cache
17
18 script_list = SCRIPT_LIST()
19 if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
20 check_for_updates()
21 script_list = SCRIPT_LIST()
22 data_sets_scripts = name_matches(script_list, args['dataset'])
23 if data_sets_scripts:
24 for data_sets_script in data_sets_scripts:
25 try:
26 data_sets_script.download(engine, debug=debug)
27 data_sets_script.engine.final_cleanup()
28 except Exception as e:
29 print(e)
30 if debug:
31 raise
32 else:
33 message = "Run retriever.datasets()to list the currently available " \
34 "datasets."
35 raise ValueError(message)
36 return engine
37
38
39 def install_csv(dataset,
40 table_name='{db}_{table}.csv',
41 data_dir=DATA_DIR, debug=False, use_cache=True):
42 """Install datasets into csv."""
43 args = {
44 'command': 'install',
45 'dataset': dataset,
46 'engine': 'csv',
47 'table_name': table_name,
48 'data_dir': data_dir
49 }
50 return _install(args, use_cache, debug)
51
52
53 def install_mysql(dataset, user='root', password='', host='localhost',
54 port=3306, database_name='{db}', table_name='{db}.{table}',
55 debug=False, use_cache=True):
56 """Install datasets into mysql."""
57 args = {
58 'command': 'install',
59 'database_name': database_name,
60 'engine': 'mysql',
61 'dataset': dataset,
62 'host': host,
63 'port': port,
64 'password': password,
65 'table_name': table_name,
66 'user': user
67 }
68 return _install(args, use_cache, debug)
69
70
71 def install_postgres(dataset, user='postgres', password='',
72 host='localhost', port=5432, database='postgres',
73 database_name='{db}', table_name='{db}.{table}', bbox=[],
74 debug=False, use_cache=True):
75 """Install datasets into postgres."""
76 args = {
77 'command': 'install',
78 'database': database,
79 'database_name': database_name,
80 'engine': 'postgres',
81 'dataset': dataset,
82 'host': host,
83 'port': port,
84 'password': password,
85 'table_name': table_name,
86 'user': user,
87 'bbox': bbox
88 }
89 return _install(args, use_cache, debug)
90
91
92 def install_sqlite(dataset, file='sqlite.db',
93 table_name='{db}_{table}',
94 data_dir=DATA_DIR,
95 debug=False, use_cache=True):
96 """Install datasets into sqlite."""
97 args = {
98 'command': 'install',
99 'dataset': dataset,
100 'engine': 'sqlite',
101 'file': file,
102 'table_name': table_name,
103 'data_dir': data_dir
104 }
105 return _install(args, use_cache, debug)
106
107
108 def install_msaccess(dataset, file='access.mdb',
109 table_name='[{db} {table}]',
110 data_dir=DATA_DIR,
111 debug=False, use_cache=True):
112 """Install datasets into msaccess."""
113 args = {
114 'command': 'install',
115 'dataset': dataset,
116 'engine': 'msaccess',
117 'file': file,
118 'table_name': table_name,
119 'data_dir': data_dir
120 }
121 return _install(args, use_cache, debug)
122
123
124 def install_json(dataset,
125 table_name='{db}_{table}.json',
126 data_dir=DATA_DIR, debug=False, use_cache=True):
127 """Install datasets into json."""
128 args = {
129 'command': 'install',
130 'dataset': dataset,
131 'engine': 'json',
132 'table_name': table_name,
133 'data_dir': data_dir
134 }
135 return _install(args, use_cache, debug)
136
137
138 def install_xml(dataset,
139 table_name='{db}_{table}.xml',
140 data_dir=DATA_DIR, debug=False, use_cache=True):
141 """Install datasets into xml."""
142 args = {
143 'command': 'install',
144 'dataset': dataset,
145 'engine': 'xml',
146 'table_name': table_name,
147 'data_dir': data_dir
148 }
149 return _install(args, use_cache, debug)
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/retriever/lib/install.py b/retriever/lib/install.py
--- a/retriever/lib/install.py
+++ b/retriever/lib/install.py
@@ -2,6 +2,7 @@
from __future__ import print_function
import os
+from collections import OrderedDict
from retriever.engines import choose_engine
from retriever.lib.defaults import DATA_DIR, SCRIPT_WRITE_PATH
@@ -23,6 +24,7 @@
if data_sets_scripts:
for data_sets_script in data_sets_scripts:
try:
+ engine.script_table_registry = OrderedDict()
data_sets_script.download(engine, debug=debug)
data_sets_script.engine.final_cleanup()
except Exception as e:
|
{"golden_diff": "diff --git a/retriever/lib/install.py b/retriever/lib/install.py\n--- a/retriever/lib/install.py\n+++ b/retriever/lib/install.py\n@@ -2,6 +2,7 @@\n from __future__ import print_function\n \n import os\n+from collections import OrderedDict\n \n from retriever.engines import choose_engine\n from retriever.lib.defaults import DATA_DIR, SCRIPT_WRITE_PATH\n@@ -23,6 +24,7 @@\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n try:\n+ engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n", "issue": "`fetch` errors when run after doing a flat file install in the same session\nHello,\r\n\r\nFollowing https://github.com/weecology/retriever/issues/1262#issuecomment-469062884\r\n\r\n```python\r\nIn [1]: import retriever as rt\r\n\r\nIn [2]: rt.install_csv(\"wine-composition\")\r\nInstalling ./wine_composition_WineComposition.csv: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 178/178 [00:00<00:00, 6408.68rows/s]\r\nOut[2]: <retriever.engines.csvengine.engine at 0x114eeac18>\r\n\r\nIn [3]: wine_data = rt.fetch(\"wine-composition\")\r\nInstalling wine_composition_WineComposition: 50%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 | 178/356 [00:00<00:00, 4464.16rows/s]\r\n---------------------------------------------------------------------------\r\nOperationalError Traceback (most recent call last)\r\n~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)\r\n 1377 else:\r\n-> 1378 cur.execute(*args)\r\n 1379 return cur\r\n\r\nOperationalError: near \".\": syntax error\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nDatabaseError Traceback (most recent call last)\r\n<ipython-input-3-5b818d622ba9> in <module>\r\n----> 1 wine_data = rt.fetch(\"wine-composition\")\r\n\r\n~/anaconda3/lib/python3.6/site-packages/retriever/lib/fetch.py in fetch(dataset, file, table_name)\r\n 12 sqlite = engine()\r\n 13 sqlite.opts = {\"file\": file, \"table_name\": table_name}\r\n---> 14 df = sqlite.fetch_tables(dataset, db_table_names)\r\n 15 return df\r\n\r\n~/anaconda3/lib/python3.6/site-packages/retriever/engines/sqlite.py in fetch_tables(self, dataset, table_names)\r\n 46 \"FROM {};\".format(table),\r\n 47 connection)\r\n---> 48 for table in table_names}\r\n 49 return data\r\n 50\r\n\r\n~/anaconda3/lib/python3.6/site-packages/retriever/engines/sqlite.py in <dictcomp>(.0)\r\n 46 \"FROM {};\".format(table),\r\n 47 connection)\r\n---> 48 for table in table_names}\r\n 49 return data\r\n 50\r\n\r\n~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in read_sql_query(sql, con, index_col, coerce_float, params, parse_dates, chunksize)\r\n 312 return pandas_sql.read_query(\r\n 313 sql, index_col=index_col, params=params, coerce_float=coerce_float,\r\n--> 314 parse_dates=parse_dates, chunksize=chunksize)\r\n 315\r\n 316\r\n\r\n~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in read_query(self, sql, index_col, coerce_float, params, parse_dates, 
chunksize)\r\n 1411\r\n 1412 args = _convert_params(sql, params)\r\n-> 1413 cursor = self.execute(*args)\r\n 1414 columns = [col_desc[0] for col_desc in cursor.description]\r\n 1415\r\n\r\n~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)\r\n 1388 ex = DatabaseError(\r\n 1389 \"Execution failed on sql '%s': %s\" % (args[0], exc))\r\n-> 1390 raise_with_traceback(ex)\r\n 1391\r\n 1392 @staticmethod\r\n\r\n~/anaconda3/lib/python3.6/site-packages/pandas/compat/__init__.py in raise_with_traceback(exc, traceback)\r\n 402 if traceback == Ellipsis:\r\n 403 _, _, traceback = sys.exc_info()\r\n--> 404 raise exc.with_traceback(traceback)\r\n 405 else:\r\n 406 # this version of raise is a syntax error in Python 3\r\n\r\n~/anaconda3/lib/python3.6/site-packages/pandas/io/sql.py in execute(self, *args, **kwargs)\r\n 1376 cur.execute(*args, **kwargs)\r\n 1377 else:\r\n-> 1378 cur.execute(*args)\r\n 1379 return cur\r\n 1380 except Exception as exc:\r\n\r\nDatabaseError: Execution failed on sql 'SELECT * FROM ./wine_composition_WineComposition.csv;': near \".\": syntax error\r\n```\r\n\r\nMaybe a message should warm user that `fetch` can't be use after installing dataset as a CSV file.\r\n\r\nKind regards\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\n\nfrom retriever.engines import choose_engine\nfrom retriever.lib.defaults import DATA_DIR, SCRIPT_WRITE_PATH\nfrom retriever.lib.scripts import SCRIPT_LIST\nfrom retriever.lib.engine_tools import name_matches\nfrom retriever.lib.repository import check_for_updates\n\n\ndef _install(args, use_cache, debug):\n \"\"\"Install datasets for retriever.\"\"\"\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n try:\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n else:\n message = \"Run retriever.datasets()to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine\n\n\ndef install_csv(dataset,\n table_name='{db}_{table}.csv',\n data_dir=DATA_DIR, debug=False, use_cache=True):\n \"\"\"Install datasets into csv.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'csv',\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_mysql(dataset, user='root', password='', host='localhost',\n port=3306, database_name='{db}', table_name='{db}.{table}',\n debug=False, use_cache=True):\n \"\"\"Install datasets into mysql.\"\"\"\n args = {\n 'command': 'install',\n 'database_name': database_name,\n 'engine': 'mysql',\n 'dataset': dataset,\n 'host': host,\n 'port': port,\n 'password': password,\n 'table_name': table_name,\n 'user': user\n }\n return _install(args, use_cache, debug)\n\n\ndef install_postgres(dataset, user='postgres', password='',\n host='localhost', port=5432, database='postgres',\n database_name='{db}', table_name='{db}.{table}', bbox=[],\n debug=False, use_cache=True):\n \"\"\"Install datasets into postgres.\"\"\"\n args = {\n 'command': 'install',\n 'database': database,\n 'database_name': database_name,\n 'engine': 'postgres',\n 'dataset': 
dataset,\n 'host': host,\n 'port': port,\n 'password': password,\n 'table_name': table_name,\n 'user': user,\n 'bbox': bbox\n }\n return _install(args, use_cache, debug)\n\n\ndef install_sqlite(dataset, file='sqlite.db',\n table_name='{db}_{table}',\n data_dir=DATA_DIR,\n debug=False, use_cache=True):\n \"\"\"Install datasets into sqlite.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'sqlite',\n 'file': file,\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_msaccess(dataset, file='access.mdb',\n table_name='[{db} {table}]',\n data_dir=DATA_DIR,\n debug=False, use_cache=True):\n \"\"\"Install datasets into msaccess.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'msaccess',\n 'file': file,\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_json(dataset,\n table_name='{db}_{table}.json',\n data_dir=DATA_DIR, debug=False, use_cache=True):\n \"\"\"Install datasets into json.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'json',\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_xml(dataset,\n table_name='{db}_{table}.xml',\n data_dir=DATA_DIR, debug=False, use_cache=True):\n \"\"\"Install datasets into xml.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'xml',\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n", "path": "retriever/lib/install.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nfrom collections import OrderedDict\n\nfrom retriever.engines import choose_engine\nfrom retriever.lib.defaults import DATA_DIR, SCRIPT_WRITE_PATH\nfrom retriever.lib.scripts import SCRIPT_LIST\nfrom retriever.lib.engine_tools import name_matches\nfrom retriever.lib.repository import check_for_updates\n\n\ndef _install(args, use_cache, debug):\n \"\"\"Install datasets for retriever.\"\"\"\n engine = choose_engine(args)\n engine.use_cache = use_cache\n\n script_list = SCRIPT_LIST()\n if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):\n check_for_updates()\n script_list = SCRIPT_LIST()\n data_sets_scripts = name_matches(script_list, args['dataset'])\n if data_sets_scripts:\n for data_sets_script in data_sets_scripts:\n try:\n engine.script_table_registry = OrderedDict()\n data_sets_script.download(engine, debug=debug)\n data_sets_script.engine.final_cleanup()\n except Exception as e:\n print(e)\n if debug:\n raise\n else:\n message = \"Run retriever.datasets()to list the currently available \" \\\n \"datasets.\"\n raise ValueError(message)\n return engine\n\n\ndef install_csv(dataset,\n table_name='{db}_{table}.csv',\n data_dir=DATA_DIR, debug=False, use_cache=True):\n \"\"\"Install datasets into csv.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'csv',\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_mysql(dataset, user='root', password='', host='localhost',\n port=3306, database_name='{db}', table_name='{db}.{table}',\n debug=False, use_cache=True):\n \"\"\"Install datasets into mysql.\"\"\"\n args = {\n 'command': 'install',\n 'database_name': database_name,\n 'engine': 'mysql',\n 'dataset': dataset,\n 'host': host,\n 'port': port,\n 'password': password,\n 'table_name': table_name,\n 
'user': user\n }\n return _install(args, use_cache, debug)\n\n\ndef install_postgres(dataset, user='postgres', password='',\n host='localhost', port=5432, database='postgres',\n database_name='{db}', table_name='{db}.{table}', bbox=[],\n debug=False, use_cache=True):\n \"\"\"Install datasets into postgres.\"\"\"\n args = {\n 'command': 'install',\n 'database': database,\n 'database_name': database_name,\n 'engine': 'postgres',\n 'dataset': dataset,\n 'host': host,\n 'port': port,\n 'password': password,\n 'table_name': table_name,\n 'user': user,\n 'bbox': bbox\n }\n return _install(args, use_cache, debug)\n\n\ndef install_sqlite(dataset, file='sqlite.db',\n table_name='{db}_{table}',\n data_dir=DATA_DIR,\n debug=False, use_cache=True):\n \"\"\"Install datasets into sqlite.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'sqlite',\n 'file': file,\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_msaccess(dataset, file='access.mdb',\n table_name='[{db} {table}]',\n data_dir=DATA_DIR,\n debug=False, use_cache=True):\n \"\"\"Install datasets into msaccess.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'msaccess',\n 'file': file,\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_json(dataset,\n table_name='{db}_{table}.json',\n data_dir=DATA_DIR, debug=False, use_cache=True):\n \"\"\"Install datasets into json.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'json',\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n\n\ndef install_xml(dataset,\n table_name='{db}_{table}.xml',\n data_dir=DATA_DIR, debug=False, use_cache=True):\n \"\"\"Install datasets into xml.\"\"\"\n args = {\n 'command': 'install',\n 'dataset': dataset,\n 'engine': 'xml',\n 'table_name': table_name,\n 'data_dir': data_dir\n }\n return _install(args, use_cache, debug)\n", "path": "retriever/lib/install.py"}]}
| 2,742 | 150 |
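The retriever patch above works by assigning a fresh `OrderedDict` to `engine.script_table_registry` before each script is downloaded, so table names left behind by an earlier flat-file install cannot leak into the SQL that `fetch()` later issues. Below is a minimal, hedged sketch of that failure mode; `DummyEngine` and its methods are invented for illustration, and the assumption that the registry is shared between engine instances as a class attribute is mine — the exact sharing path inside retriever may differ.

```python
# Minimal sketch of the stale-registry failure mode (not retriever's real classes).
from collections import OrderedDict

class DummyEngine:
    script_table_registry = OrderedDict()  # assumed shared by every instance

    def register(self, dataset, table_name):
        self.script_table_registry.setdefault(dataset, []).append(table_name)

csv_engine = DummyEngine()
csv_engine.register("wine-composition", "./wine_composition_WineComposition.csv")

sqlite_engine = DummyEngine()
# fetch() builds "SELECT * FROM <table>;" from the registered names, so a stale
# CSV path like the one above is what produced the near ".": syntax error.
print(sqlite_engine.script_table_registry["wine-composition"])

# The patched _install() resets the registry before each script is installed:
sqlite_engine.script_table_registry = OrderedDict()
print(sqlite_engine.script_table_registry)  # empty again, so fetch() starts clean
```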
gh_patches_debug_47934
|
rasdani/github-patches
|
git_diff
|
apluslms__a-plus-1004
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash in adding deviations due to unique constraint violation
Sometimes, when a teacher is adding deadline deviations, A+ crashes.
Based on the traceback below, this occurs when new deviations are supposed to override existing ones. It looks like the code mixes up creating new and modifying existing records in some cases.
Stripped traceback that highlights the deviation override view and the unique constraint violation when the new deviation is saved:
```
[ERROR/log] Internal Server Error: /<course>/<instance>/teachers/deadline-deviations/override/
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/django/db/backends/utils.py", line 84, in _execute
return self.cursor.execute(sql, params)
psycopg2.errors.UniqueViolation: duplicate key value violates unique constraint "exercise_deadlineruledeviatio_exercise_id_xyz_uniq"
DETAIL: Key (exercise_id, submitter_id)=(123, 456) already exists.
The above exception was the direct cause of the following exception:
File "/a-plus/deviations/viewbase.py", line 141, in form_valid
new_deviation.save()
django.db.utils.IntegrityError: duplicate key value violates unique constraint "exercise_deadlineruledeviatio_exercise_id_xyz_uniq"
DETAIL: Key (exercise_id, submitter_id)=(123, 456) already exists.
```
The code clearly tries to separate existing and new deviations:
https://github.com/apluslms/a-plus/blob/8245ca68e99a848ae61180f5b39d8a3a4925c76c/deviations/viewbase.py#L128-L141
--- END ISSUE ---
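One way to sidestep the duplicate-key crash described above, whatever the reason the pre-fetched snapshot and the loop disagree, is to resolve each (exercise, submitter) pair against the database at write time instead of trusting `self.existing_deviations`. The helper below is a hypothetical sketch, not necessarily the patch adopted upstream; `apply_deviation` and its parameters are names chosen here, while `update_by_form` comes from the deviation model used in the view code shown below.

```python
# Hypothetical sketch (not the upstream patch): look the row up at write time,
# so an existing deviation is updated rather than recreated.
def apply_deviation(deviation_model, exercise, submitter, granter, form_data,
                    override_pairs):
    deviation = deviation_model.objects.filter(
        exercise=exercise,
        submitter=submitter,
    ).first()
    if deviation is None:
        # No row yet for this pair: create one and fill it from the form data.
        deviation = deviation_model(
            exercise=exercise,
            submitter=submitter,
            granter=granter,
        )
    elif (submitter.id, exercise.id) not in override_pairs:
        # Existing deviation the teacher chose not to override: leave it alone.
        return
    deviation.granter = granter
    deviation.update_by_form(form_data)
    deviation.save()
```

Calling this once per pair inside `form_valid` keeps the original semantics (update only the ticked pairs, create the missing ones) while removing the code path that blindly saves a brand-new row over an existing unique key.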
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deviations/viewbase.py`
Content:
```
1 from itertools import groupby
2 from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
3
4 from django.db import models
5 from django.http import HttpRequest, HttpResponse
6 from django.contrib import messages
7 from django import forms
8 from django.shortcuts import get_object_or_404
9 from django.utils.text import format_lazy
10 from django.utils.translation import ugettext_lazy as _, ngettext
11
12 from course.models import CourseModule, UserTag
13 from course.viewbase import CourseInstanceMixin, CourseInstanceBaseView
14 from deviations.models import SubmissionRuleDeviation
15 from lib.viewbase import BaseFormView, BaseRedirectView
16 from authorization.permissions import ACCESS
17 from exercise.models import BaseExercise
18 from userprofile.models import UserProfile
19
20
21 class ListDeviationsView(CourseInstanceBaseView):
22 access_mode = ACCESS.TEACHER
23 deviation_model: Type[SubmissionRuleDeviation]
24
25 def get_common_objects(self) -> None:
26 super().get_common_objects()
27 all_deviations = self.deviation_model.objects.filter(
28 exercise__course_module__course_instance=self.instance
29 )
30 self.deviation_groups = get_deviation_groups(all_deviations)
31 self.note("deviation_groups")
32
33
34 class AddDeviationsView(CourseInstanceMixin, BaseFormView):
35 access_mode = ACCESS.TEACHER
36 deviation_model: Type[SubmissionRuleDeviation]
37 session_key: str
38
39 def get_form_kwargs(self) -> Dict[str, Any]:
40 kwargs = super().get_form_kwargs()
41 kwargs["instance"] = self.instance
42 return kwargs
43
44 def form_valid(self, form: forms.BaseForm) -> HttpResponse:
45 exercises = get_exercises(form.cleaned_data)
46 submitters = get_submitters(form.cleaned_data)
47 existing_deviations = self.deviation_model.objects.filter(
48 exercise__in=exercises,
49 submitter__in=submitters,
50 )
51
52 if existing_deviations:
53 # Some deviations already existed. Use OverrideDeviationsView to
54 # confirm which ones the user wants to override. Store the form
55 # values in the current session, so they can be used afterwards.
56 self.success_url = self.deviation_model.get_override_url(self.instance)
57 self.request.session[self.session_key] = self.serialize_session_data(form.cleaned_data)
58 else:
59 self.success_url = self.deviation_model.get_list_url(self.instance)
60 for exercise in exercises:
61 for submitter in submitters:
62 new_deviation = self.deviation_model(
63 exercise=exercise,
64 submitter=submitter,
65 granter=self.request.user.userprofile,
66 )
67 new_deviation.update_by_form(form.cleaned_data)
68 new_deviation.save()
69
70 return super().form_valid(form)
71
72 def serialize_session_data(self, form_data: Dict[str, Any]) -> Dict[str, Any]:
73 """
74 Convert input form data into serializable values that can be stored in
75 the session cache.
76 """
77 result = {}
78 for key in ('exercise', 'module', 'submitter', 'submitter_tag'):
79 result[key] = [i.id for i in form_data.get(key, [])]
80 return result
81
82
83 class OverrideDeviationsView(CourseInstanceMixin, BaseFormView):
84 access_mode = ACCESS.TEACHER
85 # form_class is not really used, but it is required by the FormView.
86 # The form contains only checkboxes and the user input is validated in
87 # the form_valid method. The form HTML is manually written in the template.
88 form_class = forms.Form
89 deviation_model: Type[SubmissionRuleDeviation]
90 session_key: str
91
92 def get_success_url(self) -> str:
93 return self.deviation_model.get_list_url(self.instance)
94
95 def get_common_objects(self) -> None:
96 super().get_common_objects()
97 self.session_data = self.deserialize_session_data(self.request.session[self.session_key])
98 self.exercises = get_exercises(self.session_data)
99 self.submitters = get_submitters(self.session_data)
100 self.existing_deviations = self.deviation_model.objects.filter(
101 exercise__in=self.exercises,
102 submitter__in=self.submitters,
103 )
104 self.deviation_groups = get_deviation_groups(self.existing_deviations)
105 self.note("session_data", "exercises", "submitters", "existing_deviations", "deviation_groups")
106
107 def form_valid(self, form: forms.BaseForm) -> HttpResponse:
108 override_deviations = set()
109 deviation_list = self.request.POST.getlist('override')
110 for id_pair in deviation_list:
111 try:
112 submitter_id, exercise_id = id_pair.split('.')
113 submitter_id, exercise_id = int(submitter_id), int(exercise_id)
114 override_deviations.add((submitter_id, exercise_id))
115 except ValueError:
116 messages.error(self.request,
117 format_lazy(
118 _("INVALID_EXERCISE_OR_SUBMITTER_ID -- {id}"),
119 id=id_pair,
120 )
121 )
122 continue
123
124 existing_deviations = {(d.submitter_id, d.exercise_id): d for d in self.existing_deviations}
125
126 for exercise in self.exercises:
127 for submitter in self.submitters:
128 existing_deviation = existing_deviations.get((submitter.id, exercise.id))
129 if existing_deviation is not None:
130 if (submitter.id, exercise.id) in override_deviations:
131 existing_deviation.granter = self.request.user.userprofile
132 existing_deviation.update_by_form(self.session_data)
133 existing_deviation.save()
134 else:
135 new_deviation = self.deviation_model(
136 exercise=exercise,
137 submitter=submitter,
138 granter=self.request.user.userprofile,
139 )
140 new_deviation.update_by_form(self.session_data)
141 new_deviation.save()
142
143 del self.request.session[self.session_key]
144 return super().form_valid(form)
145
146 def deserialize_session_data(self, session_data: Dict[str, Any]) -> Dict[str, Any]:
147 """
148 Convert serialized session data back into its original representation.
149 """
150 result = {
151 'exercise': BaseExercise.objects.filter(id__in=session_data.get('exercise', [])),
152 'module': CourseModule.objects.filter(id__in=session_data.get('module', [])),
153 'submitter': UserProfile.objects.filter(id__in=session_data.get('submitter', [])),
154 'submitter_tag': UserTag.objects.filter(id__in=session_data.get('submitter_tag', [])),
155 }
156 return result
157
158
159 class RemoveDeviationsByIDView(CourseInstanceMixin, BaseRedirectView):
160 access_mode = ACCESS.TEACHER
161 deviation_model: Type[SubmissionRuleDeviation]
162
163 def post(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:
164 deviations = self.deviation_model.objects.filter(
165 id__in=request.POST.getlist("id"),
166 exercise__course_module__course_instance=self.instance,
167 )
168 for deviation in deviations:
169 deviation.delete()
170 if request.is_ajax():
171 return HttpResponse(status=204)
172 return self.redirect(self.deviation_model.get_list_url(self.instance))
173
174
175 class RemoveDeviationsView(CourseInstanceMixin, BaseFormView):
176 access_mode = ACCESS.TEACHER
177 deviation_model: Type[SubmissionRuleDeviation]
178
179 def get_form_kwargs(self) -> Dict[str, Any]:
180 kwargs = super().get_form_kwargs()
181 kwargs["instance"] = self.instance
182 return kwargs
183
184 def get_success_url(self) -> str:
185 return self.deviation_model.get_list_url(self.instance)
186
187 def form_valid(self, form: forms.BaseForm) -> HttpResponse:
188 number_of_removed = 0
189 deviations = self.deviation_model.objects.filter(
190 exercise__in=get_exercises(form.cleaned_data),
191 submitter__in=get_submitters(form.cleaned_data),
192 )
193 for deviation in deviations:
194 deviation.delete()
195 number_of_removed += 1
196 if number_of_removed == 0:
197 messages.warning(self.request, _("NOTHING_REMOVED"))
198 else:
199 message = format_lazy(
200 ngettext(
201 'REMOVED_DEVIATION -- {count}',
202 'REMOVED_DEVIATIONS -- {count}',
203 number_of_removed
204 ),
205 count=number_of_removed,
206 )
207 messages.info(self.request, message)
208 return super().form_valid(form)
209
210
211 def get_deviation_groups(
212 all_deviations: models.QuerySet[SubmissionRuleDeviation],
213 ) -> Iterable[Tuple[List[SubmissionRuleDeviation], bool, Optional[str]]]:
214 """
215 Group the deviations by user and module.
216
217 Grouping condition: deviations can be grouped if the user has been
218 granted the same deviation (based on the `is_equal` method) for all
219 exercises in the module.
220
221 The returned tuples contain the following values:
222 1. List of deviations with the same user and module.
223 2. Boolean representing whether the deviations in the list can be
224 displayed as a group (i.e. the grouping condition is satisfied).
225 3. An id that uniquely identifies the group of deviations.
226 """
227 # Find the number of exercises in each module.
228 course_instances = all_deviations.values_list('exercise__course_module__course_instance', flat=True)
229 exercise_counts = (
230 BaseExercise.objects.filter(
231 course_module__course_instance__in=course_instances
232 )
233 .order_by()
234 .values('course_module_id')
235 .annotate(count=models.Count('*'))
236 )
237 exercise_count_by_module = {row['course_module_id']: row['count'] for row in exercise_counts}
238
239 ordered_deviations = (
240 all_deviations
241 .select_related(
242 'submitter', 'submitter__user',
243 'granter', 'granter__user',
244 'exercise', 'exercise__course_module',
245 )
246 # parent is prefetched because there may be multiple ancestors, and
247 # they are needed for building the deviation's URL.
248 .prefetch_related('exercise__parent')
249 .order_by('submitter', 'exercise__course_module')
250 )
251
252 deviation_groups = groupby(
253 ordered_deviations,
254 lambda obj: (obj.submitter, obj.exercise.course_module),
255 )
256 for (submitter, module), deviations_iter in deviation_groups:
257 deviations = list(deviations_iter)
258 can_group = True
259 if len(deviations) < 2:
260 # Group must have at least 2 deviations.
261 can_group = False
262 else:
263 group_exercises = set()
264 # Check that the same deviation has been granted for all exercises.
265 for deviation in deviations:
266 if not deviation.is_groupable(deviations[0]):
267 can_group = False
268 break
269 group_exercises.add(deviation.exercise.id)
270 else:
271 if len(group_exercises) != exercise_count_by_module[module.id]:
272 # The number of exercises that have deviations doesn't
273 # match the number of exercises in the module, so there
274 # are some exercises that don't have a deviation.
275 can_group = False
276 group_id = f"{deviations[0].submitter.id}.{module.id}" if can_group else None
277 yield (deviations, can_group, group_id)
278
279
280 def get_exercises(form_data: Dict[str, Any]) -> models.QuerySet[BaseExercise]:
281 """
282 Get the exercises that match the input form's `exercise` and `module`
283 fields.
284 """
285 return BaseExercise.objects.filter(
286 models.Q(id__in=form_data.get('exercise', []))
287 | models.Q(course_module__in=form_data.get('module', []))
288 )
289
290
291 def get_submitters(form_data: Dict[str, Any]) -> models.QuerySet[UserProfile]:
292 """
293 Get the submitters that match the input form's `submitter` and
294 `submitter_tag` fields.
295 """
296 return UserProfile.objects.filter(
297 models.Q(id__in=form_data.get('submitter', []))
298 | models.Q(taggings__tag__in=form_data.get('submitter_tag', []))
299 )
300
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/deviations/viewbase.py b/deviations/viewbase.py
--- a/deviations/viewbase.py
+++ b/deviations/viewbase.py
@@ -296,4 +296,4 @@
return UserProfile.objects.filter(
models.Q(id__in=form_data.get('submitter', []))
| models.Q(taggings__tag__in=form_data.get('submitter_tag', []))
- )
+ ).distinct()
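The patch works because `get_submitters` ORs a direct id filter with a condition that spans the multi-valued `taggings` relation, so the query joins the taggings table and the same `UserProfile` can come back once per joined row, for example when a user is both listed explicitly and also carries a matching tag. The nested loops in `form_valid` then visit the same (exercise, submitter) pair more than once, and the second `save()` hits the unique constraint from the traceback. The sketch below is illustrative only: it assumes the a-plus Django project is configured, and the id and the placeholder tag come from the traceback rather than real data.

```python
from django.db.models import Q

from course.models import UserTag
from userprofile.models import UserProfile

tag = UserTag.objects.first()  # placeholder; assume user 456 carries this tag

submitters = UserProfile.objects.filter(
    Q(id__in=[456]) | Q(taggings__tag__in=[tag])
)
# The OR across the multi-valued `taggings` relation yields one row per joined
# match, so profile 456 can appear twice: once via the id filter, once via the
# tag filter. form_valid() builds and saves a deviation per row, and the second
# save() for the same (exercise_id, submitter_id) raises IntegrityError.

submitters = submitters.distinct()  # patched behaviour: one row per profile
```

`.distinct()` only adds `SELECT DISTINCT` to the generated query, so each profile appears once no matter how many filter branches it matches, and the rest of the view code is unaffected.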
verification_info:
{"golden_diff": "diff --git a/deviations/viewbase.py b/deviations/viewbase.py\n--- a/deviations/viewbase.py\n+++ b/deviations/viewbase.py\n@@ -296,4 +296,4 @@\n return UserProfile.objects.filter(\n models.Q(id__in=form_data.get('submitter', []))\n | models.Q(taggings__tag__in=form_data.get('submitter_tag', []))\n- )\n+ ).distinct()\n", "issue": "Crash in adding deviations due to unique constraint violation\nSometimes, when a teacher is adding deadline deviations, A+ crashes.\r\n\r\nBased on the traceback below, this occurs when new deviations are supposed to override existing ones. It looks like the code mixes up creating new and modifying existing records in some cases.\r\n\r\nStripped traceback that highlights the deviation override view and the unique constraint violation when the new deviation is saved:\r\n\r\n```\r\n[ERROR/log] Internal Server Error: /<course>/<instance>/teachers/deadline-deviations/override/\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/dist-packages/django/db/backends/utils.py\", line 84, in _execute\r\n return self.cursor.execute(sql, params)\r\npsycopg2.errors.UniqueViolation: duplicate key value violates unique constraint \"exercise_deadlineruledeviatio_exercise_id_xyz_uniq\"\r\nDETAIL: Key (exercise_id, submitter_id)=(123, 456) already exists.\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\n File \"/a-plus/deviations/viewbase.py\", line 141, in form_valid\r\n new_deviation.save()\r\ndjango.db.utils.IntegrityError: duplicate key value violates unique constraint \"exercise_deadlineruledeviatio_exercise_id_xyz_uniq\"\r\nDETAIL: Key (exercise_id, submitter_id)=(123, 456) already exists.\r\n```\r\n\r\nThe code clearly tries to separate existing and new deviations:\r\nhttps://github.com/apluslms/a-plus/blob/8245ca68e99a848ae61180f5b39d8a3a4925c76c/deviations/viewbase.py#L128-L141\n", "before_files": [{"content": "from itertools import groupby\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, Type\n\nfrom django.db import models\nfrom django.http import HttpRequest, HttpResponse\nfrom django.contrib import messages\nfrom django import forms\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import ugettext_lazy as _, ngettext\n\nfrom course.models import CourseModule, UserTag\nfrom course.viewbase import CourseInstanceMixin, CourseInstanceBaseView\nfrom deviations.models import SubmissionRuleDeviation\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom authorization.permissions import ACCESS\nfrom exercise.models import BaseExercise\nfrom userprofile.models import UserProfile\n\n\nclass ListDeviationsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def get_common_objects(self) -> None:\n super().get_common_objects()\n all_deviations = self.deviation_model.objects.filter(\n exercise__course_module__course_instance=self.instance\n )\n self.deviation_groups = get_deviation_groups(all_deviations)\n self.note(\"deviation_groups\")\n\n\nclass AddDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n session_key: str\n\n def get_form_kwargs(self) -> Dict[str, Any]:\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n exercises = get_exercises(form.cleaned_data)\n submitters = 
get_submitters(form.cleaned_data)\n existing_deviations = self.deviation_model.objects.filter(\n exercise__in=exercises,\n submitter__in=submitters,\n )\n\n if existing_deviations:\n # Some deviations already existed. Use OverrideDeviationsView to\n # confirm which ones the user wants to override. Store the form\n # values in the current session, so they can be used afterwards.\n self.success_url = self.deviation_model.get_override_url(self.instance)\n self.request.session[self.session_key] = self.serialize_session_data(form.cleaned_data)\n else:\n self.success_url = self.deviation_model.get_list_url(self.instance)\n for exercise in exercises:\n for submitter in submitters:\n new_deviation = self.deviation_model(\n exercise=exercise,\n submitter=submitter,\n granter=self.request.user.userprofile,\n )\n new_deviation.update_by_form(form.cleaned_data)\n new_deviation.save()\n\n return super().form_valid(form)\n\n def serialize_session_data(self, form_data: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Convert input form data into serializable values that can be stored in\n the session cache.\n \"\"\"\n result = {}\n for key in ('exercise', 'module', 'submitter', 'submitter_tag'):\n result[key] = [i.id for i in form_data.get(key, [])]\n return result\n\n\nclass OverrideDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n # form_class is not really used, but it is required by the FormView.\n # The form contains only checkboxes and the user input is validated in\n # the form_valid method. The form HTML is manually written in the template.\n form_class = forms.Form\n deviation_model: Type[SubmissionRuleDeviation]\n session_key: str\n\n def get_success_url(self) -> str:\n return self.deviation_model.get_list_url(self.instance)\n\n def get_common_objects(self) -> None:\n super().get_common_objects()\n self.session_data = self.deserialize_session_data(self.request.session[self.session_key])\n self.exercises = get_exercises(self.session_data)\n self.submitters = get_submitters(self.session_data)\n self.existing_deviations = self.deviation_model.objects.filter(\n exercise__in=self.exercises,\n submitter__in=self.submitters,\n )\n self.deviation_groups = get_deviation_groups(self.existing_deviations)\n self.note(\"session_data\", \"exercises\", \"submitters\", \"existing_deviations\", \"deviation_groups\")\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n override_deviations = set()\n deviation_list = self.request.POST.getlist('override')\n for id_pair in deviation_list:\n try:\n submitter_id, exercise_id = id_pair.split('.')\n submitter_id, exercise_id = int(submitter_id), int(exercise_id)\n override_deviations.add((submitter_id, exercise_id))\n except ValueError:\n messages.error(self.request,\n format_lazy(\n _(\"INVALID_EXERCISE_OR_SUBMITTER_ID -- {id}\"),\n id=id_pair,\n )\n )\n continue\n\n existing_deviations = {(d.submitter_id, d.exercise_id): d for d in self.existing_deviations}\n\n for exercise in self.exercises:\n for submitter in self.submitters:\n existing_deviation = existing_deviations.get((submitter.id, exercise.id))\n if existing_deviation is not None:\n if (submitter.id, exercise.id) in override_deviations:\n existing_deviation.granter = self.request.user.userprofile\n existing_deviation.update_by_form(self.session_data)\n existing_deviation.save()\n else:\n new_deviation = self.deviation_model(\n exercise=exercise,\n submitter=submitter,\n granter=self.request.user.userprofile,\n )\n new_deviation.update_by_form(self.session_data)\n 
new_deviation.save()\n\n del self.request.session[self.session_key]\n return super().form_valid(form)\n\n def deserialize_session_data(self, session_data: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Convert serialized session data back into its original representation.\n \"\"\"\n result = {\n 'exercise': BaseExercise.objects.filter(id__in=session_data.get('exercise', [])),\n 'module': CourseModule.objects.filter(id__in=session_data.get('module', [])),\n 'submitter': UserProfile.objects.filter(id__in=session_data.get('submitter', [])),\n 'submitter_tag': UserTag.objects.filter(id__in=session_data.get('submitter_tag', [])),\n }\n return result\n\n\nclass RemoveDeviationsByIDView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def post(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:\n deviations = self.deviation_model.objects.filter(\n id__in=request.POST.getlist(\"id\"),\n exercise__course_module__course_instance=self.instance,\n )\n for deviation in deviations:\n deviation.delete()\n if request.is_ajax():\n return HttpResponse(status=204)\n return self.redirect(self.deviation_model.get_list_url(self.instance))\n\n\nclass RemoveDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def get_form_kwargs(self) -> Dict[str, Any]:\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def get_success_url(self) -> str:\n return self.deviation_model.get_list_url(self.instance)\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n number_of_removed = 0\n deviations = self.deviation_model.objects.filter(\n exercise__in=get_exercises(form.cleaned_data),\n submitter__in=get_submitters(form.cleaned_data),\n )\n for deviation in deviations:\n deviation.delete()\n number_of_removed += 1\n if number_of_removed == 0:\n messages.warning(self.request, _(\"NOTHING_REMOVED\"))\n else:\n message = format_lazy(\n ngettext(\n 'REMOVED_DEVIATION -- {count}',\n 'REMOVED_DEVIATIONS -- {count}',\n number_of_removed\n ),\n count=number_of_removed,\n )\n messages.info(self.request, message)\n return super().form_valid(form)\n\n\ndef get_deviation_groups(\n all_deviations: models.QuerySet[SubmissionRuleDeviation],\n ) -> Iterable[Tuple[List[SubmissionRuleDeviation], bool, Optional[str]]]:\n \"\"\"\n Group the deviations by user and module.\n\n Grouping condition: deviations can be grouped if the user has been\n granted the same deviation (based on the `is_equal` method) for all\n exercises in the module.\n\n The returned tuples contain the following values:\n 1. List of deviations with the same user and module.\n 2. Boolean representing whether the deviations in the list can be\n displayed as a group (i.e. the grouping condition is satisfied).\n 3. 
An id that uniquely identifies the group of deviations.\n \"\"\"\n # Find the number of exercises in each module.\n course_instances = all_deviations.values_list('exercise__course_module__course_instance', flat=True)\n exercise_counts = (\n BaseExercise.objects.filter(\n course_module__course_instance__in=course_instances\n )\n .order_by()\n .values('course_module_id')\n .annotate(count=models.Count('*'))\n )\n exercise_count_by_module = {row['course_module_id']: row['count'] for row in exercise_counts}\n\n ordered_deviations = (\n all_deviations\n .select_related(\n 'submitter', 'submitter__user',\n 'granter', 'granter__user',\n 'exercise', 'exercise__course_module',\n )\n # parent is prefetched because there may be multiple ancestors, and\n # they are needed for building the deviation's URL.\n .prefetch_related('exercise__parent')\n .order_by('submitter', 'exercise__course_module')\n )\n\n deviation_groups = groupby(\n ordered_deviations,\n lambda obj: (obj.submitter, obj.exercise.course_module),\n )\n for (submitter, module), deviations_iter in deviation_groups:\n deviations = list(deviations_iter)\n can_group = True\n if len(deviations) < 2:\n # Group must have at least 2 deviations.\n can_group = False\n else:\n group_exercises = set()\n # Check that the same deviation has been granted for all exercises.\n for deviation in deviations:\n if not deviation.is_groupable(deviations[0]):\n can_group = False\n break\n group_exercises.add(deviation.exercise.id)\n else:\n if len(group_exercises) != exercise_count_by_module[module.id]:\n # The number of exercises that have deviations doesn't\n # match the number of exercises in the module, so there\n # are some exercises that don't have a deviation.\n can_group = False\n group_id = f\"{deviations[0].submitter.id}.{module.id}\" if can_group else None\n yield (deviations, can_group, group_id)\n\n\ndef get_exercises(form_data: Dict[str, Any]) -> models.QuerySet[BaseExercise]:\n \"\"\"\n Get the exercises that match the input form's `exercise` and `module`\n fields.\n \"\"\"\n return BaseExercise.objects.filter(\n models.Q(id__in=form_data.get('exercise', []))\n | models.Q(course_module__in=form_data.get('module', []))\n )\n\n\ndef get_submitters(form_data: Dict[str, Any]) -> models.QuerySet[UserProfile]:\n \"\"\"\n Get the submitters that match the input form's `submitter` and\n `submitter_tag` fields.\n \"\"\"\n return UserProfile.objects.filter(\n models.Q(id__in=form_data.get('submitter', []))\n | models.Q(taggings__tag__in=form_data.get('submitter_tag', []))\n )\n", "path": "deviations/viewbase.py"}], "after_files": [{"content": "from itertools import groupby\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, Type\n\nfrom django.db import models\nfrom django.http import HttpRequest, HttpResponse\nfrom django.contrib import messages\nfrom django import forms\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import ugettext_lazy as _, ngettext\n\nfrom course.models import CourseModule, UserTag\nfrom course.viewbase import CourseInstanceMixin, CourseInstanceBaseView\nfrom deviations.models import SubmissionRuleDeviation\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom authorization.permissions import ACCESS\nfrom exercise.models import BaseExercise\nfrom userprofile.models import UserProfile\n\n\nclass ListDeviationsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def 
get_common_objects(self) -> None:\n super().get_common_objects()\n all_deviations = self.deviation_model.objects.filter(\n exercise__course_module__course_instance=self.instance\n )\n self.deviation_groups = get_deviation_groups(all_deviations)\n self.note(\"deviation_groups\")\n\n\nclass AddDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n session_key: str\n\n def get_form_kwargs(self) -> Dict[str, Any]:\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n exercises = get_exercises(form.cleaned_data)\n submitters = get_submitters(form.cleaned_data)\n existing_deviations = self.deviation_model.objects.filter(\n exercise__in=exercises,\n submitter__in=submitters,\n )\n\n if existing_deviations:\n # Some deviations already existed. Use OverrideDeviationsView to\n # confirm which ones the user wants to override. Store the form\n # values in the current session, so they can be used afterwards.\n self.success_url = self.deviation_model.get_override_url(self.instance)\n self.request.session[self.session_key] = self.serialize_session_data(form.cleaned_data)\n else:\n self.success_url = self.deviation_model.get_list_url(self.instance)\n for exercise in exercises:\n for submitter in submitters:\n new_deviation = self.deviation_model(\n exercise=exercise,\n submitter=submitter,\n granter=self.request.user.userprofile,\n )\n new_deviation.update_by_form(form.cleaned_data)\n new_deviation.save()\n\n return super().form_valid(form)\n\n def serialize_session_data(self, form_data: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Convert input form data into serializable values that can be stored in\n the session cache.\n \"\"\"\n result = {}\n for key in ('exercise', 'module', 'submitter', 'submitter_tag'):\n result[key] = [i.id for i in form_data.get(key, [])]\n return result\n\n\nclass OverrideDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n # form_class is not really used, but it is required by the FormView.\n # The form contains only checkboxes and the user input is validated in\n # the form_valid method. 
The form HTML is manually written in the template.\n form_class = forms.Form\n deviation_model: Type[SubmissionRuleDeviation]\n session_key: str\n\n def get_success_url(self) -> str:\n return self.deviation_model.get_list_url(self.instance)\n\n def get_common_objects(self) -> None:\n super().get_common_objects()\n self.session_data = self.deserialize_session_data(self.request.session[self.session_key])\n self.exercises = get_exercises(self.session_data)\n self.submitters = get_submitters(self.session_data)\n self.existing_deviations = self.deviation_model.objects.filter(\n exercise__in=self.exercises,\n submitter__in=self.submitters,\n )\n self.deviation_groups = get_deviation_groups(self.existing_deviations)\n self.note(\"session_data\", \"exercises\", \"submitters\", \"existing_deviations\", \"deviation_groups\")\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n override_deviations = set()\n deviation_list = self.request.POST.getlist('override')\n for id_pair in deviation_list:\n try:\n submitter_id, exercise_id = id_pair.split('.')\n submitter_id, exercise_id = int(submitter_id), int(exercise_id)\n override_deviations.add((submitter_id, exercise_id))\n except ValueError:\n messages.error(self.request,\n format_lazy(\n _(\"INVALID_EXERCISE_OR_SUBMITTER_ID -- {id}\"),\n id=id_pair,\n )\n )\n continue\n\n existing_deviations = {(d.submitter_id, d.exercise_id): d for d in self.existing_deviations}\n\n for exercise in self.exercises:\n for submitter in self.submitters:\n existing_deviation = existing_deviations.get((submitter.id, exercise.id))\n if existing_deviation is not None:\n if (submitter.id, exercise.id) in override_deviations:\n existing_deviation.granter = self.request.user.userprofile\n existing_deviation.update_by_form(self.session_data)\n existing_deviation.save()\n else:\n new_deviation = self.deviation_model(\n exercise=exercise,\n submitter=submitter,\n granter=self.request.user.userprofile,\n )\n new_deviation.update_by_form(self.session_data)\n new_deviation.save()\n\n del self.request.session[self.session_key]\n return super().form_valid(form)\n\n def deserialize_session_data(self, session_data: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Convert serialized session data back into its original representation.\n \"\"\"\n result = {\n 'exercise': BaseExercise.objects.filter(id__in=session_data.get('exercise', [])),\n 'module': CourseModule.objects.filter(id__in=session_data.get('module', [])),\n 'submitter': UserProfile.objects.filter(id__in=session_data.get('submitter', [])),\n 'submitter_tag': UserTag.objects.filter(id__in=session_data.get('submitter_tag', [])),\n }\n return result\n\n\nclass RemoveDeviationsByIDView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def post(self, request: HttpRequest, *args: Any, **kwargs: Any) -> HttpResponse:\n deviations = self.deviation_model.objects.filter(\n id__in=request.POST.getlist(\"id\"),\n exercise__course_module__course_instance=self.instance,\n )\n for deviation in deviations:\n deviation.delete()\n if request.is_ajax():\n return HttpResponse(status=204)\n return self.redirect(self.deviation_model.get_list_url(self.instance))\n\n\nclass RemoveDeviationsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n deviation_model: Type[SubmissionRuleDeviation]\n\n def get_form_kwargs(self) -> Dict[str, Any]:\n kwargs = super().get_form_kwargs()\n kwargs[\"instance\"] = self.instance\n return kwargs\n\n def 
get_success_url(self) -> str:\n return self.deviation_model.get_list_url(self.instance)\n\n def form_valid(self, form: forms.BaseForm) -> HttpResponse:\n number_of_removed = 0\n deviations = self.deviation_model.objects.filter(\n exercise__in=get_exercises(form.cleaned_data),\n submitter__in=get_submitters(form.cleaned_data),\n )\n for deviation in deviations:\n deviation.delete()\n number_of_removed += 1\n if number_of_removed == 0:\n messages.warning(self.request, _(\"NOTHING_REMOVED\"))\n else:\n message = format_lazy(\n ngettext(\n 'REMOVED_DEVIATION -- {count}',\n 'REMOVED_DEVIATIONS -- {count}',\n number_of_removed\n ),\n count=number_of_removed,\n )\n messages.info(self.request, message)\n return super().form_valid(form)\n\n\ndef get_deviation_groups(\n all_deviations: models.QuerySet[SubmissionRuleDeviation],\n ) -> Iterable[Tuple[List[SubmissionRuleDeviation], bool, Optional[str]]]:\n \"\"\"\n Group the deviations by user and module.\n\n Grouping condition: deviations can be grouped if the user has been\n granted the same deviation (based on the `is_equal` method) for all\n exercises in the module.\n\n The returned tuples contain the following values:\n 1. List of deviations with the same user and module.\n 2. Boolean representing whether the deviations in the list can be\n displayed as a group (i.e. the grouping condition is satisfied).\n 3. An id that uniquely identifies the group of deviations.\n \"\"\"\n # Find the number of exercises in each module.\n course_instances = all_deviations.values_list('exercise__course_module__course_instance', flat=True)\n exercise_counts = (\n BaseExercise.objects.filter(\n course_module__course_instance__in=course_instances\n )\n .order_by()\n .values('course_module_id')\n .annotate(count=models.Count('*'))\n )\n exercise_count_by_module = {row['course_module_id']: row['count'] for row in exercise_counts}\n\n ordered_deviations = (\n all_deviations\n .select_related(\n 'submitter', 'submitter__user',\n 'granter', 'granter__user',\n 'exercise', 'exercise__course_module',\n )\n # parent is prefetched because there may be multiple ancestors, and\n # they are needed for building the deviation's URL.\n .prefetch_related('exercise__parent')\n .order_by('submitter', 'exercise__course_module')\n )\n\n deviation_groups = groupby(\n ordered_deviations,\n lambda obj: (obj.submitter, obj.exercise.course_module),\n )\n for (submitter, module), deviations_iter in deviation_groups:\n deviations = list(deviations_iter)\n can_group = True\n if len(deviations) < 2:\n # Group must have at least 2 deviations.\n can_group = False\n else:\n group_exercises = set()\n # Check that the same deviation has been granted for all exercises.\n for deviation in deviations:\n if not deviation.is_groupable(deviations[0]):\n can_group = False\n break\n group_exercises.add(deviation.exercise.id)\n else:\n if len(group_exercises) != exercise_count_by_module[module.id]:\n # The number of exercises that have deviations doesn't\n # match the number of exercises in the module, so there\n # are some exercises that don't have a deviation.\n can_group = False\n group_id = f\"{deviations[0].submitter.id}.{module.id}\" if can_group else None\n yield (deviations, can_group, group_id)\n\n\ndef get_exercises(form_data: Dict[str, Any]) -> models.QuerySet[BaseExercise]:\n \"\"\"\n Get the exercises that match the input form's `exercise` and `module`\n fields.\n \"\"\"\n return BaseExercise.objects.filter(\n models.Q(id__in=form_data.get('exercise', []))\n | 
models.Q(course_module__in=form_data.get('module', []))\n )\n\n\ndef get_submitters(form_data: Dict[str, Any]) -> models.QuerySet[UserProfile]:\n \"\"\"\n Get the submitters that match the input form's `submitter` and\n `submitter_tag` fields.\n \"\"\"\n return UserProfile.objects.filter(\n models.Q(id__in=form_data.get('submitter', []))\n | models.Q(taggings__tag__in=form_data.get('submitter_tag', []))\n ).distinct()\n", "path": "deviations/viewbase.py"}]}
num_tokens: 3,992
num_tokens_diff: 96
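The crash itself is not specific to Django: any loop that inserts into a uniquely-constrained table once per element of a sequence containing duplicates fails on the second pass, and deduplicating the sequence first (which is what `.distinct()` achieves at the SQL level) avoids it. A self-contained reproduction with `sqlite3`; the table name and ids are illustrative:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE deviation ("
    " exercise_id INTEGER, submitter_id INTEGER,"
    " UNIQUE (exercise_id, submitter_id))"
)

# Submitter 456 matched both the id filter and the tag filter, so the
# un-deduplicated "queryset" contains it twice.
submitters = [456, 456]

try:
    for submitter_id in submitters:
        conn.execute(
            "INSERT INTO deviation (exercise_id, submitter_id) VALUES (?, ?)",
            (123, submitter_id),
        )
except sqlite3.IntegrityError as exc:
    print("without dedup:", exc)  # UNIQUE constraint failed on the second row

conn.execute("DELETE FROM deviation")

for submitter_id in set(submitters):  # dedup, analogous to .distinct()
    conn.execute(
        "INSERT INTO deviation (exercise_id, submitter_id) VALUES (?, ?)",
        (123, submitter_id),
    )
print("with dedup:", conn.execute("SELECT COUNT(*) FROM deviation").fetchone()[0])
```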