Dataset columns (types and value ranges as reported by the dataset viewer):

| Column | Type | Range / distinct values |
| --- | --- | --- |
| problem_id | string | lengths 18–22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.1k–25.4k |
| golden_diff | string | lengths 145–5.13k |
| verification_info | string | lengths 582–39.1k |
| num_tokens | int64 | 271–4.1k |
| num_tokens_diff | int64 | 47–1.02k |
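The sample records below follow this schema. As a rough sketch of how such a dataset could be loaded and inspected with the Hugging Face `datasets` library: the repository id is taken from the `source` column of the sample rows and the split name is assumed, so both may need adjusting for the actual hosted dataset.

```python
import json

from datasets import load_dataset

# Assumption: the dataset is hosted on the Hub under the id shown in the
# `source` column of the rows below; the "train" split name is also assumed.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])       # e.g. "gh_patches_debug_14331"
print(row["in_source_id"])     # upstream issue id, e.g. "facebookresearch__fairscale-795"
print(row["num_tokens"], row["num_tokens_diff"])

# `verification_info` is stored as a string; in the sample rows it holds a JSON
# object with the golden diff, the issue text, and the before/after file contents.
info = json.loads(row["verification_info"])
print(list(info.keys()))       # expected: golden_diff, issue, before_files, after_files
```

Each sample record below appears with its fields flattened in the column order given above (problem_id, source, task_type, in_source_id, prompt, golden_diff, verification_info, num_tokens, num_tokens_diff).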
gh_patches_debug_14331
rasdani/github-patches
git_diff
facebookresearch__fairscale-795
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The Argument broadcast_fp16 is not working on OSS ## πŸ› Bug The Argument `broadcast_fp16` is not working on `OSS`. ## Command ## To Reproduce Steps to reproduce the behavior: 1. Add one print line in fairscale's source code [here](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/optim/oss.py#L603) ``` for device in self.buckets.keys(): for dst_rank, bucket in self.buckets[device].items(): print(bucket.buffer.dtype, bucket.buffer.shape) work_handles.append( ``` 2. Run a test script as below: ``` import sys import pdb import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.nn.parallel import DistributedDataParallel as DDP from fairscale.optim.oss import OSS from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP CUDA_DEVICE = [0, 1] HIDDEN_SIZE = 8 class ToyModel(torch.nn.Module): def __init__(self): super(ToyModel, self).__init__() self.net1 = torch.nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE) self.relu = torch.nn.ReLU() self.net2 = torch.nn.Linear(HIDDEN_SIZE, 5) def forward(self, x): return self.net2(self.relu(self.net1(x))) def main(rank, world_size): dist.init_process_group( backend='nccl', init_method="tcp://localhost:29501", rank=rank, world_size=world_size) device = CUDA_DEVICE[rank] torch.manual_seed(rank + 1007) torch.cuda.set_device(device) torch.cuda.manual_seed(rank + 1007) print('Rank: %d/%d Device: %s' % (rank, world_size, device)) model = ToyModel().to(device) loss_fn = torch.nn.MSELoss() # ddp_model = DDP(model, device_ids=[device]) # optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.1) optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=0.1, broadcast_fp16=True) ddp_model = ShardedDDP(model, optimizer) inputs = torch.randn(2, HIDDEN_SIZE).to(device) print('inputs', rank, inputs) optimizer.zero_grad() outputs = ddp_model(inputs) labels = torch.randn(2, 5).to(device) loss_fn(outputs, labels).backward() optimizer.step() dist.destroy_process_group() if __name__ == "__main__": WORLD_SIZE = len(CUDA_DEVICE) mp.spawn( main, args=(WORLD_SIZE,), nprocs=WORLD_SIZE, join=True, ) ``` And the output is: ``` ... torch.float32 torch.Size([64]) torch.float32 torch.Size([53]) torch.float32 torch.Size([64]) torch.float32 torch.Size([53]) ``` Note that we have already set the argument `broadcast_fp16` to be true, but fairscale still boardcasts float32 tensor. ## Reason The Tensor.to [(code here)](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) method is not in-place operation. ## Posible Solution Change [fairscale/nn/misc/param_bucket.py:35](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) to ``` self.buffer = self.buffer.to(device, dtype, non_blocking) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fairscale/nn/misc/param_bucket.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 2 # 3 # This source code is licensed under the BSD license found in the 4 # LICENSE file in the root directory of this source tree. 
5 6 from typing import Any, Callable, List, Optional, Union 7 8 import torch 9 10 11 class Bucket: 12 """ 13 Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors 14 """ 15 16 def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None: 17 self._params: List[torch.Tensor] = [] 18 self._param_ids: List[int] = [] 19 self._fill = 0 20 21 # The actual flat tensor 22 self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device) 23 24 def to( # type: ignore 25 self, 26 device: Optional[Union[int, torch.device]], 27 dtype: Optional[torch.dtype] = None, 28 non_blocking: bool = False, 29 keep_param_alignment: bool = True, 30 ) -> "ParamBucket": 31 """ 32 Move the underlying buffer 33 """ 34 assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it" 35 self.buffer.to(device, dtype, non_blocking) 36 37 38 class ParamBucket(Bucket): 39 """ 40 Helper class to simplify the handling of parameter buckets 41 """ 42 43 def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None: 44 super().__init__(size, dtype, device) 45 46 def to( # type: ignore 47 self, 48 device: Optional[Union[int, torch.device]], 49 dtype: Optional[torch.dtype] = None, 50 non_blocking: bool = False, 51 keep_param_alignment: bool = True, 52 ) -> "ParamBucket": 53 """ 54 Move the underlying buffer 55 """ 56 super().to(device, dtype, non_blocking) 57 58 if keep_param_alignment: 59 self._reattach_params() 60 61 @torch.no_grad() 62 def add_param(self, param: torch.Tensor) -> None: 63 """ 64 Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer 65 """ 66 67 assert id(param) not in self._param_ids, "The same param cannot be checked in twice" 68 69 self._add_param_as_view(param) 70 self._params.append(param) 71 self._param_ids.append(id(param)) 72 73 @torch.no_grad() 74 def _add_param_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None: 75 assert self.buffer is not None 76 assert ( 77 param.dtype == self.buffer.dtype 78 ), f"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}" 79 assert ( 80 param.device == self.buffer.device 81 ), f"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}" 82 83 fill_next = self._fill + param.numel() 84 assert fill_next <= self.buffer.numel() 85 86 # Copy the current param value 87 if keep_existing_value: 88 self.buffer[self._fill : fill_next].copy_(param.data.flatten()) 89 param.data = self.buffer[self._fill : fill_next].view_as(param.data) 90 self._fill = fill_next 91 92 @torch.no_grad() 93 def _reattach_params(self) -> None: 94 """ 95 Given the parameters which have been registered previously, rebuild the whole bucket 96 """ 97 assert len(self._params) > 0 98 99 self._fill = 0 100 for p in self._params: 101 self._add_param_as_view(p, keep_existing_value=False) 102 103 104 class GradBucket(Bucket): 105 """ 106 Helper class to simplify the handling of gradient buckets 107 """ 108 109 def __init__(self, size: int, dtype: torch.dtype, device: torch.device, destination: int) -> None: 110 super().__init__(size, dtype, device) 111 112 self._max_size = size 113 self._is_collapsed = False 114 115 self.params_checked_in = 0 116 self.destination = destination 117 self.sent = True 118 self.callback: Optional[Callable[[Any], None]] = None 119 120 def reset_checked_in(self) -> None: 121 """ Reset the counter of the parameter grads which have 
been checked in 122 """ 123 self.params_checked_in = 0 124 self.sent = False 125 126 @property 127 def all_checked_in(self) -> bool: 128 """ Have all the expected gradient check-in happened ?""" 129 return len(self._params) == self.params_checked_in 130 131 def can_add_grad_view(self, param: torch.Tensor) -> bool: 132 """ Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ? 133 """ 134 return self._fill + param.numel() < self._max_size and id(param) not in self._param_ids 135 136 def to( # type: ignore 137 self, 138 device: Optional[Union[int, torch.device]], 139 dtype: Optional[torch.dtype] = None, 140 non_blocking: bool = False, 141 keep_param_alignment: bool = True, 142 ) -> "GradBucket": 143 """ 144 Move the underlying buffer 145 """ 146 if self._is_collapsed: 147 self.rebuild() 148 149 super().to(device, dtype, non_blocking) 150 151 if keep_param_alignment: 152 self._reattach_grads() 153 154 def zero(self) -> None: 155 """ 156 Set all the grads to zero 157 """ 158 self.buffer.fill_(0.0) 159 160 @torch.no_grad() 161 def add_grad(self, param: torch.Tensor) -> None: 162 """ 163 Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer 164 """ 165 166 assert id(param) not in self._param_ids, "The same gradients cannot be checked in twice" 167 168 if param.grad is None: 169 param.grad = torch.zeros_like(param) 170 171 self._add_grad_as_view(param) 172 self._params.append(param) 173 self._param_ids.append(id(param)) 174 175 @torch.no_grad() 176 def collapse(self) -> None: 177 """ 178 Release the buffer from memory. The bucket will need to be rebuilt before use 179 """ 180 if not self._is_collapsed: 181 for p in self._params: 182 assert p.grad is not None 183 p.grad.detach_() 184 p.grad = None 185 186 self.buffer = torch.zeros(0, dtype=self.buffer.dtype, device=self.buffer.device) 187 self._fill = 0 188 self.params_checked_in = 0 189 self._is_collapsed = True 190 191 @torch.no_grad() 192 def rebuild(self) -> None: 193 """ 194 Given the parameter gradients which have been registered previously, rebuild the whole bucket 195 """ 196 assert len(self._params) > 0 197 198 if self._is_collapsed: 199 self.buffer = torch.zeros(self._max_size, dtype=self._params[0].dtype, device=self._params[0].device) 200 201 for p in self._params: 202 self._add_grad_as_view(p) 203 204 self._is_collapsed = False 205 206 @torch.no_grad() 207 def shrink(self) -> None: 208 """ 209 Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory 210 """ 211 assert self.buffer.numel() > 0, "Cannot shrink a collapsed bucket, please rebuild" 212 213 self.buffer = self.buffer.resize_(self._fill).clone() 214 self._fill = 0 215 for p in self._params: 216 self._add_grad_as_view(p) 217 218 self._max_size = self._fill 219 220 @torch.no_grad() 221 def _reattach_grads(self) -> None: 222 """ 223 Given the parameters gradients which have been registered previously, rebuild the whole bucket 224 """ 225 assert len(self._params) > 0 226 227 self._fill = 0 228 for p in self._params: 229 self._add_grad_as_view(p, keep_existing_value=False) 230 231 @torch.no_grad() 232 def _add_grad_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None: 233 assert self.buffer.numel() > 0, "Cannot add a gradient to a collapsed bucket, please rebuild" 234 assert param.dtype == self.buffer.dtype 235 assert param.device == self.buffer.device 236 237 fill_next = self._fill + param.numel() 238 assert fill_next 
<= self.buffer.numel() 239 240 # Copy the current grad value, if any 241 if param.grad is not None: 242 # keep param.grad in place 243 if keep_existing_value: 244 self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten()) 245 param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data) 246 else: 247 param.grad = self.buffer[self._fill : fill_next].view_as(param.data) 248 self._fill = fill_next 249 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fairscale/nn/misc/param_bucket.py b/fairscale/nn/misc/param_bucket.py --- a/fairscale/nn/misc/param_bucket.py +++ b/fairscale/nn/misc/param_bucket.py @@ -32,7 +32,7 @@ Move the underlying buffer """ assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it" - self.buffer.to(device, dtype, non_blocking) + self.buffer = self.buffer.to(device, dtype, non_blocking) class ParamBucket(Bucket): @@ -98,6 +98,8 @@ self._fill = 0 for p in self._params: + if p.dtype != self.buffer.dtype: + p.data = p.data.to(self.buffer.dtype) self._add_param_as_view(p, keep_existing_value=False)
{"golden_diff": "diff --git a/fairscale/nn/misc/param_bucket.py b/fairscale/nn/misc/param_bucket.py\n--- a/fairscale/nn/misc/param_bucket.py\n+++ b/fairscale/nn/misc/param_bucket.py\n@@ -32,7 +32,7 @@\n Move the underlying buffer\n \"\"\"\n assert self.buffer is not None, \"Cannot move a collapsed bucket, please rebuild it\"\n- self.buffer.to(device, dtype, non_blocking)\n+ self.buffer = self.buffer.to(device, dtype, non_blocking)\n \n \n class ParamBucket(Bucket):\n@@ -98,6 +98,8 @@\n \n self._fill = 0\n for p in self._params:\n+ if p.dtype != self.buffer.dtype:\n+ p.data = p.data.to(self.buffer.dtype)\n self._add_param_as_view(p, keep_existing_value=False)\n", "issue": "The Argument broadcast_fp16 is not working on OSS\n## \ud83d\udc1b Bug\r\n\r\nThe Argument `broadcast_fp16` is not working on `OSS`.\r\n\r\n## Command\r\n\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Add one print line in fairscale's source code [here](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/optim/oss.py#L603)\r\n```\r\n for device in self.buckets.keys():\r\n for dst_rank, bucket in self.buckets[device].items():\r\n print(bucket.buffer.dtype, bucket.buffer.shape)\r\n work_handles.append(\r\n```\r\n2. Run a test script as below:\r\n```\r\nimport sys\r\nimport pdb\r\nimport torch\r\nimport torch.distributed as dist\r\nimport torch.multiprocessing as mp\r\nfrom torch.nn.parallel import DistributedDataParallel as DDP\r\n\r\nfrom fairscale.optim.oss import OSS\r\nfrom fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\r\n\r\nCUDA_DEVICE = [0, 1]\r\nHIDDEN_SIZE = 8\r\n\r\nclass ToyModel(torch.nn.Module):\r\n def __init__(self):\r\n super(ToyModel, self).__init__()\r\n self.net1 = torch.nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)\r\n self.relu = torch.nn.ReLU()\r\n self.net2 = torch.nn.Linear(HIDDEN_SIZE, 5)\r\n\r\n def forward(self, x):\r\n return self.net2(self.relu(self.net1(x)))\r\n\r\n\r\ndef main(rank, world_size):\r\n\r\n dist.init_process_group(\r\n backend='nccl',\r\n init_method=\"tcp://localhost:29501\",\r\n rank=rank,\r\n world_size=world_size)\r\n device = CUDA_DEVICE[rank]\r\n torch.manual_seed(rank + 1007)\r\n torch.cuda.set_device(device)\r\n torch.cuda.manual_seed(rank + 1007)\r\n print('Rank: %d/%d Device: %s' % (rank, world_size, device))\r\n\r\n\r\n model = ToyModel().to(device)\r\n loss_fn = torch.nn.MSELoss()\r\n # ddp_model = DDP(model, device_ids=[device])\r\n # optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.1)\r\n optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=0.1, broadcast_fp16=True)\r\n ddp_model = ShardedDDP(model, optimizer)\r\n\r\n inputs = torch.randn(2, HIDDEN_SIZE).to(device)\r\n print('inputs', rank, inputs)\r\n\r\n optimizer.zero_grad()\r\n outputs = ddp_model(inputs)\r\n labels = torch.randn(2, 5).to(device)\r\n loss_fn(outputs, labels).backward()\r\n optimizer.step()\r\n \r\n\r\n dist.destroy_process_group()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n WORLD_SIZE = len(CUDA_DEVICE)\r\n mp.spawn(\r\n main,\r\n args=(WORLD_SIZE,),\r\n nprocs=WORLD_SIZE,\r\n join=True,\r\n )\r\n\r\n```\r\n\r\nAnd the output is:\r\n```\r\n...\r\ntorch.float32 torch.Size([64])\r\ntorch.float32 torch.Size([53])\r\ntorch.float32 torch.Size([64])\r\ntorch.float32 torch.Size([53])\r\n```\r\n\r\nNote that we have already set the argument `broadcast_fp16` to be true, but fairscale still boardcasts float32 tensor.\r\n\r\n## Reason\r\n\r\nThe Tensor.to [(code 
here)](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) method is not in-place operation.\r\n\r\n## Posible Solution\r\nChange [fairscale/nn/misc/param_bucket.py:35](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) to \r\n```\r\nself.buffer = self.buffer.to(device, dtype, non_blocking)\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Any, Callable, List, Optional, Union\n\nimport torch\n\n\nclass Bucket:\n \"\"\"\n Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:\n self._params: List[torch.Tensor] = []\n self._param_ids: List[int] = []\n self._fill = 0\n\n # The actual flat tensor\n self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device)\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"ParamBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n assert self.buffer is not None, \"Cannot move a collapsed bucket, please rebuild it\"\n self.buffer.to(device, dtype, non_blocking)\n\n\nclass ParamBucket(Bucket):\n \"\"\"\n Helper class to simplify the handling of parameter buckets\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:\n super().__init__(size, dtype, device)\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"ParamBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n super().to(device, dtype, non_blocking)\n\n if keep_param_alignment:\n self._reattach_params()\n\n @torch.no_grad()\n def add_param(self, param: torch.Tensor) -> None:\n \"\"\"\n Add a new parameter gradient to the bucket. 
Param.grad becomes a view of this bucket buffer\n \"\"\"\n\n assert id(param) not in self._param_ids, \"The same param cannot be checked in twice\"\n\n self._add_param_as_view(param)\n self._params.append(param)\n self._param_ids.append(id(param))\n\n @torch.no_grad()\n def _add_param_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:\n assert self.buffer is not None\n assert (\n param.dtype == self.buffer.dtype\n ), f\"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}\"\n assert (\n param.device == self.buffer.device\n ), f\"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}\"\n\n fill_next = self._fill + param.numel()\n assert fill_next <= self.buffer.numel()\n\n # Copy the current param value\n if keep_existing_value:\n self.buffer[self._fill : fill_next].copy_(param.data.flatten())\n param.data = self.buffer[self._fill : fill_next].view_as(param.data)\n self._fill = fill_next\n\n @torch.no_grad()\n def _reattach_params(self) -> None:\n \"\"\"\n Given the parameters which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n self._fill = 0\n for p in self._params:\n self._add_param_as_view(p, keep_existing_value=False)\n\n\nclass GradBucket(Bucket):\n \"\"\"\n Helper class to simplify the handling of gradient buckets\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device, destination: int) -> None:\n super().__init__(size, dtype, device)\n\n self._max_size = size\n self._is_collapsed = False\n\n self.params_checked_in = 0\n self.destination = destination\n self.sent = True\n self.callback: Optional[Callable[[Any], None]] = None\n\n def reset_checked_in(self) -> None:\n \"\"\" Reset the counter of the parameter grads which have been checked in\n \"\"\"\n self.params_checked_in = 0\n self.sent = False\n\n @property\n def all_checked_in(self) -> bool:\n \"\"\" Have all the expected gradient check-in happened ?\"\"\"\n return len(self._params) == self.params_checked_in\n\n def can_add_grad_view(self, param: torch.Tensor) -> bool:\n \"\"\" Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ?\n \"\"\"\n return self._fill + param.numel() < self._max_size and id(param) not in self._param_ids\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"GradBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n if self._is_collapsed:\n self.rebuild()\n\n super().to(device, dtype, non_blocking)\n\n if keep_param_alignment:\n self._reattach_grads()\n\n def zero(self) -> None:\n \"\"\"\n Set all the grads to zero\n \"\"\"\n self.buffer.fill_(0.0)\n\n @torch.no_grad()\n def add_grad(self, param: torch.Tensor) -> None:\n \"\"\"\n Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer\n \"\"\"\n\n assert id(param) not in self._param_ids, \"The same gradients cannot be checked in twice\"\n\n if param.grad is None:\n param.grad = torch.zeros_like(param)\n\n self._add_grad_as_view(param)\n self._params.append(param)\n self._param_ids.append(id(param))\n\n @torch.no_grad()\n def collapse(self) -> None:\n \"\"\"\n Release the buffer from memory. 
The bucket will need to be rebuilt before use\n \"\"\"\n if not self._is_collapsed:\n for p in self._params:\n assert p.grad is not None\n p.grad.detach_()\n p.grad = None\n\n self.buffer = torch.zeros(0, dtype=self.buffer.dtype, device=self.buffer.device)\n self._fill = 0\n self.params_checked_in = 0\n self._is_collapsed = True\n\n @torch.no_grad()\n def rebuild(self) -> None:\n \"\"\"\n Given the parameter gradients which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n if self._is_collapsed:\n self.buffer = torch.zeros(self._max_size, dtype=self._params[0].dtype, device=self._params[0].device)\n\n for p in self._params:\n self._add_grad_as_view(p)\n\n self._is_collapsed = False\n\n @torch.no_grad()\n def shrink(self) -> None:\n \"\"\"\n Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory\n \"\"\"\n assert self.buffer.numel() > 0, \"Cannot shrink a collapsed bucket, please rebuild\"\n\n self.buffer = self.buffer.resize_(self._fill).clone()\n self._fill = 0\n for p in self._params:\n self._add_grad_as_view(p)\n\n self._max_size = self._fill\n\n @torch.no_grad()\n def _reattach_grads(self) -> None:\n \"\"\"\n Given the parameters gradients which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n self._fill = 0\n for p in self._params:\n self._add_grad_as_view(p, keep_existing_value=False)\n\n @torch.no_grad()\n def _add_grad_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:\n assert self.buffer.numel() > 0, \"Cannot add a gradient to a collapsed bucket, please rebuild\"\n assert param.dtype == self.buffer.dtype\n assert param.device == self.buffer.device\n\n fill_next = self._fill + param.numel()\n assert fill_next <= self.buffer.numel()\n\n # Copy the current grad value, if any\n if param.grad is not None:\n # keep param.grad in place\n if keep_existing_value:\n self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten())\n param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data)\n else:\n param.grad = self.buffer[self._fill : fill_next].view_as(param.data)\n self._fill = fill_next\n", "path": "fairscale/nn/misc/param_bucket.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Any, Callable, List, Optional, Union\n\nimport torch\n\n\nclass Bucket:\n \"\"\"\n Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:\n self._params: List[torch.Tensor] = []\n self._param_ids: List[int] = []\n self._fill = 0\n\n # The actual flat tensor\n self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device)\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"ParamBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n assert self.buffer is not None, \"Cannot move a collapsed bucket, please rebuild it\"\n self.buffer = self.buffer.to(device, dtype, non_blocking)\n\n\nclass ParamBucket(Bucket):\n \"\"\"\n Helper class to simplify the handling of parameter buckets\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:\n super().__init__(size, dtype, device)\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"ParamBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n super().to(device, dtype, non_blocking)\n\n if keep_param_alignment:\n self._reattach_params()\n\n @torch.no_grad()\n def add_param(self, param: torch.Tensor) -> None:\n \"\"\"\n Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer\n \"\"\"\n\n assert id(param) not in self._param_ids, \"The same param cannot be checked in twice\"\n\n self._add_param_as_view(param)\n self._params.append(param)\n self._param_ids.append(id(param))\n\n @torch.no_grad()\n def _add_param_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:\n assert self.buffer is not None\n assert (\n param.dtype == self.buffer.dtype\n ), f\"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}\"\n assert (\n param.device == self.buffer.device\n ), f\"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}\"\n\n fill_next = self._fill + param.numel()\n assert fill_next <= self.buffer.numel()\n\n # Copy the current param value\n if keep_existing_value:\n self.buffer[self._fill : fill_next].copy_(param.data.flatten())\n param.data = self.buffer[self._fill : fill_next].view_as(param.data)\n self._fill = fill_next\n\n @torch.no_grad()\n def _reattach_params(self) -> None:\n \"\"\"\n Given the parameters which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n self._fill = 0\n for p in self._params:\n if p.dtype != self.buffer.dtype:\n p.data = p.data.to(self.buffer.dtype)\n self._add_param_as_view(p, keep_existing_value=False)\n\n\nclass GradBucket(Bucket):\n \"\"\"\n Helper class to simplify the handling of gradient buckets\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device, destination: int) -> None:\n super().__init__(size, dtype, device)\n\n self._max_size = size\n self._is_collapsed = False\n\n self.params_checked_in = 0\n self.destination = destination\n self.sent = True\n 
self.callback: Optional[Callable[[Any], None]] = None\n\n def reset_checked_in(self) -> None:\n \"\"\" Reset the counter of the parameter grads which have been checked in\n \"\"\"\n self.params_checked_in = 0\n self.sent = False\n\n @property\n def all_checked_in(self) -> bool:\n \"\"\" Have all the expected gradient check-in happened ?\"\"\"\n return len(self._params) == self.params_checked_in\n\n def can_add_grad_view(self, param: torch.Tensor) -> bool:\n \"\"\" Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ?\n \"\"\"\n return self._fill + param.numel() < self._max_size and id(param) not in self._param_ids\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"GradBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n if self._is_collapsed:\n self.rebuild()\n\n super().to(device, dtype, non_blocking)\n\n if keep_param_alignment:\n self._reattach_grads()\n\n def zero(self) -> None:\n \"\"\"\n Set all the grads to zero\n \"\"\"\n self.buffer.fill_(0.0)\n\n @torch.no_grad()\n def add_grad(self, param: torch.Tensor) -> None:\n \"\"\"\n Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer\n \"\"\"\n\n assert id(param) not in self._param_ids, \"The same gradients cannot be checked in twice\"\n\n if param.grad is None:\n param.grad = torch.zeros_like(param)\n\n self._add_grad_as_view(param)\n self._params.append(param)\n self._param_ids.append(id(param))\n\n @torch.no_grad()\n def collapse(self) -> None:\n \"\"\"\n Release the buffer from memory. The bucket will need to be rebuilt before use\n \"\"\"\n if not self._is_collapsed:\n for p in self._params:\n assert p.grad is not None\n p.grad.detach_()\n p.grad = None\n\n self.buffer = torch.zeros(0, dtype=self.buffer.dtype, device=self.buffer.device)\n self._fill = 0\n self.params_checked_in = 0\n self._is_collapsed = True\n\n @torch.no_grad()\n def rebuild(self) -> None:\n \"\"\"\n Given the parameter gradients which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n if self._is_collapsed:\n self.buffer = torch.zeros(self._max_size, dtype=self._params[0].dtype, device=self._params[0].device)\n\n for p in self._params:\n self._add_grad_as_view(p)\n\n self._is_collapsed = False\n\n @torch.no_grad()\n def shrink(self) -> None:\n \"\"\"\n Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory\n \"\"\"\n assert self.buffer.numel() > 0, \"Cannot shrink a collapsed bucket, please rebuild\"\n\n self.buffer = self.buffer.resize_(self._fill).clone()\n self._fill = 0\n for p in self._params:\n self._add_grad_as_view(p)\n\n self._max_size = self._fill\n\n @torch.no_grad()\n def _reattach_grads(self) -> None:\n \"\"\"\n Given the parameters gradients which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n self._fill = 0\n for p in self._params:\n self._add_grad_as_view(p, keep_existing_value=False)\n\n @torch.no_grad()\n def _add_grad_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:\n assert self.buffer.numel() > 0, \"Cannot add a gradient to a collapsed bucket, please rebuild\"\n assert param.dtype == self.buffer.dtype\n assert param.device == self.buffer.device\n\n fill_next = self._fill + param.numel()\n assert fill_next <= 
self.buffer.numel()\n\n # Copy the current grad value, if any\n if param.grad is not None:\n # keep param.grad in place\n if keep_existing_value:\n self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten())\n param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data)\n else:\n param.grad = self.buffer[self._fill : fill_next].view_as(param.data)\n self._fill = fill_next\n", "path": "fairscale/nn/misc/param_bucket.py"}]}
3,769
190
gh_patches_debug_21047
rasdani/github-patches
git_diff
dask__distributed-3387
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When --dashboard-prefix is used root location is always redirected to /status without adding the prefix The problem arises when using --dashboard-prefix, using http://DASHBOARD[:PORT]/PREFIX/ redirects to http://DASHBOARD[:PORT]/status instead of http://DASHBOARD[:PORT]/PREFIX/status. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/dashboard/core.py` Content: ``` 1 from distutils.version import LooseVersion 2 import os 3 import warnings 4 5 import bokeh 6 from bokeh.server.server import Server 7 from tornado import web 8 9 10 if LooseVersion(bokeh.__version__) < LooseVersion("0.13.0"): 11 warnings.warn( 12 "\nDask needs bokeh >= 0.13.0 for the dashboard." 13 "\nContinuing without the dashboard." 14 ) 15 raise ImportError("Dask needs bokeh >= 0.13.0") 16 17 18 class BokehServer(object): 19 server_kwargs = {} 20 21 def listen(self, addr): 22 if self.server: 23 return 24 if isinstance(addr, tuple): 25 ip, port = addr 26 else: 27 port = addr 28 ip = None 29 for i in range(5): 30 try: 31 server_kwargs = dict( 32 port=port, 33 address=ip, 34 check_unused_sessions_milliseconds=500, 35 allow_websocket_origin=["*"], 36 use_index=False, 37 extra_patterns=[(r"/", web.RedirectHandler, {"url": "/status"})], 38 ) 39 server_kwargs.update(self.server_kwargs) 40 self.server = Server(self.apps, **server_kwargs) 41 self.server.start() 42 43 handlers = [ 44 ( 45 self.prefix + r"/statics/(.*)", 46 web.StaticFileHandler, 47 {"path": os.path.join(os.path.dirname(__file__), "static")}, 48 ) 49 ] 50 51 self.server._tornado.add_handlers(r".*", handlers) 52 53 return 54 except (SystemExit, EnvironmentError) as exc: 55 if port != 0: 56 if "already in use" in str( 57 exc 58 ) or "Only one usage of" in str( # Unix/Mac 59 exc 60 ): # Windows 61 msg = ( 62 "Port %d is already in use. " 63 "\nPerhaps you already have a cluster running?" 64 "\nHosting the diagnostics dashboard on a random port instead." 65 % port 66 ) 67 else: 68 msg = ( 69 "Failed to start diagnostics server on port %d. " % port 70 + str(exc) 71 ) 72 warnings.warn("\n" + msg) 73 port = 0 74 if i == 4: 75 raise 76 77 @property 78 def port(self): 79 return ( 80 self.server.port 81 or list(self.server._http._sockets.values())[0].getsockname()[1] 82 ) 83 84 def stop(self): 85 for context in self.server._tornado._applications.values(): 86 context.run_unload_hook() 87 88 self.server._tornado._stats_job.stop() 89 self.server._tornado._cleanup_job.stop() 90 if self.server._tornado._ping_job is not None: 91 self.server._tornado._ping_job.stop() 92 93 # https://github.com/bokeh/bokeh/issues/5494 94 if LooseVersion(bokeh.__version__) >= "0.12.4": 95 self.server.stop() 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py --- a/distributed/dashboard/core.py +++ b/distributed/dashboard/core.py @@ -5,6 +5,7 @@ import bokeh from bokeh.server.server import Server from tornado import web +from urllib.parse import urljoin if LooseVersion(bokeh.__version__) < LooseVersion("0.13.0"): @@ -34,7 +35,13 @@ check_unused_sessions_milliseconds=500, allow_websocket_origin=["*"], use_index=False, - extra_patterns=[(r"/", web.RedirectHandler, {"url": "/status"})], + extra_patterns=[ + ( + r"/", + web.RedirectHandler, + {"url": urljoin(self.prefix.rstrip("/") + "/", r"status")}, + ) + ], ) server_kwargs.update(self.server_kwargs) self.server = Server(self.apps, **server_kwargs)
{"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -5,6 +5,7 @@\n import bokeh\n from bokeh.server.server import Server\n from tornado import web\n+from urllib.parse import urljoin\n \n \n if LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n@@ -34,7 +35,13 @@\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n- extra_patterns=[(r\"/\", web.RedirectHandler, {\"url\": \"/status\"})],\n+ extra_patterns=[\n+ (\n+ r\"/\",\n+ web.RedirectHandler,\n+ {\"url\": urljoin(self.prefix.rstrip(\"/\") + \"/\", r\"status\")},\n+ )\n+ ],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n", "issue": "When --dashboard-prefix is used root location is always redirected to /status without adding the prefix\nThe problem arises when using --dashboard-prefix, using http://DASHBOARD[:PORT]/PREFIX/ redirects to http://DASHBOARD[:PORT]/status instead of http://DASHBOARD[:PORT]/PREFIX/status.\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport os\nimport warnings\n\nimport bokeh\nfrom bokeh.server.server import Server\nfrom tornado import web\n\n\nif LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n warnings.warn(\n \"\\nDask needs bokeh >= 0.13.0 for the dashboard.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\"Dask needs bokeh >= 0.13.0\")\n\n\nclass BokehServer(object):\n server_kwargs = {}\n\n def listen(self, addr):\n if self.server:\n return\n if isinstance(addr, tuple):\n ip, port = addr\n else:\n port = addr\n ip = None\n for i in range(5):\n try:\n server_kwargs = dict(\n port=port,\n address=ip,\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n extra_patterns=[(r\"/\", web.RedirectHandler, {\"url\": \"/status\"})],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n self.server.start()\n\n handlers = [\n (\n self.prefix + r\"/statics/(.*)\",\n web.StaticFileHandler,\n {\"path\": os.path.join(os.path.dirname(__file__), \"static\")},\n )\n ]\n\n self.server._tornado.add_handlers(r\".*\", handlers)\n\n return\n except (SystemExit, EnvironmentError) as exc:\n if port != 0:\n if \"already in use\" in str(\n exc\n ) or \"Only one usage of\" in str( # Unix/Mac\n exc\n ): # Windows\n msg = (\n \"Port %d is already in use. \"\n \"\\nPerhaps you already have a cluster running?\"\n \"\\nHosting the diagnostics dashboard on a random port instead.\"\n % port\n )\n else:\n msg = (\n \"Failed to start diagnostics server on port %d. 
\" % port\n + str(exc)\n )\n warnings.warn(\"\\n\" + msg)\n port = 0\n if i == 4:\n raise\n\n @property\n def port(self):\n return (\n self.server.port\n or list(self.server._http._sockets.values())[0].getsockname()[1]\n )\n\n def stop(self):\n for context in self.server._tornado._applications.values():\n context.run_unload_hook()\n\n self.server._tornado._stats_job.stop()\n self.server._tornado._cleanup_job.stop()\n if self.server._tornado._ping_job is not None:\n self.server._tornado._ping_job.stop()\n\n # https://github.com/bokeh/bokeh/issues/5494\n if LooseVersion(bokeh.__version__) >= \"0.12.4\":\n self.server.stop()\n", "path": "distributed/dashboard/core.py"}], "after_files": [{"content": "from distutils.version import LooseVersion\nimport os\nimport warnings\n\nimport bokeh\nfrom bokeh.server.server import Server\nfrom tornado import web\nfrom urllib.parse import urljoin\n\n\nif LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n warnings.warn(\n \"\\nDask needs bokeh >= 0.13.0 for the dashboard.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\"Dask needs bokeh >= 0.13.0\")\n\n\nclass BokehServer(object):\n server_kwargs = {}\n\n def listen(self, addr):\n if self.server:\n return\n if isinstance(addr, tuple):\n ip, port = addr\n else:\n port = addr\n ip = None\n for i in range(5):\n try:\n server_kwargs = dict(\n port=port,\n address=ip,\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n extra_patterns=[\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin(self.prefix.rstrip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n self.server.start()\n\n handlers = [\n (\n self.prefix + r\"/statics/(.*)\",\n web.StaticFileHandler,\n {\"path\": os.path.join(os.path.dirname(__file__), \"static\")},\n )\n ]\n\n self.server._tornado.add_handlers(r\".*\", handlers)\n\n return\n except (SystemExit, EnvironmentError) as exc:\n if port != 0:\n if \"already in use\" in str(\n exc\n ) or \"Only one usage of\" in str( # Unix/Mac\n exc\n ): # Windows\n msg = (\n \"Port %d is already in use. \"\n \"\\nPerhaps you already have a cluster running?\"\n \"\\nHosting the diagnostics dashboard on a random port instead.\"\n % port\n )\n else:\n msg = (\n \"Failed to start diagnostics server on port %d. \" % port\n + str(exc)\n )\n warnings.warn(\"\\n\" + msg)\n port = 0\n if i == 4:\n raise\n\n @property\n def port(self):\n return (\n self.server.port\n or list(self.server._http._sockets.values())[0].getsockname()[1]\n )\n\n def stop(self):\n for context in self.server._tornado._applications.values():\n context.run_unload_hook()\n\n self.server._tornado._stats_job.stop()\n self.server._tornado._cleanup_job.stop()\n if self.server._tornado._ping_job is not None:\n self.server._tornado._ping_job.stop()\n\n # https://github.com/bokeh/bokeh/issues/5494\n if LooseVersion(bokeh.__version__) >= \"0.12.4\":\n self.server.stop()\n", "path": "distributed/dashboard/core.py"}]}
1,139
209
gh_patches_debug_40460
rasdani/github-patches
git_diff
vacanza__python-holidays-371
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong workday info for country HR Today (Oct. 8, 2020) my alarmclock automation did not go off, because my workday sensor gave the wrong info (no workday). This day used to be a holiday in Croatia, but is not anymore. binary_sensor: - platform: workday country: HR ![image](https://user-images.githubusercontent.com/61622258/95419607-4082d000-093a-11eb-904c-77a2639b1719.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `holidays/countries/croatia.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # python-holidays 4 # --------------- 5 # A fast, efficient Python library for generating country, province and state 6 # specific sets of holidays on the fly. It aims to make determining whether a 7 # specific date is a holiday as fast and flexible as possible. 8 # 9 # Author: ryanss <[email protected]> (c) 2014-2017 10 # dr-prodigy <[email protected]> (c) 2017-2020 11 # Website: https://github.com/dr-prodigy/python-holidays 12 # License: MIT (see LICENSE file) 13 14 from datetime import date 15 16 from dateutil.easter import easter 17 from dateutil.relativedelta import relativedelta as rd 18 19 from holidays.constants import JAN, MAY, JUN, AUG, OCT, \ 20 NOV, DEC 21 from holidays.holiday_base import HolidayBase 22 23 24 class Croatia(HolidayBase): 25 26 # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia 27 28 def __init__(self, **kwargs): 29 self.country = "HR" 30 HolidayBase.__init__(self, **kwargs) 31 32 def _populate(self, year): 33 # New years 34 self[date(year, JAN, 1)] = "Nova Godina" 35 # Epiphany 36 self[date(year, JAN, 6)] = "Sveta tri kralja" 37 easter_date = easter(year) 38 39 # Easter 40 self[easter_date] = "Uskrs" 41 # Easter Monday 42 self[easter_date + rd(days=1)] = "UskrΕ‘nji ponedjeljak" 43 44 # Corpus Christi 45 self[easter_date + rd(days=60)] = "Tijelovo" 46 47 # International Workers' Day 48 self[date(year, MAY, 1)] = "MeΔ‘unarodni praznik rada" 49 50 if year >= 2020: 51 # Statehood day 52 self[date(year, MAY, 30)] = "Dan drΕΎavnosti" 53 54 # Anti-fascist struggle day 55 self[date(year, JUN, 22)] = "Dan antifaΕ‘ističke borbe" 56 57 if year < 2020: 58 # Statehood day 59 self[date(year, JUN, 25)] = "Dan drΕΎavnosti" 60 61 # Victory and Homeland Thanksgiving Day 62 self[date(year, AUG, 5)] = "Dan pobjede i domovinske zahvalnosti" 63 64 # Assumption of Mary 65 self[date(year, AUG, 15)] = "Velika Gospa" 66 67 if year < 2020: 68 # Independence Day 69 self[date(year, OCT, 8)] = "Dan neovisnosti" 70 71 # All Saints' Day 72 self[date(year, NOV, 1)] = "Dan svih svetih" 73 74 if year >= 2020: 75 # Memorial day 76 self[date(year, NOV, 18)] =\ 77 "Dan sjeΔ‡anja na ΕΎrtve Domovinskog rata" 78 79 # Christmas day 80 self[date(year, DEC, 25)] = "BoΕΎiΔ‡" 81 82 # St. Stephen's day 83 self[date(year, DEC, 26)] = "Sveti Stjepan" 84 85 86 class HR(Croatia): 87 pass 88 89 90 class HRV(Croatia): 91 pass 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/holidays/countries/croatia.py b/holidays/countries/croatia.py --- a/holidays/countries/croatia.py +++ b/holidays/countries/croatia.py @@ -11,18 +11,18 @@ # Website: https://github.com/dr-prodigy/python-holidays # License: MIT (see LICENSE file) -from datetime import date +from datetime import date, timedelta from dateutil.easter import easter -from dateutil.relativedelta import relativedelta as rd -from holidays.constants import JAN, MAY, JUN, AUG, OCT, \ - NOV, DEC +from holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC from holidays.holiday_base import HolidayBase class Croatia(HolidayBase): + # Updated with act 022-03 / 19-01 / 219 of 14 November 2019 + # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia def __init__(self, **kwargs): @@ -32,6 +32,7 @@ def _populate(self, year): # New years self[date(year, JAN, 1)] = "Nova Godina" + # Epiphany self[date(year, JAN, 6)] = "Sveta tri kralja" easter_date = easter(year) @@ -39,23 +40,23 @@ # Easter self[easter_date] = "Uskrs" # Easter Monday - self[easter_date + rd(days=1)] = "UskrΕ‘nji ponedjeljak" + self[easter_date + timedelta(days=1)] = "Uskrsni ponedjeljak" # Corpus Christi - self[easter_date + rd(days=60)] = "Tijelovo" + self[easter_date + timedelta(days=60)] = "Tijelovo" # International Workers' Day self[date(year, MAY, 1)] = "MeΔ‘unarodni praznik rada" + # Statehood day (new) if year >= 2020: - # Statehood day self[date(year, MAY, 30)] = "Dan drΕΎavnosti" # Anti-fascist struggle day self[date(year, JUN, 22)] = "Dan antifaΕ‘ističke borbe" + # Statehood day (old) if year < 2020: - # Statehood day self[date(year, JUN, 25)] = "Dan drΕΎavnosti" # Victory and Homeland Thanksgiving Day @@ -64,17 +65,16 @@ # Assumption of Mary self[date(year, AUG, 15)] = "Velika Gospa" + # Independence Day (old) if year < 2020: - # Independence Day self[date(year, OCT, 8)] = "Dan neovisnosti" # All Saints' Day - self[date(year, NOV, 1)] = "Dan svih svetih" + self[date(year, NOV, 1)] = "Svi sveti" if year >= 2020: # Memorial day - self[date(year, NOV, 18)] =\ - "Dan sjeΔ‡anja na ΕΎrtve Domovinskog rata" + self[date(year, NOV, 18)] = "Dan sjeΔ‡anja" # Christmas day self[date(year, DEC, 25)] = "BoΕΎiΔ‡"
{"golden_diff": "diff --git a/holidays/countries/croatia.py b/holidays/countries/croatia.py\n--- a/holidays/countries/croatia.py\n+++ b/holidays/countries/croatia.py\n@@ -11,18 +11,18 @@\n # Website: https://github.com/dr-prodigy/python-holidays\n # License: MIT (see LICENSE file)\n \n-from datetime import date\n+from datetime import date, timedelta\n \n from dateutil.easter import easter\n-from dateutil.relativedelta import relativedelta as rd\n \n-from holidays.constants import JAN, MAY, JUN, AUG, OCT, \\\n- NOV, DEC\n+from holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC\n from holidays.holiday_base import HolidayBase\n \n \n class Croatia(HolidayBase):\n \n+ # Updated with act 022-03 / 19-01 / 219 of 14 November 2019\n+ # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n \n def __init__(self, **kwargs):\n@@ -32,6 +32,7 @@\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n+\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n@@ -39,23 +40,23 @@\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n- self[easter_date + rd(days=1)] = \"Uskr\u0161nji ponedjeljak\"\n+ self[easter_date + timedelta(days=1)] = \"Uskrsni ponedjeljak\"\n \n # Corpus Christi\n- self[easter_date + rd(days=60)] = \"Tijelovo\"\n+ self[easter_date + timedelta(days=60)] = \"Tijelovo\"\n \n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n \n+ # Statehood day (new)\n if year >= 2020:\n- # Statehood day\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n \n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n \n+ # Statehood day (old)\n if year < 2020:\n- # Statehood day\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n \n # Victory and Homeland Thanksgiving Day\n@@ -64,17 +65,16 @@\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n \n+ # Independence Day (old)\n if year < 2020:\n- # Independence Day\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n \n # All Saints' Day\n- self[date(year, NOV, 1)] = \"Dan svih svetih\"\n+ self[date(year, NOV, 1)] = \"Svi sveti\"\n \n if year >= 2020:\n # Memorial day\n- self[date(year, NOV, 18)] =\\\n- \"Dan sje\u0107anja na \u017ertve Domovinskog rata\"\n+ self[date(year, NOV, 18)] = \"Dan sje\u0107anja\"\n \n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n", "issue": "Wrong workday info for country HR\nToday (Oct. 8, 2020) my alarmclock automation did not go off, because my workday sensor gave the wrong info (no workday). This day used to be a holiday in Croatia, but is not anymore.\r\n\r\nbinary_sensor:\r\n - platform: workday\r\n country: HR\r\n\r\n![image](https://user-images.githubusercontent.com/61622258/95419607-4082d000-093a-11eb-904c-77a2639b1719.png)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import JAN, MAY, JUN, AUG, OCT, \\\n NOV, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Croatia(HolidayBase):\n\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n\n def __init__(self, **kwargs):\n self.country = \"HR\"\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n self[easter_date + rd(days=1)] = \"Uskr\u0161nji ponedjeljak\"\n\n # Corpus Christi\n self[easter_date + rd(days=60)] = \"Tijelovo\"\n\n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n\n if year >= 2020:\n # Statehood day\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n\n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n\n if year < 2020:\n # Statehood day\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n\n # Victory and Homeland Thanksgiving Day\n self[date(year, AUG, 5)] = \"Dan pobjede i domovinske zahvalnosti\"\n\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n\n if year < 2020:\n # Independence Day\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n\n # All Saints' Day\n self[date(year, NOV, 1)] = \"Dan svih svetih\"\n\n if year >= 2020:\n # Memorial day\n self[date(year, NOV, 18)] =\\\n \"Dan sje\u0107anja na \u017ertve Domovinskog rata\"\n\n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n\n # St. Stephen's day\n self[date(year, DEC, 26)] = \"Sveti Stjepan\"\n\n\nclass HR(Croatia):\n pass\n\n\nclass HRV(Croatia):\n pass\n", "path": "holidays/countries/croatia.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date, timedelta\n\nfrom dateutil.easter import easter\n\nfrom holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Croatia(HolidayBase):\n\n # Updated with act 022-03 / 19-01 / 219 of 14 November 2019\n # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n\n def __init__(self, **kwargs):\n self.country = \"HR\"\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n self[easter_date + timedelta(days=1)] = \"Uskrsni ponedjeljak\"\n\n # Corpus Christi\n self[easter_date + timedelta(days=60)] = \"Tijelovo\"\n\n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n\n # Statehood day (new)\n if year >= 2020:\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n\n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n\n # Statehood day (old)\n if year < 2020:\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n\n # Victory and Homeland Thanksgiving Day\n self[date(year, AUG, 5)] = \"Dan pobjede i domovinske zahvalnosti\"\n\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n\n # Independence Day (old)\n if year < 2020:\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n\n # All Saints' Day\n self[date(year, NOV, 1)] = \"Svi sveti\"\n\n if year >= 2020:\n # Memorial day\n self[date(year, NOV, 18)] = \"Dan sje\u0107anja\"\n\n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n\n # St. Stephen's day\n self[date(year, DEC, 26)] = \"Sveti Stjepan\"\n\n\nclass HR(Croatia):\n pass\n\n\nclass HRV(Croatia):\n pass\n", "path": "holidays/countries/croatia.py"}]}
1,373
883
gh_patches_debug_7385
rasdani/github-patches
git_diff
mozilla__bugbug-138
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix linting issue preventing us from updating to flake8 4.7.1 https://github.com/mozilla/bugbug/pull/135 is failing because: > ./bugbug/model.py:102:21: E117 over-indented We should fix it, and then we can merge https://github.com/mozilla/bugbug/pull/135. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/model.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import numpy as np 7 import shap 8 from imblearn.metrics import classification_report_imbalanced 9 from imblearn.under_sampling import RandomUnderSampler 10 from sklearn import metrics 11 from sklearn.externals import joblib 12 from sklearn.feature_extraction.text import TfidfVectorizer 13 from sklearn.model_selection import cross_validate 14 from sklearn.model_selection import train_test_split 15 16 from bugbug import bugzilla 17 from bugbug.nlp import SpacyVectorizer 18 19 20 class Model(): 21 def __init__(self, lemmatization=False): 22 if lemmatization: 23 self.text_vectorizer = SpacyVectorizer 24 else: 25 self.text_vectorizer = TfidfVectorizer 26 27 self.undersampling_enabled = True 28 self.cross_validation_enabled = True 29 30 def get_feature_names(self): 31 return [] 32 33 def get_important_features(self, cutoff, shap_values): 34 # Calculate the values that represent the fraction of the model output variability attributable 35 # to each feature across the whole dataset. 36 shap_sums = shap_values.sum(0) 37 abs_shap_sums = np.abs(shap_values).sum(0) 38 rel_shap_sums = abs_shap_sums / abs_shap_sums.sum() 39 40 cut_off_value = cutoff * np.amax(rel_shap_sums) 41 42 # Get indices of features that pass the cut off value 43 top_feature_indices = np.where(rel_shap_sums >= cut_off_value)[0] 44 # Get the importance values of the top features from their indices 45 top_features = np.take(rel_shap_sums, top_feature_indices) 46 # Gets the sign of the importance from shap_sums as boolean 47 is_positive = (np.take(shap_sums, top_feature_indices)) >= 0 48 # Stack the importance, indices and shap_sums in a 2D array 49 top_features = np.column_stack((top_features, top_feature_indices, is_positive)) 50 # Sort the array (in decreasing order of importance values) 51 top_features = top_features[top_features[:, 0].argsort()][::-1] 52 53 return top_features 54 55 def train(self, importance_cutoff=0.15): 56 classes = self.get_labels() 57 class_names = sorted(list(set(classes.values())), reverse=True) 58 59 # Get bugs, filtering out those for which we have no labels. 60 def bugs(): 61 return (bug for bug in bugzilla.get_bugs() if bug['id'] in classes) 62 63 # Calculate labels. 64 y = np.array([classes[bug['id']] for bug in bugs()]) 65 66 # Extract features from the bugs. 67 X = self.extraction_pipeline.fit_transform(bugs()) 68 69 print(f'X: {X.shape}, y: {y.shape}') 70 71 # Split dataset in training and test. 72 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0) 73 74 if self.undersampling_enabled: 75 # Under-sample the majority classes, as the datasets are imbalanced. 
76 X_train, y_train = RandomUnderSampler(random_state=0).fit_sample(X_train, y_train) 77 78 print(f'X_train: {X_train.shape}, y_train: {y_train.shape}') 79 print(f'X_test: {X_test.shape}, y_test: {y_test.shape}') 80 81 # Use k-fold cross validation to evaluate results. 82 if self.cross_validation_enabled: 83 scorings = ['accuracy', 'precision', 'recall'] 84 scores = cross_validate(self.clf, X_train, y_train, scoring=scorings, cv=5) 85 print('Cross Validation scores:') 86 for scoring in scorings: 87 score = scores[f'test_{scoring}'] 88 print(f'{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})') 89 90 # Evaluate results on the test set. 91 self.clf.fit(X_train, y_train) 92 93 feature_names = self.get_feature_names() 94 if len(feature_names): 95 explainer = shap.TreeExplainer(self.clf) 96 shap_values = explainer.shap_values(X_train) 97 98 important_features = self.get_important_features(importance_cutoff, shap_values) 99 100 print(f'\nTop {len(important_features)} Features :') 101 for i, [importance, index, is_positive] in enumerate(important_features): 102 print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})') 103 104 y_pred = self.clf.predict(X_test) 105 106 print(f'No confidence threshold - {len(y_test)} classified') 107 print(metrics.confusion_matrix(y_test, y_pred, labels=class_names)) 108 print(classification_report_imbalanced(y_test, y_pred, labels=class_names)) 109 110 # Evaluate results on the test set for some confidence thresholds. 111 for confidence_threshold in [0.6, 0.7, 0.8, 0.9]: 112 y_pred_probas = self.clf.predict_proba(X_test) 113 114 y_test_filter = [] 115 y_pred_filter = [] 116 for i in range(0, len(y_test)): 117 argmax = np.argmax(y_pred_probas[i]) 118 if y_pred_probas[i][argmax] < confidence_threshold: 119 continue 120 121 y_test_filter.append(y_test[i]) 122 y_pred_filter.append(argmax) 123 124 y_pred_filter = self.clf._le.inverse_transform(y_pred_filter) 125 126 print(f'\nConfidence threshold > {confidence_threshold} - {len(y_test_filter)} classified') 127 print(metrics.confusion_matrix(y_test_filter, y_pred_filter, labels=class_names)) 128 print(classification_report_imbalanced(y_test_filter, y_pred_filter, labels=class_names)) 129 130 joblib.dump(self, self.__class__.__name__.lower()) 131 132 @staticmethod 133 def load(model_file_name): 134 return joblib.load(model_file_name) 135 136 def overwrite_classes(self, bugs, classes, probabilities): 137 return classes 138 139 def classify(self, bugs, probabilities=False, importances=False, importance_cutoff=0.15): 140 assert bugs is not None 141 assert self.extraction_pipeline is not None and self.clf is not None, 'The module needs to be initialized first' 142 143 if not isinstance(bugs, list): 144 bugs = [bugs] 145 146 assert isinstance(bugs[0], dict) 147 148 X = self.extraction_pipeline.transform(bugs) 149 if probabilities: 150 classes = self.clf.predict_proba(X) 151 else: 152 classes = self.clf.predict(X) 153 154 classes = self.overwrite_classes(bugs, classes, probabilities) 155 156 if importances: 157 explainer = shap.TreeExplainer(self.clf) 158 shap_values = explainer.shap_values(X) 159 160 importances = self.get_important_features(importance_cutoff, shap_values) 161 162 return classes, importances 163 164 return classes 165 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/model.py b/bugbug/model.py
--- a/bugbug/model.py
+++ b/bugbug/model.py
@@ -99,7 +99,7 @@
 
             print(f'\nTop {len(important_features)} Features :')
             for i, [importance, index, is_positive] in enumerate(important_features):
-                    print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})')
+                print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})')
 
             y_pred = self.clf.predict(X_test)
{"golden_diff": "diff --git a/bugbug/model.py b/bugbug/model.py\n--- a/bugbug/model.py\n+++ b/bugbug/model.py\n@@ -99,7 +99,7 @@\n \n print(f'\\nTop {len(important_features)} Features :')\n for i, [importance, index, is_positive] in enumerate(important_features):\n- print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n+ print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n \n y_pred = self.clf.predict(X_test)\n", "issue": "Fix linting issue preventing us from updating to flake8 4.7.1\nhttps://github.com/mozilla/bugbug/pull/135 is failing because:\r\n> ./bugbug/model.py:102:21: E117 over-indented\r\n\r\nWe should fix it, and then we can merge https://github.com/mozilla/bugbug/pull/135.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport numpy as np\nimport shap\nfrom imblearn.metrics import classification_report_imbalanced\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import train_test_split\n\nfrom bugbug import bugzilla\nfrom bugbug.nlp import SpacyVectorizer\n\n\nclass Model():\n def __init__(self, lemmatization=False):\n if lemmatization:\n self.text_vectorizer = SpacyVectorizer\n else:\n self.text_vectorizer = TfidfVectorizer\n\n self.undersampling_enabled = True\n self.cross_validation_enabled = True\n\n def get_feature_names(self):\n return []\n\n def get_important_features(self, cutoff, shap_values):\n # Calculate the values that represent the fraction of the model output variability attributable\n # to each feature across the whole dataset.\n shap_sums = shap_values.sum(0)\n abs_shap_sums = np.abs(shap_values).sum(0)\n rel_shap_sums = abs_shap_sums / abs_shap_sums.sum()\n\n cut_off_value = cutoff * np.amax(rel_shap_sums)\n\n # Get indices of features that pass the cut off value\n top_feature_indices = np.where(rel_shap_sums >= cut_off_value)[0]\n # Get the importance values of the top features from their indices\n top_features = np.take(rel_shap_sums, top_feature_indices)\n # Gets the sign of the importance from shap_sums as boolean\n is_positive = (np.take(shap_sums, top_feature_indices)) >= 0\n # Stack the importance, indices and shap_sums in a 2D array\n top_features = np.column_stack((top_features, top_feature_indices, is_positive))\n # Sort the array (in decreasing order of importance values)\n top_features = top_features[top_features[:, 0].argsort()][::-1]\n\n return top_features\n\n def train(self, importance_cutoff=0.15):\n classes = self.get_labels()\n class_names = sorted(list(set(classes.values())), reverse=True)\n\n # Get bugs, filtering out those for which we have no labels.\n def bugs():\n return (bug for bug in bugzilla.get_bugs() if bug['id'] in classes)\n\n # Calculate labels.\n y = np.array([classes[bug['id']] for bug in bugs()])\n\n # Extract features from the bugs.\n X = self.extraction_pipeline.fit_transform(bugs())\n\n print(f'X: {X.shape}, y: {y.shape}')\n\n # Split dataset in training and test.\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)\n\n if self.undersampling_enabled:\n # 
Under-sample the majority classes, as the datasets are imbalanced.\n X_train, y_train = RandomUnderSampler(random_state=0).fit_sample(X_train, y_train)\n\n print(f'X_train: {X_train.shape}, y_train: {y_train.shape}')\n print(f'X_test: {X_test.shape}, y_test: {y_test.shape}')\n\n # Use k-fold cross validation to evaluate results.\n if self.cross_validation_enabled:\n scorings = ['accuracy', 'precision', 'recall']\n scores = cross_validate(self.clf, X_train, y_train, scoring=scorings, cv=5)\n print('Cross Validation scores:')\n for scoring in scorings:\n score = scores[f'test_{scoring}']\n print(f'{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})')\n\n # Evaluate results on the test set.\n self.clf.fit(X_train, y_train)\n\n feature_names = self.get_feature_names()\n if len(feature_names):\n explainer = shap.TreeExplainer(self.clf)\n shap_values = explainer.shap_values(X_train)\n\n important_features = self.get_important_features(importance_cutoff, shap_values)\n\n print(f'\\nTop {len(important_features)} Features :')\n for i, [importance, index, is_positive] in enumerate(important_features):\n print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n\n y_pred = self.clf.predict(X_test)\n\n print(f'No confidence threshold - {len(y_test)} classified')\n print(metrics.confusion_matrix(y_test, y_pred, labels=class_names))\n print(classification_report_imbalanced(y_test, y_pred, labels=class_names))\n\n # Evaluate results on the test set for some confidence thresholds.\n for confidence_threshold in [0.6, 0.7, 0.8, 0.9]:\n y_pred_probas = self.clf.predict_proba(X_test)\n\n y_test_filter = []\n y_pred_filter = []\n for i in range(0, len(y_test)):\n argmax = np.argmax(y_pred_probas[i])\n if y_pred_probas[i][argmax] < confidence_threshold:\n continue\n\n y_test_filter.append(y_test[i])\n y_pred_filter.append(argmax)\n\n y_pred_filter = self.clf._le.inverse_transform(y_pred_filter)\n\n print(f'\\nConfidence threshold > {confidence_threshold} - {len(y_test_filter)} classified')\n print(metrics.confusion_matrix(y_test_filter, y_pred_filter, labels=class_names))\n print(classification_report_imbalanced(y_test_filter, y_pred_filter, labels=class_names))\n\n joblib.dump(self, self.__class__.__name__.lower())\n\n @staticmethod\n def load(model_file_name):\n return joblib.load(model_file_name)\n\n def overwrite_classes(self, bugs, classes, probabilities):\n return classes\n\n def classify(self, bugs, probabilities=False, importances=False, importance_cutoff=0.15):\n assert bugs is not None\n assert self.extraction_pipeline is not None and self.clf is not None, 'The module needs to be initialized first'\n\n if not isinstance(bugs, list):\n bugs = [bugs]\n\n assert isinstance(bugs[0], dict)\n\n X = self.extraction_pipeline.transform(bugs)\n if probabilities:\n classes = self.clf.predict_proba(X)\n else:\n classes = self.clf.predict(X)\n\n classes = self.overwrite_classes(bugs, classes, probabilities)\n\n if importances:\n explainer = shap.TreeExplainer(self.clf)\n shap_values = explainer.shap_values(X)\n\n importances = self.get_important_features(importance_cutoff, shap_values)\n\n return classes, importances\n\n return classes\n", "path": "bugbug/model.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport numpy as np\nimport shap\nfrom imblearn.metrics import classification_report_imbalanced\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import train_test_split\n\nfrom bugbug import bugzilla\nfrom bugbug.nlp import SpacyVectorizer\n\n\nclass Model():\n def __init__(self, lemmatization=False):\n if lemmatization:\n self.text_vectorizer = SpacyVectorizer\n else:\n self.text_vectorizer = TfidfVectorizer\n\n self.undersampling_enabled = True\n self.cross_validation_enabled = True\n\n def get_feature_names(self):\n return []\n\n def get_important_features(self, cutoff, shap_values):\n # Calculate the values that represent the fraction of the model output variability attributable\n # to each feature across the whole dataset.\n shap_sums = shap_values.sum(0)\n abs_shap_sums = np.abs(shap_values).sum(0)\n rel_shap_sums = abs_shap_sums / abs_shap_sums.sum()\n\n cut_off_value = cutoff * np.amax(rel_shap_sums)\n\n # Get indices of features that pass the cut off value\n top_feature_indices = np.where(rel_shap_sums >= cut_off_value)[0]\n # Get the importance values of the top features from their indices\n top_features = np.take(rel_shap_sums, top_feature_indices)\n # Gets the sign of the importance from shap_sums as boolean\n is_positive = (np.take(shap_sums, top_feature_indices)) >= 0\n # Stack the importance, indices and shap_sums in a 2D array\n top_features = np.column_stack((top_features, top_feature_indices, is_positive))\n # Sort the array (in decreasing order of importance values)\n top_features = top_features[top_features[:, 0].argsort()][::-1]\n\n return top_features\n\n def train(self, importance_cutoff=0.15):\n classes = self.get_labels()\n class_names = sorted(list(set(classes.values())), reverse=True)\n\n # Get bugs, filtering out those for which we have no labels.\n def bugs():\n return (bug for bug in bugzilla.get_bugs() if bug['id'] in classes)\n\n # Calculate labels.\n y = np.array([classes[bug['id']] for bug in bugs()])\n\n # Extract features from the bugs.\n X = self.extraction_pipeline.fit_transform(bugs())\n\n print(f'X: {X.shape}, y: {y.shape}')\n\n # Split dataset in training and test.\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)\n\n if self.undersampling_enabled:\n # Under-sample the majority classes, as the datasets are imbalanced.\n X_train, y_train = RandomUnderSampler(random_state=0).fit_sample(X_train, y_train)\n\n print(f'X_train: {X_train.shape}, y_train: {y_train.shape}')\n print(f'X_test: {X_test.shape}, y_test: {y_test.shape}')\n\n # Use k-fold cross validation to evaluate results.\n if self.cross_validation_enabled:\n scorings = ['accuracy', 'precision', 'recall']\n scores = cross_validate(self.clf, X_train, y_train, scoring=scorings, cv=5)\n print('Cross Validation scores:')\n for scoring in scorings:\n score = scores[f'test_{scoring}']\n print(f'{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})')\n\n # Evaluate results on the test set.\n self.clf.fit(X_train, y_train)\n\n feature_names = self.get_feature_names()\n if len(feature_names):\n explainer = shap.TreeExplainer(self.clf)\n shap_values = explainer.shap_values(X_train)\n\n important_features = 
self.get_important_features(importance_cutoff, shap_values)\n\n print(f'\\nTop {len(important_features)} Features :')\n for i, [importance, index, is_positive] in enumerate(important_features):\n print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n\n y_pred = self.clf.predict(X_test)\n\n print(f'No confidence threshold - {len(y_test)} classified')\n print(metrics.confusion_matrix(y_test, y_pred, labels=class_names))\n print(classification_report_imbalanced(y_test, y_pred, labels=class_names))\n\n # Evaluate results on the test set for some confidence thresholds.\n for confidence_threshold in [0.6, 0.7, 0.8, 0.9]:\n y_pred_probas = self.clf.predict_proba(X_test)\n\n y_test_filter = []\n y_pred_filter = []\n for i in range(0, len(y_test)):\n argmax = np.argmax(y_pred_probas[i])\n if y_pred_probas[i][argmax] < confidence_threshold:\n continue\n\n y_test_filter.append(y_test[i])\n y_pred_filter.append(argmax)\n\n y_pred_filter = self.clf._le.inverse_transform(y_pred_filter)\n\n print(f'\\nConfidence threshold > {confidence_threshold} - {len(y_test_filter)} classified')\n print(metrics.confusion_matrix(y_test_filter, y_pred_filter, labels=class_names))\n print(classification_report_imbalanced(y_test_filter, y_pred_filter, labels=class_names))\n\n joblib.dump(self, self.__class__.__name__.lower())\n\n @staticmethod\n def load(model_file_name):\n return joblib.load(model_file_name)\n\n def overwrite_classes(self, bugs, classes, probabilities):\n return classes\n\n def classify(self, bugs, probabilities=False, importances=False, importance_cutoff=0.15):\n assert bugs is not None\n assert self.extraction_pipeline is not None and self.clf is not None, 'The module needs to be initialized first'\n\n if not isinstance(bugs, list):\n bugs = [bugs]\n\n assert isinstance(bugs[0], dict)\n\n X = self.extraction_pipeline.transform(bugs)\n if probabilities:\n classes = self.clf.predict_proba(X)\n else:\n classes = self.clf.predict(X)\n\n classes = self.overwrite_classes(bugs, classes, probabilities)\n\n if importances:\n explainer = shap.TreeExplainer(self.clf)\n shap_values = explainer.shap_values(X)\n\n importances = self.get_important_features(importance_cutoff, shap_values)\n\n return classes, importances\n\n return classes\n", "path": "bugbug/model.py"}]}
2,274
160
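The golden diff above is a whitespace-only change: the newer flake8 (via pycodestyle) reports E117 "over-indented" for a loop body indented one level too deep. A minimal, self-contained illustration of what E117 flags and what the fix looks like follows; the function and variable names are made up for illustration, not taken from bugbug.

```python
# Minimal reproduction of pycodestyle's E117 ("over-indented"), the check that
# blocked the flake8 upgrade in the record above. Names are illustrative only.

def report(features):
    for name in features:
            print(name)      # 8 extra spaces under a 4-space `for` -> flake8 reports E117 on this line


def report_fixed(features):
    for name in features:
        print(name)          # one 4-space level per block, which is what the golden diff restores


report_fixed(["a", "b"])
```

Running `flake8 --select=E117` against the first function reproduces the warning; the second function is, in spirit, the re-indented `print` line the patch introduces.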
gh_patches_debug_39903
rasdani/github-patches
git_diff
scrapy__scrapy-5514
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop Python 3.6 support [It went end-of-life on December 2021](https://endoflife.date/python). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/utils/py36.py` Content: ``` 1 import warnings 2 3 from scrapy.exceptions import ScrapyDeprecationWarning 4 from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401 5 6 7 warnings.warn( 8 "Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.", 9 category=ScrapyDeprecationWarning, 10 stacklevel=2, 11 ) 12 ``` Path: `setup.py` Content: ``` 1 from os.path import dirname, join 2 from pkg_resources import parse_version 3 from setuptools import setup, find_packages, __version__ as setuptools_version 4 5 6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f: 7 version = f.read().decode('ascii').strip() 8 9 10 def has_environment_marker_platform_impl_support(): 11 """Code extracted from 'pytest/setup.py' 12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31 13 14 The first known release to support environment marker with range operators 15 it is 18.5, see: 16 https://setuptools.readthedocs.io/en/latest/history.html#id235 17 """ 18 return parse_version(setuptools_version) >= parse_version('18.5') 19 20 21 install_requires = [ 22 'Twisted>=17.9.0', 23 'cryptography>=2.0', 24 'cssselect>=0.9.1', 25 'itemloaders>=1.0.1', 26 'parsel>=1.5.0', 27 'pyOpenSSL>=16.2.0', 28 'queuelib>=1.4.2', 29 'service_identity>=16.0.0', 30 'w3lib>=1.17.0', 31 'zope.interface>=4.1.3', 32 'protego>=0.1.15', 33 'itemadapter>=0.1.0', 34 'setuptools', 35 'tldextract', 36 ] 37 extras_require = {} 38 cpython_dependencies = [ 39 'lxml>=3.5.0', 40 'PyDispatcher>=2.0.5', 41 ] 42 if has_environment_marker_platform_impl_support(): 43 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies 44 extras_require[':platform_python_implementation == "PyPy"'] = [ 45 # Earlier lxml versions are affected by 46 # https://foss.heptapod.net/pypy/pypy/-/issues/2498, 47 # which was fixed in Cython 0.26, released on 2017-06-19, and used to 48 # generate the C headers of lxml release tarballs published since then, the 49 # first of which was: 50 'lxml>=4.0.0', 51 'PyPyDispatcher>=2.1.0', 52 ] 53 else: 54 install_requires.extend(cpython_dependencies) 55 56 57 setup( 58 name='Scrapy', 59 version=version, 60 url='https://scrapy.org', 61 project_urls={ 62 'Documentation': 'https://docs.scrapy.org/', 63 'Source': 'https://github.com/scrapy/scrapy', 64 'Tracker': 'https://github.com/scrapy/scrapy/issues', 65 }, 66 description='A high-level Web Crawling and Web Scraping framework', 67 long_description=open('README.rst').read(), 68 author='Scrapy developers', 69 maintainer='Pablo Hoffman', 70 maintainer_email='[email protected]', 71 license='BSD', 72 packages=find_packages(exclude=('tests', 'tests.*')), 73 include_package_data=True, 74 zip_safe=False, 75 entry_points={ 76 'console_scripts': ['scrapy = scrapy.cmdline:execute'] 77 }, 78 classifiers=[ 79 'Framework :: Scrapy', 80 'Development Status :: 5 - Production/Stable', 81 'Environment :: Console', 82 'Intended Audience :: Developers', 83 'License :: OSI Approved :: BSD License', 84 'Operating System :: OS Independent', 85 'Programming Language :: Python', 86 'Programming Language :: Python :: 3', 87 'Programming Language :: Python :: 3.6', 88 
'Programming Language :: Python :: 3.7', 89 'Programming Language :: Python :: 3.8', 90 'Programming Language :: Python :: 3.9', 91 'Programming Language :: Python :: 3.10', 92 'Programming Language :: Python :: Implementation :: CPython', 93 'Programming Language :: Python :: Implementation :: PyPy', 94 'Topic :: Internet :: WWW/HTTP', 95 'Topic :: Software Development :: Libraries :: Application Frameworks', 96 'Topic :: Software Development :: Libraries :: Python Modules', 97 ], 98 python_requires='>=3.6', 99 install_requires=install_requires, 100 extras_require=extras_require, 101 ) 102 ``` Path: `scrapy/__init__.py` Content: ``` 1 """ 2 Scrapy - a web crawling and web scraping framework written for Python 3 """ 4 5 import pkgutil 6 import sys 7 import warnings 8 9 from twisted import version as _txv 10 11 # Declare top-level shortcuts 12 from scrapy.spiders import Spider 13 from scrapy.http import Request, FormRequest 14 from scrapy.selector import Selector 15 from scrapy.item import Item, Field 16 17 18 __all__ = [ 19 '__version__', 'version_info', 'twisted_version', 'Spider', 20 'Request', 'FormRequest', 'Selector', 'Item', 'Field', 21 ] 22 23 24 # Scrapy and Twisted versions 25 __version__ = (pkgutil.get_data(__package__, "VERSION") or b"").decode("ascii").strip() 26 version_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.')) 27 twisted_version = (_txv.major, _txv.minor, _txv.micro) 28 29 30 # Check minimum required Python version 31 if sys.version_info < (3, 6): 32 print(f"Scrapy {__version__} requires Python 3.6+") 33 sys.exit(1) 34 35 36 # Ignore noisy twisted deprecation warnings 37 warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted') 38 39 40 del pkgutil 41 del sys 42 del warnings 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/__init__.py b/scrapy/__init__.py
--- a/scrapy/__init__.py
+++ b/scrapy/__init__.py
@@ -28,8 +28,8 @@
 
 
 # Check minimum required Python version
-if sys.version_info < (3, 6):
-    print(f"Scrapy {__version__} requires Python 3.6+")
+if sys.version_info < (3, 7):
+    print(f"Scrapy {__version__} requires Python 3.7+")
     sys.exit(1)
 
 
diff --git a/scrapy/utils/py36.py b/scrapy/utils/py36.py
deleted file mode 100644
--- a/scrapy/utils/py36.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import warnings
-
-from scrapy.exceptions import ScrapyDeprecationWarning
-from scrapy.utils.asyncgen import collect_asyncgen  # noqa: F401
-
-
-warnings.warn(
-    "Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.",
-    category=ScrapyDeprecationWarning,
-    stacklevel=2,
-)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,35 +19,29 @@
 
 
 install_requires = [
-    'Twisted>=17.9.0',
-    'cryptography>=2.0',
+    'Twisted>=18.9.0',
+    'cryptography>=2.8',
     'cssselect>=0.9.1',
     'itemloaders>=1.0.1',
     'parsel>=1.5.0',
-    'pyOpenSSL>=16.2.0',
+    'pyOpenSSL>=19.1.0',
     'queuelib>=1.4.2',
     'service_identity>=16.0.0',
     'w3lib>=1.17.0',
-    'zope.interface>=4.1.3',
+    'zope.interface>=5.1.0',
     'protego>=0.1.15',
     'itemadapter>=0.1.0',
     'setuptools',
     'tldextract',
+    'lxml>=4.3.0',
 ]
 extras_require = {}
 cpython_dependencies = [
-    'lxml>=3.5.0',
     'PyDispatcher>=2.0.5',
 ]
 if has_environment_marker_platform_impl_support():
     extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
     extras_require[':platform_python_implementation == "PyPy"'] = [
-        # Earlier lxml versions are affected by
-        # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
-        # which was fixed in Cython 0.26, released on 2017-06-19, and used to
-        # generate the C headers of lxml release tarballs published since then, the
-        # first of which was:
-        'lxml>=4.0.0',
         'PyPyDispatcher>=2.1.0',
     ]
 else:
@@ -84,7 +78,6 @@
         'Operating System :: OS Independent',
         'Programming Language :: Python',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
         'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',
@@ -95,7 +88,7 @@
         'Topic :: Software Development :: Libraries :: Application Frameworks',
         'Topic :: Software Development :: Libraries :: Python Modules',
     ],
-    python_requires='>=3.6',
+    python_requires='>=3.7',
     install_requires=install_requires,
     extras_require=extras_require,
 )
{"golden_diff": "diff --git a/scrapy/__init__.py b/scrapy/__init__.py\n--- a/scrapy/__init__.py\n+++ b/scrapy/__init__.py\n@@ -28,8 +28,8 @@\n \n \n # Check minimum required Python version\n-if sys.version_info < (3, 6):\n- print(f\"Scrapy {__version__} requires Python 3.6+\")\n+if sys.version_info < (3, 7):\n+ print(f\"Scrapy {__version__} requires Python 3.7+\")\n sys.exit(1)\n \n \ndiff --git a/scrapy/utils/py36.py b/scrapy/utils/py36.py\ndeleted file mode 100644\n--- a/scrapy/utils/py36.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-import warnings\n-\n-from scrapy.exceptions import ScrapyDeprecationWarning\n-from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401\n-\n-\n-warnings.warn(\n- \"Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.\",\n- category=ScrapyDeprecationWarning,\n- stacklevel=2,\n-)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,35 +19,29 @@\n \n \n install_requires = [\n- 'Twisted>=17.9.0',\n- 'cryptography>=2.0',\n+ 'Twisted>=18.9.0',\n+ 'cryptography>=2.8',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n- 'pyOpenSSL>=16.2.0',\n+ 'pyOpenSSL>=19.1.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n- 'zope.interface>=4.1.3',\n+ 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n+ 'lxml>=4.3.0',\n ]\n extras_require = {}\n cpython_dependencies = [\n- 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n ]\n if has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n- # Earlier lxml versions are affected by\n- # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n- # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n- # generate the C headers of lxml release tarballs published since then, the\n- # first of which was:\n- 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\n else:\n@@ -84,7 +78,6 @@\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n@@ -95,7 +88,7 @@\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n- python_requires='>=3.6',\n+ python_requires='>=3.7',\n install_requires=install_requires,\n extras_require=extras_require,\n )\n", "issue": "Drop Python 3.6 support\n[It went end-of-life on December 2021](https://endoflife.date/python).\n", "before_files": [{"content": "import warnings\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.asyncgen import collect_asyncgen # noqa: F401\n\n\nwarnings.warn(\n \"Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.\",\n category=ScrapyDeprecationWarning,\n stacklevel=2,\n)\n", "path": "scrapy/utils/py36.py"}, {"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n 
https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n]\nextras_require = {}\ncpython_dependencies = [\n 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and web scraping framework written for Python\n\"\"\"\n\nimport pkgutil\nimport sys\nimport warnings\n\nfrom twisted import version as _txv\n\n# Declare top-level shortcuts\nfrom scrapy.spiders import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\n\n__all__ = [\n '__version__', 'version_info', 'twisted_version', 'Spider',\n 'Request', 'FormRequest', 'Selector', 'Item', 'Field',\n]\n\n\n# Scrapy and Twisted versions\n__version__ = (pkgutil.get_data(__package__, \"VERSION\") or 
b\"\").decode(\"ascii\").strip()\nversion_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\n\n\n# Check minimum required Python version\nif sys.version_info < (3, 6):\n print(f\"Scrapy {__version__} requires Python 3.6+\")\n sys.exit(1)\n\n\n# Ignore noisy twisted deprecation warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\n\n\ndel pkgutil\ndel sys\ndel warnings\n", "path": "scrapy/__init__.py"}], "after_files": [{"content": null, "path": "scrapy/utils/py36.py"}, {"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=18.9.0',\n 'cryptography>=2.8',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=19.1.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n 'lxml>=4.3.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.7',\n install_requires=install_requires,\n 
extras_require=extras_require,\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and web scraping framework written for Python\n\"\"\"\n\nimport pkgutil\nimport sys\nimport warnings\n\nfrom twisted import version as _txv\n\n# Declare top-level shortcuts\nfrom scrapy.spiders import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\n\n__all__ = [\n '__version__', 'version_info', 'twisted_version', 'Spider',\n 'Request', 'FormRequest', 'Selector', 'Item', 'Field',\n]\n\n\n# Scrapy and Twisted versions\n__version__ = (pkgutil.get_data(__package__, \"VERSION\") or b\"\").decode(\"ascii\").strip()\nversion_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\n\n\n# Check minimum required Python version\nif sys.version_info < (3, 7):\n print(f\"Scrapy {__version__} requires Python 3.7+\")\n sys.exit(1)\n\n\n# Ignore noisy twisted deprecation warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\n\n\ndel pkgutil\ndel sys\ndel warnings\n", "path": "scrapy/__init__.py"}]}
1,894
887
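Beyond packaging metadata, the only runtime behaviour this patch changes is the interpreter check in `scrapy/__init__.py`. A standalone sketch of that version-gate pattern is below; the package name and message text are placeholders rather than Scrapy's actual wording beyond what the diff shows.

```python
# Stand-alone version gate in the style of scrapy/__init__.py after the patch:
# refuse to run on interpreters older than the supported floor.
import sys

MIN_PYTHON = (3, 7)  # the patch raises this floor from (3, 6)

if sys.version_info < MIN_PYTHON:
    print(f"example-package requires Python {'.'.join(map(str, MIN_PYTHON))}+")
    sys.exit(1)

print("Python version OK:", sys.version.split()[0])
```

The same floor also has to be declared statically, which is why the diff bumps `python_requires='>=3.7'` and drops the 3.6 trove classifier in setup.py.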
gh_patches_debug_351
rasdani/github-patches
git_diff
ethereum__web3.py-3083
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RuntimeError: release unlocked lock * Version: 6.8.0 * Python: 3.11.1 * OS: linux * `pip freeze` output ``` ξ‚± pip freeze 4167ms aiofiles==23.1.0 aiohttp==3.8.5 aiosignal==1.3.1 alembic==1.11.3 async-timeout==4.0.2 asyncpg==0.28.0 attrs==23.1.0 base58==2.1.1 bitarray==2.7.5 certifi==2023.5.7 charset-normalizer==3.1.0 cytoolz==0.12.1 ecs-logging==2.1.0 eth-abi==4.1.0 eth-account==0.9.0 eth-hash==0.5.2 eth-keyfile==0.6.1 eth-keys==0.4.0 eth-rlp==0.3.0 eth-typing==3.4.0 eth-utils==2.2.0 frozenlist==1.3.3 grpcio==1.57.0 grpcio-tools==1.57.0 hexbytes==0.3.1 html5tagger==1.3.0 httptools==0.5.0 idna==3.4 jsonschema==4.17.3 lru-dict==1.2.0 Mako==1.2.4 MarkupSafe==2.1.2 multidict==6.0.4 numpy==1.25.2 parsimonious==0.9.0 prometheus-client==0.17.1 protobuf==4.23.0 pycryptodome==3.18.0 pydantic==1.10.12 pyrsistent==0.19.3 pyunormalize==15.0.0 PyYAML==6.0 redis==5.0.0 regex==2023.6.3 requests==2.31.0 rlp==3.0.0 sanic==23.6.0 sanic-ext==23.6.0 sanic-routing==23.6.0 SQLAlchemy==2.0.20 toolz==0.12.0 tracerite==1.1.0 typing_extensions==4.5.0 ujson==5.7.0 urllib3==2.0.2 uvloop==0.17.0 web3==6.8.0 websockets==11.0.3 yarl==1.9.2 ``` ### What was wrong? * The code which produced the error ```py provider = AsyncHTTPProvider(request.app.config.get("ETHEREUM_MAINNET_URL")) w3 = AsyncWeb3(provider) contract = w3.eth.contract( address=MAINNET_TOKEN_ADDRESS_DETECTION, abi=single_call_balance_checker_abi, ) address_keys = list(TOKEN_METADATA_MAP.keys()) (native_balance, balance_values) = await asyncio.gather( w3.eth.get_balance(to_checksum_address(address)), contract.functions.balances( [to_checksum_address(address)], address_keys, ).call(), ) ``` * The full output of the error ```py File "handle_request", line 97, in handle_request File "/app/data_service/ethereum/views/balances.py", line 54, in get_balances (native_balance, balance_values) = await asyncio.gather( ^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/eth/async_eth.py", line 435, in get_balance return await self._get_balance(account, block_identifier) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/module.py", line 114, in caller result = await async_w3.manager.coro_request( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/manager.py", line 264, in coro_request response = await self._coro_make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/manager.py", line 199, in _coro_make_request return await request_func(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/middleware/gas_price_strategy.py", line 126, in middleware return await make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/middleware/names.py", line 139, in middleware return await make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/middleware/attrdict.py", line 69, in middleware response = await make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/middleware/formatting.py", line 165, in middleware response = await make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/middleware/buffered_gas_estimate.py", 
line 58, in middleware return await make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/middleware/exception_retry_request.py", line 151, in middleware return await make_request(method, params) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/providers/async_rpc.py", line 91, in make_request raw_response = await async_make_post_request( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 239, in async_make_post_request response = await async_get_response_from_post_request( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 231, in async_get_response_from_post_request session = await async_cache_and_return_session(endpoint_uri) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 141, in async_cache_and_return_session async with async_lock(_async_session_pool, _async_session_cache_lock): File "/usr/local/lib/python3.11/contextlib.py", line 204, in __aenter__ return await anext(self.gen) ^^^^^^^^^^^^^^^^^^^^^ File "/app/.venv/lib/python3.11/site-packages/web3/_utils/async_caching.py", line 21, in async_lock lock.release() ``` * What type of node you were connecting to. AsyncHTTPProvider (Infura) ### How can it be fixed? I tried to reproduce this error locally, but it only occurs in production. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `web3/_utils/async_caching.py` Content: ``` 1 import asyncio 2 from concurrent.futures import ( 3 ThreadPoolExecutor, 4 ) 5 import contextlib 6 import threading 7 from typing import ( 8 AsyncGenerator, 9 ) 10 11 12 @contextlib.asynccontextmanager 13 async def async_lock( 14 thread_pool: ThreadPoolExecutor, lock: threading.Lock 15 ) -> AsyncGenerator[None, None]: 16 loop = asyncio.get_event_loop() 17 try: 18 await loop.run_in_executor(thread_pool, lock.acquire) 19 yield 20 finally: 21 lock.release() 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/web3/_utils/async_caching.py b/web3/_utils/async_caching.py
--- a/web3/_utils/async_caching.py
+++ b/web3/_utils/async_caching.py
@@ -18,4 +18,5 @@
         await loop.run_in_executor(thread_pool, lock.acquire)
         yield
     finally:
-        lock.release()
+        if lock.locked():
+            lock.release()
{"golden_diff": "diff --git a/web3/_utils/async_caching.py b/web3/_utils/async_caching.py\n--- a/web3/_utils/async_caching.py\n+++ b/web3/_utils/async_caching.py\n@@ -18,4 +18,5 @@\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n- lock.release()\n+ if lock.locked():\n+ lock.release()\n", "issue": "RuntimeError: release unlocked lock\n* Version: 6.8.0\r\n* Python: 3.11.1\r\n* OS: linux\r\n* `pip freeze` output\r\n\r\n```\r\n\ue0b1 pip freeze 4167ms\r\naiofiles==23.1.0\r\naiohttp==3.8.5\r\naiosignal==1.3.1\r\nalembic==1.11.3\r\nasync-timeout==4.0.2\r\nasyncpg==0.28.0\r\nattrs==23.1.0\r\nbase58==2.1.1\r\nbitarray==2.7.5\r\ncertifi==2023.5.7\r\ncharset-normalizer==3.1.0\r\ncytoolz==0.12.1\r\necs-logging==2.1.0\r\neth-abi==4.1.0\r\neth-account==0.9.0\r\neth-hash==0.5.2\r\neth-keyfile==0.6.1\r\neth-keys==0.4.0\r\neth-rlp==0.3.0\r\neth-typing==3.4.0\r\neth-utils==2.2.0\r\nfrozenlist==1.3.3\r\ngrpcio==1.57.0\r\ngrpcio-tools==1.57.0\r\nhexbytes==0.3.1\r\nhtml5tagger==1.3.0\r\nhttptools==0.5.0\r\nidna==3.4\r\njsonschema==4.17.3\r\nlru-dict==1.2.0\r\nMako==1.2.4\r\nMarkupSafe==2.1.2\r\nmultidict==6.0.4\r\nnumpy==1.25.2\r\nparsimonious==0.9.0\r\nprometheus-client==0.17.1\r\nprotobuf==4.23.0\r\npycryptodome==3.18.0\r\npydantic==1.10.12\r\npyrsistent==0.19.3\r\npyunormalize==15.0.0\r\nPyYAML==6.0\r\nredis==5.0.0\r\nregex==2023.6.3\r\nrequests==2.31.0\r\nrlp==3.0.0\r\nsanic==23.6.0\r\nsanic-ext==23.6.0\r\nsanic-routing==23.6.0\r\nSQLAlchemy==2.0.20\r\ntoolz==0.12.0\r\ntracerite==1.1.0\r\ntyping_extensions==4.5.0\r\nujson==5.7.0\r\nurllib3==2.0.2\r\nuvloop==0.17.0\r\nweb3==6.8.0\r\nwebsockets==11.0.3\r\nyarl==1.9.2\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\n\r\n* The code which produced the error\r\n```py\r\n provider = AsyncHTTPProvider(request.app.config.get(\"ETHEREUM_MAINNET_URL\"))\r\n w3 = AsyncWeb3(provider)\r\n\r\n contract = w3.eth.contract(\r\n address=MAINNET_TOKEN_ADDRESS_DETECTION,\r\n abi=single_call_balance_checker_abi,\r\n )\r\n address_keys = list(TOKEN_METADATA_MAP.keys())\r\n\r\n (native_balance, balance_values) = await asyncio.gather(\r\n w3.eth.get_balance(to_checksum_address(address)),\r\n contract.functions.balances(\r\n [to_checksum_address(address)],\r\n address_keys,\r\n ).call(),\r\n )\r\n```\r\n* The full output of the error\r\n```py\r\nFile \"handle_request\", line 97, in handle_request\r\n File \"/app/data_service/ethereum/views/balances.py\", line 54, in get_balances\r\n (native_balance, balance_values) = await asyncio.gather(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/eth/async_eth.py\", line 435, in get_balance\r\n return await self._get_balance(account, block_identifier)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/module.py\", line 114, in caller\r\n result = await async_w3.manager.coro_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/manager.py\", line 264, in coro_request\r\n response = await self._coro_make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/manager.py\", line 199, in _coro_make_request\r\n return await request_func(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/gas_price_strategy.py\", line 126, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File 
\"/app/.venv/lib/python3.11/site-packages/web3/middleware/names.py\", line 139, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/attrdict.py\", line 69, in middleware\r\n response = await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/formatting.py\", line 165, in middleware\r\n response = await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/buffered_gas_estimate.py\", line 58, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/exception_retry_request.py\", line 151, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/providers/async_rpc.py\", line 91, in make_request\r\n raw_response = await async_make_post_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py\", line 239, in async_make_post_request\r\n response = await async_get_response_from_post_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py\", line 231, in async_get_response_from_post_request\r\n session = await async_cache_and_return_session(endpoint_uri)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py\", line 141, in async_cache_and_return_session\r\n async with async_lock(_async_session_pool, _async_session_cache_lock):\r\n File \"/usr/local/lib/python3.11/contextlib.py\", line 204, in __aenter__\r\n return await anext(self.gen)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/async_caching.py\", line 21, in async_lock\r\n lock.release()\r\n```\r\n* What type of node you were connecting to.\r\nAsyncHTTPProvider (Infura)\r\n\r\n\r\n### How can it be fixed?\r\n\r\n\r\nI tried to reproduce this error locally, but it only occurs in production. \n", "before_files": [{"content": "import asyncio\nfrom concurrent.futures import (\n ThreadPoolExecutor,\n)\nimport contextlib\nimport threading\nfrom typing import (\n AsyncGenerator,\n)\n\n\[email protected]\nasync def async_lock(\n thread_pool: ThreadPoolExecutor, lock: threading.Lock\n) -> AsyncGenerator[None, None]:\n loop = asyncio.get_event_loop()\n try:\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n lock.release()\n", "path": "web3/_utils/async_caching.py"}], "after_files": [{"content": "import asyncio\nfrom concurrent.futures import (\n ThreadPoolExecutor,\n)\nimport contextlib\nimport threading\nfrom typing import (\n AsyncGenerator,\n)\n\n\[email protected]\nasync def async_lock(\n thread_pool: ThreadPoolExecutor, lock: threading.Lock\n) -> AsyncGenerator[None, None]:\n loop = asyncio.get_event_loop()\n try:\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n if lock.locked():\n lock.release()\n", "path": "web3/_utils/async_caching.py"}]}
2,155
95
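The one-line guard in this golden diff is easiest to see in isolation. The sketch below reimplements `async_lock` with the fix applied and drives it once; the thread pool, lock, and `main` wrapper are test scaffolding rather than web3.py code, and `get_running_loop()` stands in for the library's `get_event_loop()` call.

```python
import asyncio
import contextlib
import threading
from concurrent.futures import ThreadPoolExecutor


@contextlib.asynccontextmanager
async def async_lock(pool: ThreadPoolExecutor, lock: threading.Lock):
    """Acquire a threading.Lock without blocking the event loop."""
    loop = asyncio.get_running_loop()
    try:
        await loop.run_in_executor(pool, lock.acquire)
        yield
    finally:
        # The fix: only release when the lock is actually held. If the acquire
        # above never completed (e.g. the task was cancelled mid-await), calling
        # release() raises "RuntimeError: release unlocked lock".
        if lock.locked():
            lock.release()


async def main() -> None:
    pool = ThreadPoolExecutor(max_workers=1)
    lock = threading.Lock()
    async with async_lock(pool, lock):
        print("inside critical section, lock held:", lock.locked())
    print("after exit, lock held:", lock.locked())


if __name__ == "__main__":
    asyncio.run(main())
```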
gh_patches_debug_10492
rasdani/github-patches
git_diff
fossasia__open-event-server-2532
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: Kubernetes Deployment Info shown on heroku Do not try to show Kubernetes deployment info when not hosted on Kubernetes --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/views/admin/super_admin/super_admin.py` Content: ``` 1 import os 2 3 from flask_admin import expose 4 5 from app.views.admin.super_admin.super_admin_base import SuperAdminBaseView 6 from ....helpers.data_getter import DataGetter 7 from app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count 8 from app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER 9 from app.helpers.kubernetes import KubernetesApi 10 11 12 class SuperAdminView(SuperAdminBaseView): 13 14 @expose('/') 15 def index_view(self): 16 events = DataGetter.get_all_events()[:5] 17 number_live_events = get_count(DataGetter.get_all_live_events()) 18 number_draft_events = get_count(DataGetter.get_all_draft_events()) 19 number_past_events = get_count(DataGetter.get_all_past_events()) 20 super_admins = DataGetter.get_all_super_admins() 21 admins = DataGetter.get_all_admins() 22 registered_users = DataGetter.get_all_registered_users() 23 # TODO Fix function and correct this 24 organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER)) 25 co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER)) 26 track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER)) 27 attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE)) 28 accepted_sessions = DataGetter.get_all_accepted_sessions() 29 rejected_sessions = DataGetter.get_all_rejected_sessions() 30 draft_sessions = DataGetter.get_all_draft_sessions() 31 email_times = DataGetter.get_email_by_times() 32 33 commit_info = None 34 version = None 35 on_kubernetes = False 36 pods_info = None 37 38 if KubernetesApi.is_on_kubernetes(): 39 on_kubernetes = True 40 kubernetes_api = KubernetesApi() 41 pods_info = kubernetes_api.get_pods()['items'] 42 version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git') 43 commit_info = os.getenv('BRANCH', 'development') 44 else: 45 version = get_latest_heroku_release() 46 commit_info = None 47 commit_number = None 48 if version: 49 commit_number = version['description'].split(' ')[1] 50 commit_info = get_commit_info(commit_number) 51 52 return self.render('/gentelella/admin/super_admin/widgets/index.html', 53 events=events, 54 version=version, 55 commit_info=commit_info, 56 on_kubernetes=True, 57 pods_info=pods_info, 58 number_live_events=number_live_events, 59 number_draft_events=number_draft_events, 60 number_past_events=number_past_events, 61 super_admins=super_admins, 62 admins=admins, 63 registered_users=registered_users, 64 organizers=organizers, 65 co_organizers=co_organizers, 66 track_organizers=track_organizers, 67 attendees=attendees, 68 accepted_sessions=accepted_sessions, 69 rejected_sessions=rejected_sessions, 70 draft_sessions=draft_sessions, 71 email_times=email_times) 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/views/admin/super_admin/super_admin.py b/app/views/admin/super_admin/super_admin.py
--- a/app/views/admin/super_admin/super_admin.py
+++ b/app/views/admin/super_admin/super_admin.py
@@ -53,7 +53,7 @@
                            events=events,
                            version=version,
                            commit_info=commit_info,
-                           on_kubernetes=True,
+                           on_kubernetes=on_kubernetes,
                            pods_info=pods_info,
                            number_live_events=number_live_events,
                            number_draft_events=number_draft_events,
{"golden_diff": "diff --git a/app/views/admin/super_admin/super_admin.py b/app/views/admin/super_admin/super_admin.py\n--- a/app/views/admin/super_admin/super_admin.py\n+++ b/app/views/admin/super_admin/super_admin.py\n@@ -53,7 +53,7 @@\n events=events,\n version=version,\n commit_info=commit_info,\n- on_kubernetes=True,\n+ on_kubernetes=on_kubernetes,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n", "issue": "Bug: Kubernetes Deployment Info shown on heroku\nDo not try to show Kubernetes deployment info when not hosted on Kubernetes\n", "before_files": [{"content": "import os\n\nfrom flask_admin import expose\n\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView\nfrom ....helpers.data_getter import DataGetter\nfrom app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count\nfrom app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER\nfrom app.helpers.kubernetes import KubernetesApi\n\n\nclass SuperAdminView(SuperAdminBaseView):\n\n @expose('/')\n def index_view(self):\n events = DataGetter.get_all_events()[:5]\n number_live_events = get_count(DataGetter.get_all_live_events())\n number_draft_events = get_count(DataGetter.get_all_draft_events())\n number_past_events = get_count(DataGetter.get_all_past_events())\n super_admins = DataGetter.get_all_super_admins()\n admins = DataGetter.get_all_admins()\n registered_users = DataGetter.get_all_registered_users()\n # TODO Fix function and correct this\n organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))\n co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))\n track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))\n attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))\n accepted_sessions = DataGetter.get_all_accepted_sessions()\n rejected_sessions = DataGetter.get_all_rejected_sessions()\n draft_sessions = DataGetter.get_all_draft_sessions()\n email_times = DataGetter.get_email_by_times()\n\n commit_info = None\n version = None\n on_kubernetes = False\n pods_info = None\n\n if KubernetesApi.is_on_kubernetes():\n on_kubernetes = True\n kubernetes_api = KubernetesApi()\n pods_info = kubernetes_api.get_pods()['items']\n version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')\n commit_info = os.getenv('BRANCH', 'development')\n else:\n version = get_latest_heroku_release()\n commit_info = None\n commit_number = None\n if version:\n commit_number = version['description'].split(' ')[1]\n commit_info = get_commit_info(commit_number)\n\n return self.render('/gentelella/admin/super_admin/widgets/index.html',\n events=events,\n version=version,\n commit_info=commit_info,\n on_kubernetes=True,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n number_past_events=number_past_events,\n super_admins=super_admins,\n admins=admins,\n registered_users=registered_users,\n organizers=organizers,\n co_organizers=co_organizers,\n track_organizers=track_organizers,\n attendees=attendees,\n accepted_sessions=accepted_sessions,\n rejected_sessions=rejected_sessions,\n draft_sessions=draft_sessions,\n email_times=email_times)\n", "path": "app/views/admin/super_admin/super_admin.py"}], "after_files": [{"content": "import os\n\nfrom flask_admin import expose\n\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView\nfrom ....helpers.data_getter import DataGetter\nfrom 
app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count\nfrom app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER\nfrom app.helpers.kubernetes import KubernetesApi\n\n\nclass SuperAdminView(SuperAdminBaseView):\n\n @expose('/')\n def index_view(self):\n events = DataGetter.get_all_events()[:5]\n number_live_events = get_count(DataGetter.get_all_live_events())\n number_draft_events = get_count(DataGetter.get_all_draft_events())\n number_past_events = get_count(DataGetter.get_all_past_events())\n super_admins = DataGetter.get_all_super_admins()\n admins = DataGetter.get_all_admins()\n registered_users = DataGetter.get_all_registered_users()\n # TODO Fix function and correct this\n organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))\n co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))\n track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))\n attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))\n accepted_sessions = DataGetter.get_all_accepted_sessions()\n rejected_sessions = DataGetter.get_all_rejected_sessions()\n draft_sessions = DataGetter.get_all_draft_sessions()\n email_times = DataGetter.get_email_by_times()\n\n commit_info = None\n version = None\n on_kubernetes = False\n pods_info = None\n\n if KubernetesApi.is_on_kubernetes():\n on_kubernetes = True\n kubernetes_api = KubernetesApi()\n pods_info = kubernetes_api.get_pods()['items']\n version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')\n commit_info = os.getenv('BRANCH', 'development')\n else:\n version = get_latest_heroku_release()\n commit_info = None\n commit_number = None\n if version:\n commit_number = version['description'].split(' ')[1]\n commit_info = get_commit_info(commit_number)\n\n return self.render('/gentelella/admin/super_admin/widgets/index.html',\n events=events,\n version=version,\n commit_info=commit_info,\n on_kubernetes=on_kubernetes,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n number_past_events=number_past_events,\n super_admins=super_admins,\n admins=admins,\n registered_users=registered_users,\n organizers=organizers,\n co_organizers=co_organizers,\n track_organizers=track_organizers,\n attendees=attendees,\n accepted_sessions=accepted_sessions,\n rejected_sessions=rejected_sessions,\n draft_sessions=draft_sessions,\n email_times=email_times)\n", "path": "app/views/admin/super_admin/super_admin.py"}]}
1,059
123
gh_patches_debug_9975
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-852
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- # -*- coding: utf-8 -*- is not consistently used in all Python source files - Cookiecutter version: Cookiecutter 1.4.0 - Template project url: none - Python version: 3.5 - Operating System: OS X ### Description: # -_\- coding: utf-8 -_\- is not consistently used in all Python source files ### What I've run: Nothing, it's an observation of an inconsistency in the project. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import os 4 import io 5 import sys 6 7 from setuptools import setup 8 9 version = "1.4.0" 10 11 if sys.argv[-1] == 'publish': 12 os.system('python setup.py sdist upload') 13 os.system('python setup.py bdist_wheel upload') 14 sys.exit() 15 16 if sys.argv[-1] == 'tag': 17 os.system("git tag -a %s -m 'version %s'" % (version, version)) 18 os.system("git push --tags") 19 sys.exit() 20 21 with io.open('README.rst', 'r', encoding='utf-8') as readme_file: 22 readme = readme_file.read() 23 24 with io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file: 25 history = history_file.read().replace('.. :changelog:', '') 26 27 requirements = [ 28 'future>=0.15.2', 29 'binaryornot>=0.2.0', 30 'jinja2>=2.7', 31 'click>=5.0', 32 'whichcraft>=0.4.0', 33 'poyo>=0.1.0', 34 'jinja2-time>=0.1.0' 35 ] 36 37 long_description = readme + '\n\n' + history 38 39 if sys.argv[-1] == 'readme': 40 print(long_description) 41 sys.exit() 42 43 44 setup( 45 name='cookiecutter', 46 version=version, 47 description=('A command-line utility that creates projects from project ' 48 'templates, e.g. creating a Python package project from a ' 49 'Python package project template.'), 50 long_description=long_description, 51 author='Audrey Roy', 52 author_email='[email protected]', 53 url='https://github.com/audreyr/cookiecutter', 54 packages=[ 55 'cookiecutter', 56 ], 57 package_dir={'cookiecutter': 'cookiecutter'}, 58 entry_points={ 59 'console_scripts': [ 60 'cookiecutter = cookiecutter.__main__:main', 61 ] 62 }, 63 include_package_data=True, 64 install_requires=requirements, 65 license='BSD', 66 zip_safe=False, 67 classifiers=[ 68 'Development Status :: 5 - Production/Stable', 69 'Environment :: Console', 70 'Intended Audience :: Developers', 71 'Natural Language :: English', 72 'License :: OSI Approved :: BSD License', 73 'Programming Language :: Python', 74 'Programming Language :: Python :: 2', 75 'Programming Language :: Python :: 2.7', 76 'Programming Language :: Python :: 3', 77 'Programming Language :: Python :: 3.3', 78 'Programming Language :: Python :: 3.4', 79 'Programming Language :: Python :: 3.5', 80 'Programming Language :: Python :: Implementation :: CPython', 81 'Programming Language :: Python :: Implementation :: PyPy', 82 'Topic :: Software Development', 83 ], 84 keywords=( 85 'cookiecutter, Python, projects, project templates, Jinja2, ' 86 'skeleton, scaffolding, project directory, setup.py, package, ' 87 'packaging' 88 ), 89 ) 90 ``` Path: `cookiecutter/__main__.py` Content: ``` 1 """Allow cookiecutter to be executable through `python -m cookiecutter`.""" 2 from __future__ import absolute_import 3 4 from .cli import main 5 6 7 if __name__ == "__main__": # pragma: no cover 8 main(prog_name="cookiecutter") 9 ``` Path: `__main__.py` Content: ``` 1 """Allow cookiecutter to be executable from a checkout or zip file.""" 2 import runpy 3 4 5 
if __name__ == "__main__": 6 runpy.run_module("cookiecutter", run_name="__main__") 7 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/__main__.py b/__main__.py --- a/__main__.py +++ b/__main__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Allow cookiecutter to be executable from a checkout or zip file.""" import runpy diff --git a/cookiecutter/__main__.py b/cookiecutter/__main__.py --- a/cookiecutter/__main__.py +++ b/cookiecutter/__main__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- """Allow cookiecutter to be executable through `python -m cookiecutter`.""" from __future__ import absolute_import diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# -*- coding: utf-8 -*- import os import io
{"golden_diff": "diff --git a/__main__.py b/__main__.py\n--- a/__main__.py\n+++ b/__main__.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n \"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\n import runpy\n \ndiff --git a/cookiecutter/__main__.py b/cookiecutter/__main__.py\n--- a/cookiecutter/__main__.py\n+++ b/cookiecutter/__main__.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n \"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\n from __future__ import absolute_import\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n #!/usr/bin/env python\n+# -*- coding: utf-8 -*-\n \n import os\n import io\n", "issue": "# -*- coding: utf-8 -*- is not consistently used in all Python source files\n- Cookiecutter version:\n Cookiecutter 1.4.0\n- Template project url:\n none\n- Python version:\n 3.5\n- Operating System:\n OS X\n ### Description:\n # -_\\- coding: utf-8 -_\\- is not consistently used in all Python source files\n### What I've run:\n\nNothing, it's an observation of an inconsistency in the project.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.4.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nwith io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "\"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\nfrom __future__ import absolute_import\n\nfrom .cli import main\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main(prog_name=\"cookiecutter\")\n", "path": "cookiecutter/__main__.py"}, {"content": "\"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\nimport runpy\n\n\nif __name__ == \"__main__\":\n runpy.run_module(\"cookiecutter\", run_name=\"__main__\")\n", "path": "__main__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.4.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nwith io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\nfrom __future__ import absolute_import\n\nfrom .cli import main\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main(prog_name=\"cookiecutter\")\n", "path": "cookiecutter/__main__.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\nimport runpy\n\n\nif __name__ == \"__main__\":\n runpy.run_module(\"cookiecutter\", run_name=\"__main__\")\n", "path": "__main__.py"}]}
1,353
205
gh_patches_debug_6498
rasdani/github-patches
git_diff
abey79__vpype-683
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Examples in `lreverse`'help are wrong As title says ☝🏻 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vpype_cli/layerops.py` Content: ``` 1 from __future__ import annotations 2 3 import random 4 5 import click 6 7 import vpype as vp 8 9 from .cli import cli 10 from .decorators import global_processor 11 from .types import LayerType, multiple_to_layer_ids, single_to_layer_id 12 13 __all__ = ("lcopy", "lmove", "ldelete", "lreverse", "lswap") 14 15 16 @cli.command(group="Layers") 17 @click.argument("sources", type=LayerType(accept_multiple=True)) 18 @click.argument("dest", type=LayerType(accept_new=True)) 19 @click.option( 20 "-p", 21 "--prob", 22 type=click.FloatRange(0.0, 1.0), 23 help="Path copy probability (default: 1.0).", 24 ) 25 @click.option("-m", "--no-prop", is_flag=True, help="Do not copy metadata.") 26 @global_processor 27 def lcopy(document, sources, dest, prob: float | None, no_prop: bool): 28 """Copy the content of one or more layer(s) to another layer. 29 30 SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers, 31 or a coma-separated, whitespace-free list of layer IDs. 32 33 DEST can be a layer ID or the string 'new', in which case a new layer with the 34 lowest available ID will be created. 35 36 If a layer is both in the source and destination, its content is not duplicated. 37 38 The `--prob` option controls the probability with which each path is copied. With a value 39 lower than 1.0, some paths will not be copied to DEST, which may be used to achieve random 40 coloring effects. 41 42 If a single source layer is specified and the `--prob` option is not used, the properties 43 of the source layer are copied to the destination layer, overwriting any existing 44 properties with the same name. This behaviour can be disabled with the `--no-prop` option. 45 46 Examples: 47 Copy layer 1 to a new layer: 48 49 vpype [...] lcopy 1 new [...] # duplicate layer 1 50 51 Make a new layer with a merged copy of layer 1 and 2: 52 53 vpype [...] lcopy 1,2 new [...] # make new layer with merged copy of layer 1 and 2 54 55 Add a merged copy of all layers to layer 1. If layer 1 previously had content, this \ 56 content is not duplicated: 57 58 vpype [...] lcopy all 1 [...] 
59 """ 60 61 src_lids = multiple_to_layer_ids(sources, document) 62 dest_lid = single_to_layer_id(dest, document) 63 64 if dest_lid in src_lids: 65 src_lids.remove(dest_lid) 66 67 lc = vp.LineCollection() 68 for lid in src_lids: 69 if prob is not None: 70 for line in document[lid]: 71 if random.random() < prob: 72 lc.append(line) 73 else: 74 lc.extend(document[lid]) 75 76 if len(lc) > 0: 77 document.add(lc, dest_lid) 78 79 if len(src_lids) == 1 and prob is None and not no_prop: 80 document.layers[dest_lid].metadata.update(document.layers[src_lids[0]].metadata) 81 82 return document 83 84 85 @cli.command(group="Layers") 86 @click.argument("sources", type=LayerType(accept_multiple=True)) 87 @click.argument("dest", type=LayerType(accept_new=True)) 88 @click.option( 89 "-p", 90 "--prob", 91 type=click.FloatRange(0.0, 1.0), 92 help="Path move probability (default: 1.0).", 93 ) 94 @click.option("-m", "--no-prop", is_flag=True, help="Do not move metadata.") 95 @global_processor 96 def lmove(document, sources, dest, prob: float | None, no_prop: bool): 97 """Move the content of one or more layer(s) to another layer. 98 99 SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers, 100 or a coma-separated, whitespace-free list of layer IDs. 101 102 DEST can be a layer ID or the string 'new', in which case a new layer with the 103 lowest available ID will be created. 104 105 Layer(s) left empty after the move are then discarded and may thus be reused by subsequent 106 commands using 'new' as destination layer. 107 108 The `--prob` option controls the probability with which each path is moved. With a value 109 lower than 1.0, some paths will not be moved to DEST, which may be used to achieve random 110 coloring effects. 111 112 If a layer is both in the source and destination, its content is not duplicated. 113 114 If a single source layer is specified and the `--prob` option is not used, the properties 115 of the source layer are moved to the destination layer, overwriting any existing 116 properties with the same name. This behaviour can be disabled with the `--no-prop` option. 117 118 Examples: 119 Merge layer 1 and 2 to layer 1 (the content of layer 1 is not duplicated): 120 121 vpype [...] lmove 1,2 1 [...] 
# merge layer 1 and 2 to layer 1 122 """ 123 124 src_lids = multiple_to_layer_ids(sources, document) 125 dest_lid = single_to_layer_id(dest, document) 126 127 if dest_lid in document.layers: 128 dest_lc = document.layers[dest_lid].clone() 129 else: 130 dest_lc = vp.LineCollection() 131 132 move_metadata = len(src_lids) == 1 and prob is None and not no_prop 133 source_metadata = document.layers[src_lids[0]].metadata if move_metadata else {} 134 135 for lid in src_lids: 136 if prob is not None: 137 # split lines with provided probability 138 remaining_lines = vp.LineCollection() 139 moving_lines = vp.LineCollection() 140 for line in document.layers[lid]: 141 if random.random() < prob: 142 moving_lines.append(line) 143 else: 144 remaining_lines.append(line) 145 146 if len(remaining_lines) > 0: 147 document.replace(remaining_lines, lid) 148 else: 149 document.pop(lid) 150 151 if len(moving_lines) > 0: 152 dest_lc.extend(moving_lines) 153 else: 154 dest_lc.extend(document.pop(lid)) 155 if move_metadata: 156 dest_lc.metadata.update(source_metadata) 157 158 if len(dest_lc) > 0: 159 document.add(dest_lc, dest_lid, with_metadata=True) 160 return document 161 162 163 @cli.command(group="Layers") 164 @click.argument("layers", type=LayerType(accept_multiple=True)) 165 @click.option( 166 "-k", "--keep", is_flag=True, help="Specified layers must be kept instead of deleted." 167 ) 168 @click.option( 169 "-p", 170 "--prob", 171 type=click.FloatRange(0.0, 1.0), 172 help="Path deletion probability (default: 1.0).", 173 ) 174 @global_processor 175 def ldelete(document: vp.Document, layers, keep: bool, prob: float | None) -> vp.Document: 176 """Delete one or more layers. 177 178 LAYERS can be a single layer ID, the string 'all' (to delete all layers), or a 179 coma-separated, whitespace-free list of layer IDs. 180 181 If the `--keep` option is used, the specified layers are kept and, instead, all other 182 layers deleted. 183 184 The `--prob` option controls the probability with which each path is deleted. With a value 185 lower than 1.0, some paths will not be deleted. 186 """ 187 188 lids = set(multiple_to_layer_ids(layers, document)) 189 190 if keep: 191 lids = document.layers.keys() - lids 192 193 for lid in lids: 194 if prob is not None: 195 lc = document.layers[lid].clone() 196 for line in document[lid]: 197 if not random.random() < prob: 198 lc.append(line) 199 200 if len(lc) == 0: 201 document.pop(lid) 202 else: 203 document[lid] = lc 204 else: 205 document.pop(lid) 206 207 return document 208 209 210 @cli.command(group="Layers") 211 @click.argument("first", type=LayerType(accept_multiple=False, accept_new=False)) 212 @click.argument("second", type=LayerType(accept_multiple=False, accept_new=False)) 213 @click.option( 214 "-p", 215 "--prob", 216 type=click.FloatRange(0.0, 1.0), 217 help="Path deletion probability (default: 1.0).", 218 ) 219 @click.option("-m", "--no-prop", is_flag=True, help="Do not move metadata.") 220 @global_processor 221 def lswap( 222 document: vp.Document, first: int, second: int, prob: float | None, no_prop: bool 223 ) -> vp.Document: 224 """Swap the content between two layers 225 226 This command swaps the content of layers FIRST and SECOND. Both FIRST and SECOND must be 227 existing layer ids. 228 229 The `--prob` option controls the probability with which each path are swapped. With a value 230 lower than 1.0, some paths will remain in their original layer. 231 232 If the `--prob` option is not used, the layer properties are swapped between layers as 233 well. 
This behaviour can be disabled with the `--no-prop` option. 234 """ 235 236 first_lid = single_to_layer_id(first, document, must_exist=True) 237 second_lid = single_to_layer_id(second, document, must_exist=True) 238 239 if prob is None: 240 document.swap_content(first_lid, second_lid) 241 if not no_prop: 242 document.layers[first_lid].metadata, document.layers[second_lid].metadata = ( 243 document.layers[second_lid].metadata, 244 document.layers[first_lid].metadata, 245 ) 246 else: 247 new_first = vp.LineCollection() 248 new_second = vp.LineCollection() 249 250 for line in document.layers[first_lid]: 251 (new_second if random.random() < prob else new_first).append(line) 252 for line in document.layers[second_lid]: 253 (new_first if random.random() < prob else new_second).append(line) 254 255 document.replace(new_first, first_lid) 256 document.replace(new_second, second_lid) 257 258 return document 259 260 261 @cli.command(group="Layers") 262 @click.argument("layers", type=LayerType(accept_multiple=True, accept_new=False)) 263 @global_processor 264 def lreverse(document: vp.Document, layers) -> vp.Document: 265 """Reverse the path order within one or more layers. 266 267 This command reverses the order in which paths are ordered within layer(s) LAYERS. LAYERS 268 may be a single layer ID, multiple layer IDs (coma-separated without whitespace) or `all` 269 (to refer to every exising layers). 270 271 Examples: 272 Delete layer one: 273 274 $ vpype [...] ldelete 1 [...] 275 276 Delete layers 1 and 2: 277 278 $ vpype [...] ldelete 1,2 [...] 279 280 Delete all layers: 281 282 $ vpype [...] ldelete all [...] 283 """ 284 285 lids = set(multiple_to_layer_ids(layers, document)) 286 287 for layer_id in lids: 288 document.layers[layer_id].reverse() 289 290 return document 291 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vpype_cli/layerops.py b/vpype_cli/layerops.py --- a/vpype_cli/layerops.py +++ b/vpype_cli/layerops.py @@ -269,17 +269,9 @@ (to refer to every exising layers). Examples: - Delete layer one: + Reverse path order in layer 1: - $ vpype [...] ldelete 1 [...] - - Delete layers 1 and 2: - - $ vpype [...] ldelete 1,2 [...] - - Delete all layers: - - $ vpype [...] ldelete all [...] + $ vpype [...] lreverse 1 [...] """ lids = set(multiple_to_layer_ids(layers, document))
{"golden_diff": "diff --git a/vpype_cli/layerops.py b/vpype_cli/layerops.py\n--- a/vpype_cli/layerops.py\n+++ b/vpype_cli/layerops.py\n@@ -269,17 +269,9 @@\n (to refer to every exising layers).\n \n Examples:\n- Delete layer one:\n+ Reverse path order in layer 1:\n \n- $ vpype [...] ldelete 1 [...]\n-\n- Delete layers 1 and 2:\n-\n- $ vpype [...] ldelete 1,2 [...]\n-\n- Delete all layers:\n-\n- $ vpype [...] ldelete all [...]\n+ $ vpype [...] lreverse 1 [...]\n \"\"\"\n \n lids = set(multiple_to_layer_ids(layers, document))\n", "issue": "Examples in `lreverse`'help are wrong\nAs title says \u261d\ud83c\udffb \n", "before_files": [{"content": "from __future__ import annotations\n\nimport random\n\nimport click\n\nimport vpype as vp\n\nfrom .cli import cli\nfrom .decorators import global_processor\nfrom .types import LayerType, multiple_to_layer_ids, single_to_layer_id\n\n__all__ = (\"lcopy\", \"lmove\", \"ldelete\", \"lreverse\", \"lswap\")\n\n\[email protected](group=\"Layers\")\[email protected](\"sources\", type=LayerType(accept_multiple=True))\[email protected](\"dest\", type=LayerType(accept_new=True))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path copy probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not copy metadata.\")\n@global_processor\ndef lcopy(document, sources, dest, prob: float | None, no_prop: bool):\n \"\"\"Copy the content of one or more layer(s) to another layer.\n\n SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,\n or a coma-separated, whitespace-free list of layer IDs.\n\n DEST can be a layer ID or the string 'new', in which case a new layer with the\n lowest available ID will be created.\n\n If a layer is both in the source and destination, its content is not duplicated.\n\n The `--prob` option controls the probability with which each path is copied. With a value\n lower than 1.0, some paths will not be copied to DEST, which may be used to achieve random\n coloring effects.\n\n If a single source layer is specified and the `--prob` option is not used, the properties\n of the source layer are copied to the destination layer, overwriting any existing\n properties with the same name. This behaviour can be disabled with the `--no-prop` option.\n\n Examples:\n Copy layer 1 to a new layer:\n\n vpype [...] lcopy 1 new [...] # duplicate layer 1\n\n Make a new layer with a merged copy of layer 1 and 2:\n\n vpype [...] lcopy 1,2 new [...] # make new layer with merged copy of layer 1 and 2\n\n Add a merged copy of all layers to layer 1. If layer 1 previously had content, this \\\ncontent is not duplicated:\n\n vpype [...] 
lcopy all 1 [...]\n \"\"\"\n\n src_lids = multiple_to_layer_ids(sources, document)\n dest_lid = single_to_layer_id(dest, document)\n\n if dest_lid in src_lids:\n src_lids.remove(dest_lid)\n\n lc = vp.LineCollection()\n for lid in src_lids:\n if prob is not None:\n for line in document[lid]:\n if random.random() < prob:\n lc.append(line)\n else:\n lc.extend(document[lid])\n\n if len(lc) > 0:\n document.add(lc, dest_lid)\n\n if len(src_lids) == 1 and prob is None and not no_prop:\n document.layers[dest_lid].metadata.update(document.layers[src_lids[0]].metadata)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"sources\", type=LayerType(accept_multiple=True))\[email protected](\"dest\", type=LayerType(accept_new=True))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path move probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not move metadata.\")\n@global_processor\ndef lmove(document, sources, dest, prob: float | None, no_prop: bool):\n \"\"\"Move the content of one or more layer(s) to another layer.\n\n SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,\n or a coma-separated, whitespace-free list of layer IDs.\n\n DEST can be a layer ID or the string 'new', in which case a new layer with the\n lowest available ID will be created.\n\n Layer(s) left empty after the move are then discarded and may thus be reused by subsequent\n commands using 'new' as destination layer.\n\n The `--prob` option controls the probability with which each path is moved. With a value\n lower than 1.0, some paths will not be moved to DEST, which may be used to achieve random\n coloring effects.\n\n If a layer is both in the source and destination, its content is not duplicated.\n\n If a single source layer is specified and the `--prob` option is not used, the properties\n of the source layer are moved to the destination layer, overwriting any existing\n properties with the same name. This behaviour can be disabled with the `--no-prop` option.\n\n Examples:\n Merge layer 1 and 2 to layer 1 (the content of layer 1 is not duplicated):\n\n vpype [...] lmove 1,2 1 [...] 
# merge layer 1 and 2 to layer 1\n \"\"\"\n\n src_lids = multiple_to_layer_ids(sources, document)\n dest_lid = single_to_layer_id(dest, document)\n\n if dest_lid in document.layers:\n dest_lc = document.layers[dest_lid].clone()\n else:\n dest_lc = vp.LineCollection()\n\n move_metadata = len(src_lids) == 1 and prob is None and not no_prop\n source_metadata = document.layers[src_lids[0]].metadata if move_metadata else {}\n\n for lid in src_lids:\n if prob is not None:\n # split lines with provided probability\n remaining_lines = vp.LineCollection()\n moving_lines = vp.LineCollection()\n for line in document.layers[lid]:\n if random.random() < prob:\n moving_lines.append(line)\n else:\n remaining_lines.append(line)\n\n if len(remaining_lines) > 0:\n document.replace(remaining_lines, lid)\n else:\n document.pop(lid)\n\n if len(moving_lines) > 0:\n dest_lc.extend(moving_lines)\n else:\n dest_lc.extend(document.pop(lid))\n if move_metadata:\n dest_lc.metadata.update(source_metadata)\n\n if len(dest_lc) > 0:\n document.add(dest_lc, dest_lid, with_metadata=True)\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"layers\", type=LayerType(accept_multiple=True))\[email protected](\n \"-k\", \"--keep\", is_flag=True, help=\"Specified layers must be kept instead of deleted.\"\n)\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path deletion probability (default: 1.0).\",\n)\n@global_processor\ndef ldelete(document: vp.Document, layers, keep: bool, prob: float | None) -> vp.Document:\n \"\"\"Delete one or more layers.\n\n LAYERS can be a single layer ID, the string 'all' (to delete all layers), or a\n coma-separated, whitespace-free list of layer IDs.\n\n If the `--keep` option is used, the specified layers are kept and, instead, all other\n layers deleted.\n\n The `--prob` option controls the probability with which each path is deleted. With a value\n lower than 1.0, some paths will not be deleted.\n \"\"\"\n\n lids = set(multiple_to_layer_ids(layers, document))\n\n if keep:\n lids = document.layers.keys() - lids\n\n for lid in lids:\n if prob is not None:\n lc = document.layers[lid].clone()\n for line in document[lid]:\n if not random.random() < prob:\n lc.append(line)\n\n if len(lc) == 0:\n document.pop(lid)\n else:\n document[lid] = lc\n else:\n document.pop(lid)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"first\", type=LayerType(accept_multiple=False, accept_new=False))\[email protected](\"second\", type=LayerType(accept_multiple=False, accept_new=False))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path deletion probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not move metadata.\")\n@global_processor\ndef lswap(\n document: vp.Document, first: int, second: int, prob: float | None, no_prop: bool\n) -> vp.Document:\n \"\"\"Swap the content between two layers\n\n This command swaps the content of layers FIRST and SECOND. Both FIRST and SECOND must be\n existing layer ids.\n\n The `--prob` option controls the probability with which each path are swapped. With a value\n lower than 1.0, some paths will remain in their original layer.\n\n If the `--prob` option is not used, the layer properties are swapped between layers as\n well. 
This behaviour can be disabled with the `--no-prop` option.\n \"\"\"\n\n first_lid = single_to_layer_id(first, document, must_exist=True)\n second_lid = single_to_layer_id(second, document, must_exist=True)\n\n if prob is None:\n document.swap_content(first_lid, second_lid)\n if not no_prop:\n document.layers[first_lid].metadata, document.layers[second_lid].metadata = (\n document.layers[second_lid].metadata,\n document.layers[first_lid].metadata,\n )\n else:\n new_first = vp.LineCollection()\n new_second = vp.LineCollection()\n\n for line in document.layers[first_lid]:\n (new_second if random.random() < prob else new_first).append(line)\n for line in document.layers[second_lid]:\n (new_first if random.random() < prob else new_second).append(line)\n\n document.replace(new_first, first_lid)\n document.replace(new_second, second_lid)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"layers\", type=LayerType(accept_multiple=True, accept_new=False))\n@global_processor\ndef lreverse(document: vp.Document, layers) -> vp.Document:\n \"\"\"Reverse the path order within one or more layers.\n\n This command reverses the order in which paths are ordered within layer(s) LAYERS. LAYERS\n may be a single layer ID, multiple layer IDs (coma-separated without whitespace) or `all`\n (to refer to every exising layers).\n\n Examples:\n Delete layer one:\n\n $ vpype [...] ldelete 1 [...]\n\n Delete layers 1 and 2:\n\n $ vpype [...] ldelete 1,2 [...]\n\n Delete all layers:\n\n $ vpype [...] ldelete all [...]\n \"\"\"\n\n lids = set(multiple_to_layer_ids(layers, document))\n\n for layer_id in lids:\n document.layers[layer_id].reverse()\n\n return document\n", "path": "vpype_cli/layerops.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport random\n\nimport click\n\nimport vpype as vp\n\nfrom .cli import cli\nfrom .decorators import global_processor\nfrom .types import LayerType, multiple_to_layer_ids, single_to_layer_id\n\n__all__ = (\"lcopy\", \"lmove\", \"ldelete\", \"lreverse\", \"lswap\")\n\n\[email protected](group=\"Layers\")\[email protected](\"sources\", type=LayerType(accept_multiple=True))\[email protected](\"dest\", type=LayerType(accept_new=True))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path copy probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not copy metadata.\")\n@global_processor\ndef lcopy(document, sources, dest, prob: float | None, no_prop: bool):\n \"\"\"Copy the content of one or more layer(s) to another layer.\n\n SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,\n or a coma-separated, whitespace-free list of layer IDs.\n\n DEST can be a layer ID or the string 'new', in which case a new layer with the\n lowest available ID will be created.\n\n If a layer is both in the source and destination, its content is not duplicated.\n\n The `--prob` option controls the probability with which each path is copied. With a value\n lower than 1.0, some paths will not be copied to DEST, which may be used to achieve random\n coloring effects.\n\n If a single source layer is specified and the `--prob` option is not used, the properties\n of the source layer are copied to the destination layer, overwriting any existing\n properties with the same name. This behaviour can be disabled with the `--no-prop` option.\n\n Examples:\n Copy layer 1 to a new layer:\n\n vpype [...] lcopy 1 new [...] 
# duplicate layer 1\n\n Make a new layer with a merged copy of layer 1 and 2:\n\n vpype [...] lcopy 1,2 new [...] # make new layer with merged copy of layer 1 and 2\n\n Add a merged copy of all layers to layer 1. If layer 1 previously had content, this \\\ncontent is not duplicated:\n\n vpype [...] lcopy all 1 [...]\n \"\"\"\n\n src_lids = multiple_to_layer_ids(sources, document)\n dest_lid = single_to_layer_id(dest, document)\n\n if dest_lid in src_lids:\n src_lids.remove(dest_lid)\n\n lc = vp.LineCollection()\n for lid in src_lids:\n if prob is not None:\n for line in document[lid]:\n if random.random() < prob:\n lc.append(line)\n else:\n lc.extend(document[lid])\n\n if len(lc) > 0:\n document.add(lc, dest_lid)\n\n if len(src_lids) == 1 and prob is None and not no_prop:\n document.layers[dest_lid].metadata.update(document.layers[src_lids[0]].metadata)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"sources\", type=LayerType(accept_multiple=True))\[email protected](\"dest\", type=LayerType(accept_new=True))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path move probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not move metadata.\")\n@global_processor\ndef lmove(document, sources, dest, prob: float | None, no_prop: bool):\n \"\"\"Move the content of one or more layer(s) to another layer.\n\n SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,\n or a coma-separated, whitespace-free list of layer IDs.\n\n DEST can be a layer ID or the string 'new', in which case a new layer with the\n lowest available ID will be created.\n\n Layer(s) left empty after the move are then discarded and may thus be reused by subsequent\n commands using 'new' as destination layer.\n\n The `--prob` option controls the probability with which each path is moved. With a value\n lower than 1.0, some paths will not be moved to DEST, which may be used to achieve random\n coloring effects.\n\n If a layer is both in the source and destination, its content is not duplicated.\n\n If a single source layer is specified and the `--prob` option is not used, the properties\n of the source layer are moved to the destination layer, overwriting any existing\n properties with the same name. This behaviour can be disabled with the `--no-prop` option.\n\n Examples:\n Merge layer 1 and 2 to layer 1 (the content of layer 1 is not duplicated):\n\n vpype [...] lmove 1,2 1 [...] 
# merge layer 1 and 2 to layer 1\n \"\"\"\n\n src_lids = multiple_to_layer_ids(sources, document)\n dest_lid = single_to_layer_id(dest, document)\n\n if dest_lid in document.layers:\n dest_lc = document.layers[dest_lid].clone()\n else:\n dest_lc = vp.LineCollection()\n\n move_metadata = len(src_lids) == 1 and prob is None and not no_prop\n source_metadata = document.layers[src_lids[0]].metadata if move_metadata else {}\n\n for lid in src_lids:\n if prob is not None:\n # split lines with provided probability\n remaining_lines = vp.LineCollection()\n moving_lines = vp.LineCollection()\n for line in document.layers[lid]:\n if random.random() < prob:\n moving_lines.append(line)\n else:\n remaining_lines.append(line)\n\n if len(remaining_lines) > 0:\n document.replace(remaining_lines, lid)\n else:\n document.pop(lid)\n\n if len(moving_lines) > 0:\n dest_lc.extend(moving_lines)\n else:\n dest_lc.extend(document.pop(lid))\n if move_metadata:\n dest_lc.metadata.update(source_metadata)\n\n if len(dest_lc) > 0:\n document.add(dest_lc, dest_lid, with_metadata=True)\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"layers\", type=LayerType(accept_multiple=True))\[email protected](\n \"-k\", \"--keep\", is_flag=True, help=\"Specified layers must be kept instead of deleted.\"\n)\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path deletion probability (default: 1.0).\",\n)\n@global_processor\ndef ldelete(document: vp.Document, layers, keep: bool, prob: float | None) -> vp.Document:\n \"\"\"Delete one or more layers.\n\n LAYERS can be a single layer ID, the string 'all' (to delete all layers), or a\n coma-separated, whitespace-free list of layer IDs.\n\n If the `--keep` option is used, the specified layers are kept and, instead, all other\n layers deleted.\n\n The `--prob` option controls the probability with which each path is deleted. With a value\n lower than 1.0, some paths will not be deleted.\n \"\"\"\n\n lids = set(multiple_to_layer_ids(layers, document))\n\n if keep:\n lids = document.layers.keys() - lids\n\n for lid in lids:\n if prob is not None:\n lc = document.layers[lid].clone()\n for line in document[lid]:\n if not random.random() < prob:\n lc.append(line)\n\n if len(lc) == 0:\n document.pop(lid)\n else:\n document[lid] = lc\n else:\n document.pop(lid)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"first\", type=LayerType(accept_multiple=False, accept_new=False))\[email protected](\"second\", type=LayerType(accept_multiple=False, accept_new=False))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path deletion probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not move metadata.\")\n@global_processor\ndef lswap(\n document: vp.Document, first: int, second: int, prob: float | None, no_prop: bool\n) -> vp.Document:\n \"\"\"Swap the content between two layers\n\n This command swaps the content of layers FIRST and SECOND. Both FIRST and SECOND must be\n existing layer ids.\n\n The `--prob` option controls the probability with which each path are swapped. With a value\n lower than 1.0, some paths will remain in their original layer.\n\n If the `--prob` option is not used, the layer properties are swapped between layers as\n well. 
This behaviour can be disabled with the `--no-prop` option.\n \"\"\"\n\n first_lid = single_to_layer_id(first, document, must_exist=True)\n second_lid = single_to_layer_id(second, document, must_exist=True)\n\n if prob is None:\n document.swap_content(first_lid, second_lid)\n if not no_prop:\n document.layers[first_lid].metadata, document.layers[second_lid].metadata = (\n document.layers[second_lid].metadata,\n document.layers[first_lid].metadata,\n )\n else:\n new_first = vp.LineCollection()\n new_second = vp.LineCollection()\n\n for line in document.layers[first_lid]:\n (new_second if random.random() < prob else new_first).append(line)\n for line in document.layers[second_lid]:\n (new_first if random.random() < prob else new_second).append(line)\n\n document.replace(new_first, first_lid)\n document.replace(new_second, second_lid)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"layers\", type=LayerType(accept_multiple=True, accept_new=False))\n@global_processor\ndef lreverse(document: vp.Document, layers) -> vp.Document:\n \"\"\"Reverse the path order within one or more layers.\n\n This command reverses the order in which paths are ordered within layer(s) LAYERS. LAYERS\n may be a single layer ID, multiple layer IDs (coma-separated without whitespace) or `all`\n (to refer to every exising layers).\n\n Examples:\n Reverse path order in layer 1:\n\n $ vpype [...] lreverse 1 [...]\n \"\"\"\n\n lids = set(multiple_to_layer_ids(layers, document))\n\n for layer_id in lids:\n document.layers[layer_id].reverse()\n\n return document\n", "path": "vpype_cli/layerops.py"}]}
3,496
175
gh_patches_debug_50127
rasdani/github-patches
git_diff
scrapy__scrapy-4481
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- responsetypes.py ResponseTypes.from_headers typo as 'Content-type' Shall this line: https://github.com/scrapy/scrapy/blob/8845773d44329194ee73fa4985a94e768bf664e7/scrapy/responsetypes.py#L74 be changed to below? ` content_type=headers[b'Content-Type'], ` I guess this typo is with all the versions until today. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/responsetypes.py` Content: ``` 1 """ 2 This module implements a class which returns the appropriate Response class 3 based on different criteria. 4 """ 5 from mimetypes import MimeTypes 6 from pkgutil import get_data 7 from io import StringIO 8 9 from scrapy.http import Response 10 from scrapy.utils.misc import load_object 11 from scrapy.utils.python import binary_is_text, to_bytes, to_unicode 12 13 14 class ResponseTypes: 15 16 CLASSES = { 17 'text/html': 'scrapy.http.HtmlResponse', 18 'application/atom+xml': 'scrapy.http.XmlResponse', 19 'application/rdf+xml': 'scrapy.http.XmlResponse', 20 'application/rss+xml': 'scrapy.http.XmlResponse', 21 'application/xhtml+xml': 'scrapy.http.HtmlResponse', 22 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse', 23 'application/xml': 'scrapy.http.XmlResponse', 24 'application/json': 'scrapy.http.TextResponse', 25 'application/x-json': 'scrapy.http.TextResponse', 26 'application/json-amazonui-streaming': 'scrapy.http.TextResponse', 27 'application/javascript': 'scrapy.http.TextResponse', 28 'application/x-javascript': 'scrapy.http.TextResponse', 29 'text/xml': 'scrapy.http.XmlResponse', 30 'text/*': 'scrapy.http.TextResponse', 31 } 32 33 def __init__(self): 34 self.classes = {} 35 self.mimetypes = MimeTypes() 36 mimedata = get_data('scrapy', 'mime.types').decode('utf8') 37 self.mimetypes.readfp(StringIO(mimedata)) 38 for mimetype, cls in self.CLASSES.items(): 39 self.classes[mimetype] = load_object(cls) 40 41 def from_mimetype(self, mimetype): 42 """Return the most appropriate Response class for the given mimetype""" 43 if mimetype is None: 44 return Response 45 elif mimetype in self.classes: 46 return self.classes[mimetype] 47 else: 48 basetype = "%s/*" % mimetype.split('/')[0] 49 return self.classes.get(basetype, Response) 50 51 def from_content_type(self, content_type, content_encoding=None): 52 """Return the most appropriate Response class from an HTTP Content-Type 53 header """ 54 if content_encoding: 55 return Response 56 mimetype = to_unicode(content_type).split(';')[0].strip().lower() 57 return self.from_mimetype(mimetype) 58 59 def from_content_disposition(self, content_disposition): 60 try: 61 filename = to_unicode(content_disposition, 62 encoding='latin-1', errors='replace').split(';')[1].split('=')[1] 63 filename = filename.strip('"\'') 64 return self.from_filename(filename) 65 except IndexError: 66 return Response 67 68 def from_headers(self, headers): 69 """Return the most appropriate Response class by looking at the HTTP 70 headers""" 71 cls = Response 72 if b'Content-Type' in headers: 73 cls = self.from_content_type( 74 content_type=headers[b'Content-type'], 75 content_encoding=headers.get(b'Content-Encoding') 76 ) 77 if cls is Response and b'Content-Disposition' in headers: 78 cls = self.from_content_disposition(headers[b'Content-Disposition']) 79 return cls 80 81 def from_filename(self, filename): 82 """Return the most appropriate Response class from a file 
name""" 83 mimetype, encoding = self.mimetypes.guess_type(filename) 84 if mimetype and not encoding: 85 return self.from_mimetype(mimetype) 86 else: 87 return Response 88 89 def from_body(self, body): 90 """Try to guess the appropriate response based on the body content. 91 This method is a bit magic and could be improved in the future, but 92 it's not meant to be used except for special cases where response types 93 cannot be guess using more straightforward methods.""" 94 chunk = body[:5000] 95 chunk = to_bytes(chunk) 96 if not binary_is_text(chunk): 97 return self.from_mimetype('application/octet-stream') 98 elif b"<html>" in chunk.lower(): 99 return self.from_mimetype('text/html') 100 elif b"<?xml" in chunk.lower(): 101 return self.from_mimetype('text/xml') 102 else: 103 return self.from_mimetype('text') 104 105 def from_args(self, headers=None, url=None, filename=None, body=None): 106 """Guess the most appropriate Response class based on 107 the given arguments.""" 108 cls = Response 109 if headers is not None: 110 cls = self.from_headers(headers) 111 if cls is Response and url is not None: 112 cls = self.from_filename(url) 113 if cls is Response and filename is not None: 114 cls = self.from_filename(filename) 115 if cls is Response and body is not None: 116 cls = self.from_body(body) 117 return cls 118 119 120 responsetypes = ResponseTypes() 121 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py --- a/scrapy/responsetypes.py +++ b/scrapy/responsetypes.py @@ -71,7 +71,7 @@ cls = Response if b'Content-Type' in headers: cls = self.from_content_type( - content_type=headers[b'Content-type'], + content_type=headers[b'Content-Type'], content_encoding=headers.get(b'Content-Encoding') ) if cls is Response and b'Content-Disposition' in headers:
{"golden_diff": "diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py\n--- a/scrapy/responsetypes.py\n+++ b/scrapy/responsetypes.py\n@@ -71,7 +71,7 @@\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n- content_type=headers[b'Content-type'],\n+ content_type=headers[b'Content-Type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n", "issue": "responsetypes.py ResponseTypes.from_headers typo as 'Content-type'\nShall this line:\r\n\r\nhttps://github.com/scrapy/scrapy/blob/8845773d44329194ee73fa4985a94e768bf664e7/scrapy/responsetypes.py#L74\r\n\r\nbe changed to below?\r\n`\r\ncontent_type=headers[b'Content-Type'],\r\n`\r\nI guess this typo is with all the versions until today.\n", "before_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import binary_is_text, to_bytes, to_unicode\n\n\nclass ResponseTypes:\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in self.CLASSES.items():\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = to_unicode(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_unicode(content_disposition,\n encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = 
self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if not binary_is_text(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}], "after_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import binary_is_text, to_bytes, to_unicode\n\n\nclass ResponseTypes:\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in self.CLASSES.items():\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = 
to_unicode(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_unicode(content_disposition,\n encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-Type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if not binary_is_text(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}]}
1617
121
gh_patches_debug_11555
rasdani/github-patches
git_diff
pypa__setuptools-753
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Setuptools doesn't play nice with Numpy See: https://github.com/numpy/numpy/blob/master/numpy/distutils/extension.py#L42-L52 That functionality was broken by: https://github.com/pypa/setuptools/blob/master/setuptools/extension.py#L39-L41 In this pr: https://github.com/pypa/setuptools/pull/718 Here's the the pdb session where I debugged this: ``` (Pdb) old_Extension.__module__ 'setuptools.extension' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/extension.py` Content: ``` 1 import sys 2 import re 3 import functools 4 import distutils.core 5 import distutils.errors 6 import distutils.extension 7 8 from setuptools.extern.six.moves import map 9 10 from .dist import _get_unpatched 11 from . import msvc 12 13 _Extension = _get_unpatched(distutils.core.Extension) 14 15 msvc.patch_for_specialized_compiler() 16 17 18 def _have_cython(): 19 """ 20 Return True if Cython can be imported. 21 """ 22 cython_impl = 'Cython.Distutils.build_ext' 23 try: 24 # from (cython_impl) import build_ext 25 __import__(cython_impl, fromlist=['build_ext']).build_ext 26 return True 27 except Exception: 28 pass 29 return False 30 31 32 # for compatibility 33 have_pyrex = _have_cython 34 35 36 class Extension(_Extension): 37 """Extension that uses '.c' files in place of '.pyx' files""" 38 39 def __init__(self, name, sources, py_limited_api=False, **kw): 40 self.py_limited_api = py_limited_api 41 _Extension.__init__(self, name, sources, **kw) 42 43 def _convert_pyx_sources_to_lang(self): 44 """ 45 Replace sources with .pyx extensions to sources with the target 46 language extension. This mechanism allows language authors to supply 47 pre-converted sources but to prefer the .pyx sources. 48 """ 49 if _have_cython(): 50 # the build has Cython, so allow it to compile the .pyx files 51 return 52 lang = self.language or '' 53 target_ext = '.cpp' if lang.lower() == 'c++' else '.c' 54 sub = functools.partial(re.sub, '.pyx$', target_ext) 55 self.sources = list(map(sub, self.sources)) 56 57 58 class Library(Extension): 59 """Just like a regular Extension, but built as a library instead""" 60 61 62 distutils.core.Extension = Extension 63 distutils.extension.Extension = Extension 64 if 'distutils.command.build_ext' in sys.modules: 65 sys.modules['distutils.command.build_ext'].Extension = Extension 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/extension.py b/setuptools/extension.py --- a/setuptools/extension.py +++ b/setuptools/extension.py @@ -36,9 +36,11 @@ class Extension(_Extension): """Extension that uses '.c' files in place of '.pyx' files""" - def __init__(self, name, sources, py_limited_api=False, **kw): - self.py_limited_api = py_limited_api - _Extension.__init__(self, name, sources, **kw) + def __init__(self, name, sources, *args, **kw): + # The *args is needed for compatibility as calls may use positional + # arguments. py_limited_api may be set only via keyword. + self.py_limited_api = kw.pop("py_limited_api", False) + _Extension.__init__(self, name, sources, *args, **kw) def _convert_pyx_sources_to_lang(self): """
{"golden_diff": "diff --git a/setuptools/extension.py b/setuptools/extension.py\n--- a/setuptools/extension.py\n+++ b/setuptools/extension.py\n@@ -36,9 +36,11 @@\n class Extension(_Extension):\n \"\"\"Extension that uses '.c' files in place of '.pyx' files\"\"\"\n \n- def __init__(self, name, sources, py_limited_api=False, **kw):\n- self.py_limited_api = py_limited_api\n- _Extension.__init__(self, name, sources, **kw)\n+ def __init__(self, name, sources, *args, **kw):\n+ # The *args is needed for compatibility as calls may use positional\n+ # arguments. py_limited_api may be set only via keyword.\n+ self.py_limited_api = kw.pop(\"py_limited_api\", False)\n+ _Extension.__init__(self, name, sources, *args, **kw)\n \n def _convert_pyx_sources_to_lang(self):\n \"\"\"\n", "issue": "Setuptools doesn't play nice with Numpy\nSee: https://github.com/numpy/numpy/blob/master/numpy/distutils/extension.py#L42-L52\n\nThat functionality was broken by:\nhttps://github.com/pypa/setuptools/blob/master/setuptools/extension.py#L39-L41\n\nIn this pr: https://github.com/pypa/setuptools/pull/718\n\nHere's the the pdb session where I debugged this:\n\n```\n(Pdb) old_Extension.__module__\n'setuptools.extension'\n```\n\n", "before_files": [{"content": "import sys\nimport re\nimport functools\nimport distutils.core\nimport distutils.errors\nimport distutils.extension\n\nfrom setuptools.extern.six.moves import map\n\nfrom .dist import _get_unpatched\nfrom . import msvc\n\n_Extension = _get_unpatched(distutils.core.Extension)\n\nmsvc.patch_for_specialized_compiler()\n\n\ndef _have_cython():\n \"\"\"\n Return True if Cython can be imported.\n \"\"\"\n cython_impl = 'Cython.Distutils.build_ext'\n try:\n # from (cython_impl) import build_ext\n __import__(cython_impl, fromlist=['build_ext']).build_ext\n return True\n except Exception:\n pass\n return False\n\n\n# for compatibility\nhave_pyrex = _have_cython\n\n\nclass Extension(_Extension):\n \"\"\"Extension that uses '.c' files in place of '.pyx' files\"\"\"\n\n def __init__(self, name, sources, py_limited_api=False, **kw):\n self.py_limited_api = py_limited_api\n _Extension.__init__(self, name, sources, **kw)\n\n def _convert_pyx_sources_to_lang(self):\n \"\"\"\n Replace sources with .pyx extensions to sources with the target\n language extension. This mechanism allows language authors to supply\n pre-converted sources but to prefer the .pyx sources.\n \"\"\"\n if _have_cython():\n # the build has Cython, so allow it to compile the .pyx files\n return\n lang = self.language or ''\n target_ext = '.cpp' if lang.lower() == 'c++' else '.c'\n sub = functools.partial(re.sub, '.pyx$', target_ext)\n self.sources = list(map(sub, self.sources))\n\n\nclass Library(Extension):\n \"\"\"Just like a regular Extension, but built as a library instead\"\"\"\n\n\ndistutils.core.Extension = Extension\ndistutils.extension.Extension = Extension\nif 'distutils.command.build_ext' in sys.modules:\n sys.modules['distutils.command.build_ext'].Extension = Extension\n", "path": "setuptools/extension.py"}], "after_files": [{"content": "import sys\nimport re\nimport functools\nimport distutils.core\nimport distutils.errors\nimport distutils.extension\n\nfrom setuptools.extern.six.moves import map\n\nfrom .dist import _get_unpatched\nfrom . 
import msvc\n\n_Extension = _get_unpatched(distutils.core.Extension)\n\nmsvc.patch_for_specialized_compiler()\n\n\ndef _have_cython():\n \"\"\"\n Return True if Cython can be imported.\n \"\"\"\n cython_impl = 'Cython.Distutils.build_ext'\n try:\n # from (cython_impl) import build_ext\n __import__(cython_impl, fromlist=['build_ext']).build_ext\n return True\n except Exception:\n pass\n return False\n\n\n# for compatibility\nhave_pyrex = _have_cython\n\n\nclass Extension(_Extension):\n \"\"\"Extension that uses '.c' files in place of '.pyx' files\"\"\"\n\n def __init__(self, name, sources, *args, **kw):\n # The *args is needed for compatibility as calls may use positional\n # arguments. py_limited_api may be set only via keyword.\n self.py_limited_api = kw.pop(\"py_limited_api\", False)\n _Extension.__init__(self, name, sources, *args, **kw)\n\n def _convert_pyx_sources_to_lang(self):\n \"\"\"\n Replace sources with .pyx extensions to sources with the target\n language extension. This mechanism allows language authors to supply\n pre-converted sources but to prefer the .pyx sources.\n \"\"\"\n if _have_cython():\n # the build has Cython, so allow it to compile the .pyx files\n return\n lang = self.language or ''\n target_ext = '.cpp' if lang.lower() == 'c++' else '.c'\n sub = functools.partial(re.sub, '.pyx$', target_ext)\n self.sources = list(map(sub, self.sources))\n\n\nclass Library(Extension):\n \"\"\"Just like a regular Extension, but built as a library instead\"\"\"\n\n\ndistutils.core.Extension = Extension\ndistutils.extension.Extension = Extension\nif 'distutils.command.build_ext' in sys.modules:\n sys.modules['distutils.command.build_ext'].Extension = Extension\n", "path": "setuptools/extension.py"}]}
941
219
gh_patches_debug_22728
rasdani/github-patches
git_diff
holoviz__holoviews-5502
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- holoviews 1.15.1 doesn't work in Jupyterlite #### ALL software version info holoviews: 1.15.1 jupyterlite: Version 0.1.0-beta.13 #### Description of expected behavior and the observed behavior ```python import piplite await piplite.install('holoviews==1.15.1') import holoviews as hv hv.extension('bokeh') hv.Curve([1, 2, 3]) ``` raise following exception: ``` ImportError: cannot import name 'document' from 'js' (unknown location) ``` Here is the reason: https://github.com/jupyterlite/pyodide-kernel/issues/94 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `holoviews/pyodide.py` Content: ``` 1 import asyncio 2 import sys 3 4 5 from bokeh.document import Document 6 from bokeh.embed.elements import script_for_render_items 7 from bokeh.embed.util import standalone_docs_json_and_render_items 8 from bokeh.embed.wrappers import wrap_in_script_tag 9 from panel.io.pyodide import _link_docs 10 from panel.pane import panel as as_panel 11 12 from .core.dimension import LabelledData 13 from .core.options import Store 14 from .util import extension as _extension 15 16 17 #----------------------------------------------------------------------------- 18 # Private API 19 #----------------------------------------------------------------------------- 20 21 async def _link(ref, doc): 22 from js import Bokeh 23 rendered = Bokeh.index.object_keys() 24 if ref not in rendered: 25 await asyncio.sleep(0.1) 26 await _link(ref, doc) 27 return 28 views = Bokeh.index.object_values() 29 view = views[rendered.indexOf(ref)] 30 _link_docs(doc, view.model.document) 31 32 def render_html(obj): 33 from js import document 34 if hasattr(sys.stdout, '_out'): 35 target = sys.stdout._out # type: ignore 36 else: 37 raise ValueError("Could not determine target node to write to.") 38 doc = Document() 39 as_panel(obj).server_doc(doc, location=False) 40 docs_json, [render_item,] = standalone_docs_json_and_render_items( 41 doc.roots, suppress_callback_warning=True 42 ) 43 for root in doc.roots: 44 render_item.roots._roots[root] = target 45 document.getElementById(target).classList.add('bk-root') 46 script = script_for_render_items(docs_json, [render_item]) 47 asyncio.create_task(_link(doc.roots[0].ref['id'], doc)) 48 return {'text/html': wrap_in_script_tag(script)}, {} 49 50 def render_image(element, fmt): 51 """ 52 Used to render elements to an image format (svg or png) if requested 53 in the display formats. 
54 """ 55 if fmt not in Store.display_formats: 56 return None 57 58 backend = Store.current_backend 59 if type(element) not in Store.registry[backend]: 60 return None 61 renderer = Store.renderers[backend] 62 plot = renderer.get_plot(element) 63 64 # Current renderer does not support the image format 65 if fmt not in renderer.param.objects('existing')['fig'].objects: 66 return None 67 68 data, info = renderer(plot, fmt=fmt) 69 return {info['mime_type']: data}, {} 70 71 def render_png(element): 72 return render_image(element, 'png') 73 74 def render_svg(element): 75 return render_image(element, 'svg') 76 77 #----------------------------------------------------------------------------- 78 # Public API 79 #----------------------------------------------------------------------------- 80 81 class pyodide_extension(_extension): 82 83 _loaded = False 84 85 def __call__(self, *args, **params): 86 super().__call__(*args, **params) 87 if not self._loaded: 88 Store.output_settings.initialize(list(Store.renderers.keys())) 89 Store.set_display_hook('html+js', LabelledData, render_html) 90 Store.set_display_hook('png', LabelledData, render_png) 91 Store.set_display_hook('svg', LabelledData, render_svg) 92 pyodide_extension._loaded = True 93 ``` Path: `holoviews/__init__.py` Content: ``` 1 """ 2 HoloViews makes data analysis and visualization simple 3 ====================================================== 4 5 HoloViews lets you focus on what you are trying to explore and convey, not on 6 the process of plotting. 7 8 HoloViews 9 10 - supports a wide range of data sources including Pandas, Dask, XArray 11 Rapids cuDF, Streamz, Intake, Geopandas, NetworkX and Ibis. 12 - supports the plotting backends Bokeh (default), Matplotlib and Plotly. 13 - allows you to drop into the rest of the 14 HoloViz ecosystem when more power or flexibility is needed. 15 16 For basic data exploration we recommend using the higher level hvPlot package, 17 which provides the familiar Pandas `.plot` api. You can drop into HoloViews 18 when needed. 19 20 To learn more check out https://holoviews.org/. To report issues or contribute 21 go to https://github.com/holoviz/holoviews. To join the community go to 22 https://discourse.holoviz.org/. 23 24 How to use HoloViews in 3 simple steps 25 -------------------------------------- 26 27 Work with the data source you already know and ❀️ 28 29 >>> import pandas as pd 30 >>> station_info = pd.read_csv('https://raw.githubusercontent.com/holoviz/holoviews/master/examples/assets/station_info.csv') 31 32 Import HoloViews and configure your plotting backend 33 34 >>> import holoviews as hv 35 >>> hv.extension('bokeh') 36 37 Annotate your data 38 39 >>> scatter = ( 40 ... hv.Scatter(station_info, kdims='services', vdims='ridership') 41 ... .redim( 42 ... services=hv.Dimension("services", label='Services'), 43 ... ridership=hv.Dimension("ridership", label='Ridership'), 44 ... ) 45 ... .opts(size=10, color="red", responsive=True) 46 ... ) 47 >>> scatter 48 49 In a notebook this will display a nice scatter plot. 50 51 Note that the `kdims` (The key dimension(s)) represents the independent 52 variable(s) and the `vdims` (value dimension(s)) the dependent variable(s). 53 54 For more check out https://holoviews.org/getting_started/Introduction.html 55 56 How to get help 57 --------------- 58 59 You can understand the structure of your objects by printing them. 60 61 >>> print(scatter) 62 :Scatter [services] (ridership) 63 64 You can get extensive documentation using `hv.help`. 
65 66 >>> hv.help(scatter) 67 68 In a notebook or ipython environment the usual 69 70 - `help` and `?` will provide you with documentation. 71 - `TAB` and `SHIFT+TAB` completion will help you navigate. 72 73 To ask the community go to https://discourse.holoviz.org/. 74 To report issues go to https://github.com/holoviz/holoviews. 75 """ 76 import io, os, sys 77 78 import numpy as np # noqa (API import) 79 import param 80 81 __version__ = str(param.version.Version(fpath=__file__, archive_commit="$Format:%h$", 82 reponame="holoviews")) 83 84 from . import util # noqa (API import) 85 from .annotators import annotate # noqa (API import) 86 from .core import archive, config # noqa (API import) 87 from .core.boundingregion import BoundingBox # noqa (API import) 88 from .core.dimension import OrderedDict, Dimension # noqa (API import) 89 from .core.element import Element, Collator # noqa (API import) 90 from .core.layout import (Layout, NdLayout, Empty, # noqa (API import) 91 AdjointLayout) 92 from .core.ndmapping import NdMapping # noqa (API import) 93 from .core.options import (Options, Store, Cycle, # noqa (API import) 94 Palette, StoreOptions) 95 from .core.overlay import Overlay, NdOverlay # noqa (API import) 96 from .core.spaces import (HoloMap, Callable, DynamicMap, # noqa (API import) 97 GridSpace, GridMatrix) 98 99 from .operation import Operation # noqa (API import) 100 from .element import * # noqa (API import) 101 from .element import __all__ as elements_list 102 from .selection import link_selections # noqa (API import) 103 from .util import (extension, renderer, output, opts, # noqa (API import) 104 render, save) 105 from .util.transform import dim # noqa (API import) 106 107 # Suppress warnings generated by NumPy in matplotlib 108 # Expected to be fixed in next matplotlib release 109 import warnings 110 warnings.filterwarnings("ignore", 111 message="elementwise comparison failed; returning scalar instead") 112 113 try: 114 import IPython # noqa (API import) 115 from .ipython import notebook_extension 116 extension = notebook_extension # noqa (name remapping) 117 except ImportError: 118 class notebook_extension(param.ParameterizedFunction): 119 def __call__(self, *args, **opts): # noqa (dummy signature) 120 raise Exception("IPython notebook not available: use hv.extension instead.") 121 122 if '_pyodide' in sys.modules: 123 from .pyodide import pyodide_extension as extension # noqa (API import) 124 125 # A single holoviews.rc file may be executed if found. 126 for rcfile in [os.environ.get("HOLOVIEWSRC", ''), 127 os.path.abspath(os.path.join(os.path.split(__file__)[0], 128 '..', 'holoviews.rc')), 129 "~/.holoviews.rc", 130 "~/.config/holoviews/holoviews.rc"]: 131 filename = os.path.expanduser(rcfile) 132 if os.path.isfile(filename): 133 with io.open(filename, encoding='utf8') as f: 134 code = compile(f.read(), filename, 'exec') 135 try: 136 exec(code) 137 except Exception as e: 138 print("Warning: Could not load %r [%r]" % (filename, str(e))) 139 del f, code 140 break 141 del filename 142 143 def help(obj, visualization=True, ansi=True, backend=None, 144 recursive=False, pattern=None): 145 """ 146 Extended version of the built-in help that supports parameterized 147 functions and objects. A pattern (regular expression) may be used to 148 filter the output and if recursive is set to True, documentation for 149 the supplied object is shown. Note that the recursive option will 150 only work with an object instance and not a class. 
151 152 If ansi is set to False, all ANSI color 153 codes are stripped out. 154 """ 155 backend = backend if backend else Store.current_backend 156 info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization, 157 recursive=recursive, pattern=pattern, elements=elements_list) 158 159 msg = ("\nTo view the visualization options applicable to this " 160 "object or class, use:\n\n" 161 " holoviews.help(obj, visualization=True)\n\n") 162 if info: 163 print((msg if visualization is False else '') + info) 164 else: 165 import pydoc 166 pydoc.help(obj) 167 168 169 del io, np, os, rcfile, warnings 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/holoviews/__init__.py b/holoviews/__init__.py --- a/holoviews/__init__.py +++ b/holoviews/__init__.py @@ -120,7 +120,12 @@ raise Exception("IPython notebook not available: use hv.extension instead.") if '_pyodide' in sys.modules: - from .pyodide import pyodide_extension as extension # noqa (API import) + from .pyodide import pyodide_extension, in_jupyterlite + # The notebook_extension is needed inside jupyterlite, + # so the override is only done if we are not inside jupyterlite. + if not in_jupyterlite(): + extension = pyodide_extension + del pyodide_extension, in_jupyterlite # A single holoviews.rc file may be executed if found. for rcfile in [os.environ.get("HOLOVIEWSRC", ''), diff --git a/holoviews/pyodide.py b/holoviews/pyodide.py --- a/holoviews/pyodide.py +++ b/holoviews/pyodide.py @@ -74,6 +74,10 @@ def render_svg(element): return render_image(element, 'svg') +def in_jupyterlite(): + import js + return hasattr(js, "_JUPYTERLAB") + #----------------------------------------------------------------------------- # Public API #-----------------------------------------------------------------------------
{"golden_diff": "diff --git a/holoviews/__init__.py b/holoviews/__init__.py\n--- a/holoviews/__init__.py\n+++ b/holoviews/__init__.py\n@@ -120,7 +120,12 @@\n raise Exception(\"IPython notebook not available: use hv.extension instead.\")\n \n if '_pyodide' in sys.modules:\n- from .pyodide import pyodide_extension as extension # noqa (API import)\n+ from .pyodide import pyodide_extension, in_jupyterlite\n+ # The notebook_extension is needed inside jupyterlite,\n+ # so the override is only done if we are not inside jupyterlite.\n+ if not in_jupyterlite():\n+ extension = pyodide_extension\n+ del pyodide_extension, in_jupyterlite\n \n # A single holoviews.rc file may be executed if found.\n for rcfile in [os.environ.get(\"HOLOVIEWSRC\", ''),\ndiff --git a/holoviews/pyodide.py b/holoviews/pyodide.py\n--- a/holoviews/pyodide.py\n+++ b/holoviews/pyodide.py\n@@ -74,6 +74,10 @@\n def render_svg(element):\n return render_image(element, 'svg')\n \n+def in_jupyterlite():\n+ import js\n+ return hasattr(js, \"_JUPYTERLAB\")\n+\n #-----------------------------------------------------------------------------\n # Public API\n #-----------------------------------------------------------------------------\n", "issue": "holoviews 1.15.1 doesn't work in Jupyterlite\n#### ALL software version info\r\n\r\nholoviews: 1.15.1\r\njupyterlite: Version 0.1.0-beta.13\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\n```python\r\nimport piplite\r\nawait piplite.install('holoviews==1.15.1')\r\n\r\nimport holoviews as hv\r\nhv.extension('bokeh')\r\n\r\nhv.Curve([1, 2, 3])\r\n```\r\n\r\nraise following exception:\r\n\r\n```\r\nImportError: cannot import name 'document' from 'js' (unknown location)\r\n```\r\n\r\nHere is the reason:\r\n\r\nhttps://github.com/jupyterlite/pyodide-kernel/issues/94\n", "before_files": [{"content": "import asyncio\nimport sys\n\n\nfrom bokeh.document import Document\nfrom bokeh.embed.elements import script_for_render_items\nfrom bokeh.embed.util import standalone_docs_json_and_render_items\nfrom bokeh.embed.wrappers import wrap_in_script_tag\nfrom panel.io.pyodide import _link_docs\nfrom panel.pane import panel as as_panel\n\nfrom .core.dimension import LabelledData\nfrom .core.options import Store\nfrom .util import extension as _extension\n\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\nasync def _link(ref, doc):\n from js import Bokeh\n rendered = Bokeh.index.object_keys()\n if ref not in rendered:\n await asyncio.sleep(0.1)\n await _link(ref, doc)\n return\n views = Bokeh.index.object_values()\n view = views[rendered.indexOf(ref)]\n _link_docs(doc, view.model.document)\n\ndef render_html(obj):\n from js import document\n if hasattr(sys.stdout, '_out'):\n target = sys.stdout._out # type: ignore\n else:\n raise ValueError(\"Could not determine target node to write to.\")\n doc = Document()\n as_panel(obj).server_doc(doc, location=False)\n docs_json, [render_item,] = standalone_docs_json_and_render_items(\n doc.roots, suppress_callback_warning=True\n )\n for root in doc.roots:\n render_item.roots._roots[root] = target\n document.getElementById(target).classList.add('bk-root')\n script = script_for_render_items(docs_json, [render_item])\n asyncio.create_task(_link(doc.roots[0].ref['id'], doc))\n return {'text/html': wrap_in_script_tag(script)}, {}\n\ndef render_image(element, fmt):\n \"\"\"\n Used to render elements to an image format (svg 
or png) if requested\n in the display formats.\n \"\"\"\n if fmt not in Store.display_formats:\n return None\n\n backend = Store.current_backend\n if type(element) not in Store.registry[backend]:\n return None\n renderer = Store.renderers[backend]\n plot = renderer.get_plot(element)\n\n # Current renderer does not support the image format\n if fmt not in renderer.param.objects('existing')['fig'].objects:\n return None\n\n data, info = renderer(plot, fmt=fmt)\n return {info['mime_type']: data}, {}\n\ndef render_png(element):\n return render_image(element, 'png')\n\ndef render_svg(element):\n return render_image(element, 'svg')\n\n#-----------------------------------------------------------------------------\n# Public API\n#-----------------------------------------------------------------------------\n\nclass pyodide_extension(_extension):\n\n _loaded = False\n\n def __call__(self, *args, **params):\n super().__call__(*args, **params)\n if not self._loaded:\n Store.output_settings.initialize(list(Store.renderers.keys()))\n Store.set_display_hook('html+js', LabelledData, render_html)\n Store.set_display_hook('png', LabelledData, render_png)\n Store.set_display_hook('svg', LabelledData, render_svg)\n pyodide_extension._loaded = True\n", "path": "holoviews/pyodide.py"}, {"content": "\"\"\"\nHoloViews makes data analysis and visualization simple\n======================================================\n\nHoloViews lets you focus on what you are trying to explore and convey, not on\nthe process of plotting.\n\nHoloViews\n\n- supports a wide range of data sources including Pandas, Dask, XArray\nRapids cuDF, Streamz, Intake, Geopandas, NetworkX and Ibis.\n- supports the plotting backends Bokeh (default), Matplotlib and Plotly.\n- allows you to drop into the rest of the\nHoloViz ecosystem when more power or flexibility is needed.\n\nFor basic data exploration we recommend using the higher level hvPlot package,\nwhich provides the familiar Pandas `.plot` api. You can drop into HoloViews\nwhen needed.\n\nTo learn more check out https://holoviews.org/. To report issues or contribute\ngo to https://github.com/holoviz/holoviews. To join the community go to\nhttps://discourse.holoviz.org/.\n\nHow to use HoloViews in 3 simple steps\n--------------------------------------\n\nWork with the data source you already know and \u2764\ufe0f\n\n>>> import pandas as pd\n>>> station_info = pd.read_csv('https://raw.githubusercontent.com/holoviz/holoviews/master/examples/assets/station_info.csv')\n\nImport HoloViews and configure your plotting backend\n\n>>> import holoviews as hv\n>>> hv.extension('bokeh')\n\nAnnotate your data\n\n>>> scatter = (\n... hv.Scatter(station_info, kdims='services', vdims='ridership')\n... .redim(\n... services=hv.Dimension(\"services\", label='Services'),\n... ridership=hv.Dimension(\"ridership\", label='Ridership'),\n... )\n... .opts(size=10, color=\"red\", responsive=True)\n... 
)\n>>> scatter\n\nIn a notebook this will display a nice scatter plot.\n\nNote that the `kdims` (The key dimension(s)) represents the independent\nvariable(s) and the `vdims` (value dimension(s)) the dependent variable(s).\n\nFor more check out https://holoviews.org/getting_started/Introduction.html\n\nHow to get help\n---------------\n\nYou can understand the structure of your objects by printing them.\n\n>>> print(scatter)\n:Scatter [services] (ridership)\n\nYou can get extensive documentation using `hv.help`.\n\n>>> hv.help(scatter)\n\nIn a notebook or ipython environment the usual\n\n- `help` and `?` will provide you with documentation.\n- `TAB` and `SHIFT+TAB` completion will help you navigate.\n\nTo ask the community go to https://discourse.holoviz.org/.\nTo report issues go to https://github.com/holoviz/holoviews.\n\"\"\"\nimport io, os, sys\n\nimport numpy as np # noqa (API import)\nimport param\n\n__version__ = str(param.version.Version(fpath=__file__, archive_commit=\"$Format:%h$\",\n reponame=\"holoviews\"))\n\nfrom . import util # noqa (API import)\nfrom .annotators import annotate # noqa (API import)\nfrom .core import archive, config # noqa (API import)\nfrom .core.boundingregion import BoundingBox # noqa (API import)\nfrom .core.dimension import OrderedDict, Dimension # noqa (API import)\nfrom .core.element import Element, Collator # noqa (API import)\nfrom .core.layout import (Layout, NdLayout, Empty, # noqa (API import)\n AdjointLayout)\nfrom .core.ndmapping import NdMapping # noqa (API import)\nfrom .core.options import (Options, Store, Cycle, # noqa (API import)\n Palette, StoreOptions)\nfrom .core.overlay import Overlay, NdOverlay # noqa (API import)\nfrom .core.spaces import (HoloMap, Callable, DynamicMap, # noqa (API import)\n GridSpace, GridMatrix)\n\nfrom .operation import Operation # noqa (API import)\nfrom .element import * # noqa (API import)\nfrom .element import __all__ as elements_list\nfrom .selection import link_selections # noqa (API import)\nfrom .util import (extension, renderer, output, opts, # noqa (API import)\n render, save)\nfrom .util.transform import dim # noqa (API import)\n\n# Suppress warnings generated by NumPy in matplotlib\n# Expected to be fixed in next matplotlib release\nimport warnings\nwarnings.filterwarnings(\"ignore\",\n message=\"elementwise comparison failed; returning scalar instead\")\n\ntry:\n import IPython # noqa (API import)\n from .ipython import notebook_extension\n extension = notebook_extension # noqa (name remapping)\nexcept ImportError:\n class notebook_extension(param.ParameterizedFunction):\n def __call__(self, *args, **opts): # noqa (dummy signature)\n raise Exception(\"IPython notebook not available: use hv.extension instead.\")\n\nif '_pyodide' in sys.modules:\n from .pyodide import pyodide_extension as extension # noqa (API import)\n\n# A single holoviews.rc file may be executed if found.\nfor rcfile in [os.environ.get(\"HOLOVIEWSRC\", ''),\n os.path.abspath(os.path.join(os.path.split(__file__)[0],\n '..', 'holoviews.rc')),\n \"~/.holoviews.rc\",\n \"~/.config/holoviews/holoviews.rc\"]:\n filename = os.path.expanduser(rcfile)\n if os.path.isfile(filename):\n with io.open(filename, encoding='utf8') as f:\n code = compile(f.read(), filename, 'exec')\n try:\n exec(code)\n except Exception as e:\n print(\"Warning: Could not load %r [%r]\" % (filename, str(e)))\n del f, code\n break\n del filename\n\ndef help(obj, visualization=True, ansi=True, backend=None,\n recursive=False, pattern=None):\n \"\"\"\n Extended version 
of the built-in help that supports parameterized\n functions and objects. A pattern (regular expression) may be used to\n filter the output and if recursive is set to True, documentation for\n the supplied object is shown. Note that the recursive option will\n only work with an object instance and not a class.\n\n If ansi is set to False, all ANSI color\n codes are stripped out.\n \"\"\"\n backend = backend if backend else Store.current_backend\n info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization,\n recursive=recursive, pattern=pattern, elements=elements_list)\n\n msg = (\"\\nTo view the visualization options applicable to this \"\n \"object or class, use:\\n\\n\"\n \" holoviews.help(obj, visualization=True)\\n\\n\")\n if info:\n print((msg if visualization is False else '') + info)\n else:\n import pydoc\n pydoc.help(obj)\n\n\ndel io, np, os, rcfile, warnings\n", "path": "holoviews/__init__.py"}], "after_files": [{"content": "import asyncio\nimport sys\n\n\nfrom bokeh.document import Document\nfrom bokeh.embed.elements import script_for_render_items\nfrom bokeh.embed.util import standalone_docs_json_and_render_items\nfrom bokeh.embed.wrappers import wrap_in_script_tag\nfrom panel.io.pyodide import _link_docs\nfrom panel.pane import panel as as_panel\n\nfrom .core.dimension import LabelledData\nfrom .core.options import Store\nfrom .util import extension as _extension\n\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\nasync def _link(ref, doc):\n from js import Bokeh\n rendered = Bokeh.index.object_keys()\n if ref not in rendered:\n await asyncio.sleep(0.1)\n await _link(ref, doc)\n return\n views = Bokeh.index.object_values()\n view = views[rendered.indexOf(ref)]\n _link_docs(doc, view.model.document)\n\ndef render_html(obj):\n from js import document\n if hasattr(sys.stdout, '_out'):\n target = sys.stdout._out # type: ignore\n else:\n raise ValueError(\"Could not determine target node to write to.\")\n doc = Document()\n as_panel(obj).server_doc(doc, location=False)\n docs_json, [render_item,] = standalone_docs_json_and_render_items(\n doc.roots, suppress_callback_warning=True\n )\n for root in doc.roots:\n render_item.roots._roots[root] = target\n document.getElementById(target).classList.add('bk-root')\n script = script_for_render_items(docs_json, [render_item])\n asyncio.create_task(_link(doc.roots[0].ref['id'], doc))\n return {'text/html': wrap_in_script_tag(script)}, {}\n\ndef render_image(element, fmt):\n \"\"\"\n Used to render elements to an image format (svg or png) if requested\n in the display formats.\n \"\"\"\n if fmt not in Store.display_formats:\n return None\n\n backend = Store.current_backend\n if type(element) not in Store.registry[backend]:\n return None\n renderer = Store.renderers[backend]\n plot = renderer.get_plot(element)\n\n # Current renderer does not support the image format\n if fmt not in renderer.param.objects('existing')['fig'].objects:\n return None\n\n data, info = renderer(plot, fmt=fmt)\n return {info['mime_type']: data}, {}\n\ndef render_png(element):\n return render_image(element, 'png')\n\ndef render_svg(element):\n return render_image(element, 'svg')\n\ndef in_jupyterlite():\n import js\n return hasattr(js, \"_JUPYTERLAB\")\n\n#-----------------------------------------------------------------------------\n# Public 
API\n#-----------------------------------------------------------------------------\n\nclass pyodide_extension(_extension):\n\n _loaded = False\n\n def __call__(self, *args, **params):\n super().__call__(*args, **params)\n if not self._loaded:\n Store.output_settings.initialize(list(Store.renderers.keys()))\n Store.set_display_hook('html+js', LabelledData, render_html)\n Store.set_display_hook('png', LabelledData, render_png)\n Store.set_display_hook('svg', LabelledData, render_svg)\n pyodide_extension._loaded = True\n", "path": "holoviews/pyodide.py"}, {"content": "\"\"\"\nHoloViews makes data analysis and visualization simple\n======================================================\n\nHoloViews lets you focus on what you are trying to explore and convey, not on\nthe process of plotting.\n\nHoloViews\n\n- supports a wide range of data sources including Pandas, Dask, XArray\nRapids cuDF, Streamz, Intake, Geopandas, NetworkX and Ibis.\n- supports the plotting backends Bokeh (default), Matplotlib and Plotly.\n- allows you to drop into the rest of the\nHoloViz ecosystem when more power or flexibility is needed.\n\nFor basic data exploration we recommend using the higher level hvPlot package,\nwhich provides the familiar Pandas `.plot` api. You can drop into HoloViews\nwhen needed.\n\nTo learn more check out https://holoviews.org/. To report issues or contribute\ngo to https://github.com/holoviz/holoviews. To join the community go to\nhttps://discourse.holoviz.org/.\n\nHow to use HoloViews in 3 simple steps\n--------------------------------------\n\nWork with the data source you already know and \u2764\ufe0f\n\n>>> import pandas as pd\n>>> station_info = pd.read_csv('https://raw.githubusercontent.com/holoviz/holoviews/master/examples/assets/station_info.csv')\n\nImport HoloViews and configure your plotting backend\n\n>>> import holoviews as hv\n>>> hv.extension('bokeh')\n\nAnnotate your data\n\n>>> scatter = (\n... hv.Scatter(station_info, kdims='services', vdims='ridership')\n... .redim(\n... services=hv.Dimension(\"services\", label='Services'),\n... ridership=hv.Dimension(\"ridership\", label='Ridership'),\n... )\n... .opts(size=10, color=\"red\", responsive=True)\n... )\n>>> scatter\n\nIn a notebook this will display a nice scatter plot.\n\nNote that the `kdims` (The key dimension(s)) represents the independent\nvariable(s) and the `vdims` (value dimension(s)) the dependent variable(s).\n\nFor more check out https://holoviews.org/getting_started/Introduction.html\n\nHow to get help\n---------------\n\nYou can understand the structure of your objects by printing them.\n\n>>> print(scatter)\n:Scatter [services] (ridership)\n\nYou can get extensive documentation using `hv.help`.\n\n>>> hv.help(scatter)\n\nIn a notebook or ipython environment the usual\n\n- `help` and `?` will provide you with documentation.\n- `TAB` and `SHIFT+TAB` completion will help you navigate.\n\nTo ask the community go to https://discourse.holoviz.org/.\nTo report issues go to https://github.com/holoviz/holoviews.\n\"\"\"\nimport io, os, sys\n\nimport numpy as np # noqa (API import)\nimport param\n\n__version__ = str(param.version.Version(fpath=__file__, archive_commit=\"$Format:%h$\",\n reponame=\"holoviews\"))\n\nfrom . 
import util # noqa (API import)\nfrom .annotators import annotate # noqa (API import)\nfrom .core import archive, config # noqa (API import)\nfrom .core.boundingregion import BoundingBox # noqa (API import)\nfrom .core.dimension import OrderedDict, Dimension # noqa (API import)\nfrom .core.element import Element, Collator # noqa (API import)\nfrom .core.layout import (Layout, NdLayout, Empty, # noqa (API import)\n AdjointLayout)\nfrom .core.ndmapping import NdMapping # noqa (API import)\nfrom .core.options import (Options, Store, Cycle, # noqa (API import)\n Palette, StoreOptions)\nfrom .core.overlay import Overlay, NdOverlay # noqa (API import)\nfrom .core.spaces import (HoloMap, Callable, DynamicMap, # noqa (API import)\n GridSpace, GridMatrix)\n\nfrom .operation import Operation # noqa (API import)\nfrom .element import * # noqa (API import)\nfrom .element import __all__ as elements_list\nfrom .selection import link_selections # noqa (API import)\nfrom .util import (extension, renderer, output, opts, # noqa (API import)\n render, save)\nfrom .util.transform import dim # noqa (API import)\n\n# Suppress warnings generated by NumPy in matplotlib\n# Expected to be fixed in next matplotlib release\nimport warnings\nwarnings.filterwarnings(\"ignore\",\n message=\"elementwise comparison failed; returning scalar instead\")\n\ntry:\n import IPython # noqa (API import)\n from .ipython import notebook_extension\n extension = notebook_extension # noqa (name remapping)\nexcept ImportError:\n class notebook_extension(param.ParameterizedFunction):\n def __call__(self, *args, **opts): # noqa (dummy signature)\n raise Exception(\"IPython notebook not available: use hv.extension instead.\")\n\nif '_pyodide' in sys.modules:\n from .pyodide import pyodide_extension, in_jupyterlite\n # The notebook_extension is needed inside jupyterlite,\n # so the override is only done if we are not inside jupyterlite.\n if not in_jupyterlite():\n extension = pyodide_extension\n del pyodide_extension, in_jupyterlite\n\n# A single holoviews.rc file may be executed if found.\nfor rcfile in [os.environ.get(\"HOLOVIEWSRC\", ''),\n os.path.abspath(os.path.join(os.path.split(__file__)[0],\n '..', 'holoviews.rc')),\n \"~/.holoviews.rc\",\n \"~/.config/holoviews/holoviews.rc\"]:\n filename = os.path.expanduser(rcfile)\n if os.path.isfile(filename):\n with io.open(filename, encoding='utf8') as f:\n code = compile(f.read(), filename, 'exec')\n try:\n exec(code)\n except Exception as e:\n print(\"Warning: Could not load %r [%r]\" % (filename, str(e)))\n del f, code\n break\n del filename\n\ndef help(obj, visualization=True, ansi=True, backend=None,\n recursive=False, pattern=None):\n \"\"\"\n Extended version of the built-in help that supports parameterized\n functions and objects. A pattern (regular expression) may be used to\n filter the output and if recursive is set to True, documentation for\n the supplied object is shown. 
Note that the recursive option will\n only work with an object instance and not a class.\n\n If ansi is set to False, all ANSI color\n codes are stripped out.\n \"\"\"\n backend = backend if backend else Store.current_backend\n info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization,\n recursive=recursive, pattern=pattern, elements=elements_list)\n\n msg = (\"\\nTo view the visualization options applicable to this \"\n \"object or class, use:\\n\\n\"\n \" holoviews.help(obj, visualization=True)\\n\\n\")\n if info:\n print((msg if visualization is False else '') + info)\n else:\n import pydoc\n pydoc.help(obj)\n\n\ndel io, np, os, rcfile, warnings\n", "path": "holoviews/__init__.py"}]}
3201
332
gh_patches_debug_4250
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-495
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Default app name Use a default app name like "Python App" rather than the empty string, so if users forget to set it it still appears on the consle. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/core/config.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import logging 5 import os 6 import warnings 7 8 from scout_apm.compat import string_type 9 from scout_apm.core import platform_detection 10 11 logger = logging.getLogger(__name__) 12 13 14 class ScoutConfig(object): 15 """ 16 Configuration object for the ScoutApm agent. 17 18 Contains a list of configuration "layers". When a configuration key is 19 looked up, each layer is asked in turn if it knows the value. The first one 20 to answer affirmatively returns the value. 21 """ 22 23 def __init__(self): 24 self.layers = [ 25 Env(), 26 Python(), 27 Derived(self), 28 Defaults(), 29 Null(), 30 ] 31 32 def value(self, key): 33 value = self.locate_layer_for_key(key).value(key) 34 if key in CONVERSIONS: 35 return CONVERSIONS[key](value) 36 return value 37 38 def locate_layer_for_key(self, key): 39 for layer in self.layers: 40 if layer.has_config(key): 41 return layer 42 43 # Should be unreachable because Null returns None for all keys. 44 raise ValueError("key {!r} not found in any layer".format(key)) 45 46 def log(self): 47 logger.debug("Configuration Loaded:") 48 for key in self.known_keys(): 49 layer = self.locate_layer_for_key(key) 50 logger.debug( 51 "%-9s: %s = %s", layer.__class__.__name__, key, layer.value(key) 52 ) 53 54 def known_keys(self): 55 return [ 56 "app_server", 57 "application_root", 58 "core_agent_dir", 59 "core_agent_download", 60 "core_agent_launch", 61 "core_agent_log_level", 62 "core_agent_permissions", 63 "core_agent_version", 64 "disabled_instruments", 65 "download_url", 66 "framework", 67 "framework_version", 68 "hostname", 69 "ignore", 70 "key", 71 "log_level", 72 "monitor", 73 "name", 74 "revision_sha", 75 "scm_subdirectory", 76 "shutdown_timeout_seconds", 77 "socket_path", 78 ] 79 80 def core_agent_permissions(self): 81 try: 82 return int(str(self.value("core_agent_permissions")), 8) 83 except ValueError: 84 logger.exception( 85 "Invalid core_agent_permissions value, using default of 0o700" 86 ) 87 return 0o700 88 89 @classmethod 90 def set(cls, **kwargs): 91 """ 92 Sets a configuration value for the Scout agent. Values set here will 93 not override values set in ENV. 94 """ 95 for key, value in kwargs.items(): 96 SCOUT_PYTHON_VALUES[key] = value 97 98 @classmethod 99 def unset(cls, *keys): 100 """ 101 Removes a configuration value for the Scout agent. 102 """ 103 for key in keys: 104 SCOUT_PYTHON_VALUES.pop(key, None) 105 106 @classmethod 107 def reset_all(cls): 108 """ 109 Remove all configuration settings set via `ScoutConfig.set(...)`. 110 111 This is meant for use in testing. 112 """ 113 SCOUT_PYTHON_VALUES.clear() 114 115 116 # Module-level data, the ScoutConfig.set(key="value") adds to this 117 SCOUT_PYTHON_VALUES = {} 118 119 120 class Python(object): 121 """ 122 A configuration overlay that lets other parts of python set values. 
123 """ 124 125 def has_config(self, key): 126 return key in SCOUT_PYTHON_VALUES 127 128 def value(self, key): 129 return SCOUT_PYTHON_VALUES[key] 130 131 132 class Env(object): 133 """ 134 Reads configuration from environment by prefixing the key 135 requested with "SCOUT_" 136 137 Example: the `key` config looks for SCOUT_KEY 138 environment variable 139 """ 140 141 def has_config(self, key): 142 env_key = self.modify_key(key) 143 return env_key in os.environ 144 145 def value(self, key): 146 env_key = self.modify_key(key) 147 return os.environ[env_key] 148 149 def modify_key(self, key): 150 env_key = ("SCOUT_" + key).upper() 151 return env_key 152 153 154 class Derived(object): 155 """ 156 A configuration overlay that calculates from other values. 157 """ 158 159 def __init__(self, config): 160 """ 161 config argument is the overall ScoutConfig var, so we can lookup the 162 components of the derived info. 163 """ 164 self.config = config 165 166 def has_config(self, key): 167 return self.lookup_func(key) is not None 168 169 def value(self, key): 170 return self.lookup_func(key)() 171 172 def lookup_func(self, key): 173 """ 174 Returns the derive_#{key} function, or None if it isn't defined 175 """ 176 func_name = "derive_" + key 177 return getattr(self, func_name, None) 178 179 def derive_socket_path(self): 180 return "{}/{}/scout-agent.sock".format( 181 self.config.value("core_agent_dir"), 182 self.config.value("core_agent_full_name"), 183 ) 184 185 def derive_core_agent_full_name(self): 186 triple = self.config.value("core_agent_triple") 187 if not platform_detection.is_valid_triple(triple): 188 warnings.warn("Invalid value for core_agent_triple: {}".format(triple)) 189 return "{name}-{version}-{triple}".format( 190 name="scout_apm_core", 191 version=self.config.value("core_agent_version"), 192 triple=triple, 193 ) 194 195 def derive_core_agent_triple(self): 196 return platform_detection.get_triple() 197 198 199 class Defaults(object): 200 """ 201 Provides default values for important configurations 202 """ 203 204 def __init__(self): 205 self.defaults = { 206 "app_server": "", 207 "application_root": "", 208 "core_agent_dir": "/tmp/scout_apm_core", 209 "core_agent_download": True, 210 "core_agent_launch": True, 211 "core_agent_log_level": "info", 212 "core_agent_permissions": 700, 213 "core_agent_version": "v1.2.6", # can be an exact tag name, or 'latest' 214 "disabled_instruments": [], 215 "download_url": "https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release", # noqa: E501 216 "framework": "", 217 "framework_version": "", 218 "hostname": None, 219 "key": "", 220 "monitor": False, 221 "name": "", 222 "revision_sha": self._git_revision_sha(), 223 "scm_subdirectory": "", 224 "shutdown_timeout_seconds": 2.0, 225 "uri_reporting": "filtered_params", 226 } 227 228 def _git_revision_sha(self): 229 # N.B. The environment variable SCOUT_REVISION_SHA may also be used, 230 # but that will be picked up by Env 231 return os.environ.get("HEROKU_SLUG_COMMIT", "") 232 233 def has_config(self, key): 234 return key in self.defaults 235 236 def value(self, key): 237 return self.defaults[key] 238 239 240 class Null(object): 241 """ 242 Always answers that a key is present, but the value is None 243 244 Used as the last step of the layered configuration. 
245 """ 246 247 def has_config(self, key): 248 return True 249 250 def value(self, key): 251 return None 252 253 254 def convert_to_bool(value): 255 if isinstance(value, bool): 256 return value 257 if isinstance(value, string_type): 258 return value.lower() in ("yes", "true", "t", "1") 259 # Unknown type - default to false? 260 return False 261 262 263 def convert_to_float(value): 264 try: 265 return float(value) 266 except ValueError: 267 return 0.0 268 269 270 def convert_to_list(value): 271 if isinstance(value, list): 272 return value 273 if isinstance(value, tuple): 274 return list(value) 275 if isinstance(value, string_type): 276 # Split on commas 277 return [item.strip() for item in value.split(",") if item] 278 # Unknown type - default to empty? 279 return [] 280 281 282 CONVERSIONS = { 283 "core_agent_download": convert_to_bool, 284 "core_agent_launch": convert_to_bool, 285 "disabled_instruments": convert_to_list, 286 "ignore": convert_to_list, 287 "monitor": convert_to_bool, 288 "shutdown_timeout_seconds": convert_to_float, 289 } 290 291 292 scout_config = ScoutConfig() 293 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py --- a/src/scout_apm/core/config.py +++ b/src/scout_apm/core/config.py @@ -218,7 +218,7 @@ "hostname": None, "key": "", "monitor": False, - "name": "", + "name": "Python App", "revision_sha": self._git_revision_sha(), "scm_subdirectory": "", "shutdown_timeout_seconds": 2.0,
{"golden_diff": "diff --git a/src/scout_apm/core/config.py b/src/scout_apm/core/config.py\n--- a/src/scout_apm/core/config.py\n+++ b/src/scout_apm/core/config.py\n@@ -218,7 +218,7 @@\n \"hostname\": None,\n \"key\": \"\",\n \"monitor\": False,\n- \"name\": \"\",\n+ \"name\": \"Python App\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n \"shutdown_timeout_seconds\": 2.0,\n", "issue": "Default app name\nUse a default app name like \"Python App\" rather than the empty string, so if users forget to set it it still appears on the consle.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nimport warnings\n\nfrom scout_apm.compat import string_type\nfrom scout_apm.core import platform_detection\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value. The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n Env(),\n Python(),\n Derived(self),\n Defaults(),\n Null(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n return CONVERSIONS[key](value)\n return value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n\n # Should be unreachable because Null returns None for all keys.\n raise ValueError(\"key {!r} not found in any layer\".format(key))\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\n \"%-9s: %s = %s\", layer.__class__.__name__, key, layer.value(key)\n )\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_log_level\",\n \"core_agent_permissions\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"shutdown_timeout_seconds\",\n \"socket_path\",\n ]\n\n def core_agent_permissions(self):\n try:\n return int(str(self.value(\"core_agent_permissions\")), 8)\n except ValueError:\n logger.exception(\n \"Invalid core_agent_permissions value, using default of 0o700\"\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. 
Values set here will\n not override values set in ENV.\n \"\"\"\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def unset(cls, *keys):\n \"\"\"\n Removes a configuration value for the Scout agent.\n \"\"\"\n for key in keys:\n SCOUT_PYTHON_VALUES.pop(key, None)\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass Python(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass Env(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `key` config looks for SCOUT_KEY\n environment variable\n \"\"\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass Derived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the derive_#{key} function, or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, func_name, None)\n\n def derive_socket_path(self):\n return \"{}/{}/scout-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n triple = self.config.value(\"core_agent_triple\")\n if not platform_detection.is_valid_triple(triple):\n warnings.warn(\"Invalid value for core_agent_triple: {}\".format(triple))\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=triple,\n )\n\n def derive_core_agent_triple(self):\n return platform_detection.get_triple()\n\n\nclass Defaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_log_level\": \"info\",\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.2.6\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": None,\n \"key\": \"\",\n \"monitor\": False,\n \"name\": \"\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n \"shutdown_timeout_seconds\": 2.0,\n \"uri_reporting\": \"filtered_params\",\n }\n\n def _git_revision_sha(self):\n # N.B. 
The environment variable SCOUT_REVISION_SHA may also be used,\n # but that will be picked up by Env\n return os.environ.get(\"HEROKU_SLUG_COMMIT\", \"\")\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\nclass Null(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\ndef convert_to_bool(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\ndef convert_to_float(value):\n try:\n return float(value)\n except ValueError:\n return 0.0\n\n\ndef convert_to_list(value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": convert_to_bool,\n \"core_agent_launch\": convert_to_bool,\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n \"monitor\": convert_to_bool,\n \"shutdown_timeout_seconds\": convert_to_float,\n}\n\n\nscout_config = ScoutConfig()\n", "path": "src/scout_apm/core/config.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport os\nimport warnings\n\nfrom scout_apm.compat import string_type\nfrom scout_apm.core import platform_detection\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScoutConfig(object):\n \"\"\"\n Configuration object for the ScoutApm agent.\n\n Contains a list of configuration \"layers\". When a configuration key is\n looked up, each layer is asked in turn if it knows the value. 
The first one\n to answer affirmatively returns the value.\n \"\"\"\n\n def __init__(self):\n self.layers = [\n Env(),\n Python(),\n Derived(self),\n Defaults(),\n Null(),\n ]\n\n def value(self, key):\n value = self.locate_layer_for_key(key).value(key)\n if key in CONVERSIONS:\n return CONVERSIONS[key](value)\n return value\n\n def locate_layer_for_key(self, key):\n for layer in self.layers:\n if layer.has_config(key):\n return layer\n\n # Should be unreachable because Null returns None for all keys.\n raise ValueError(\"key {!r} not found in any layer\".format(key))\n\n def log(self):\n logger.debug(\"Configuration Loaded:\")\n for key in self.known_keys():\n layer = self.locate_layer_for_key(key)\n logger.debug(\n \"%-9s: %s = %s\", layer.__class__.__name__, key, layer.value(key)\n )\n\n def known_keys(self):\n return [\n \"app_server\",\n \"application_root\",\n \"core_agent_dir\",\n \"core_agent_download\",\n \"core_agent_launch\",\n \"core_agent_log_level\",\n \"core_agent_permissions\",\n \"core_agent_version\",\n \"disabled_instruments\",\n \"download_url\",\n \"framework\",\n \"framework_version\",\n \"hostname\",\n \"ignore\",\n \"key\",\n \"log_level\",\n \"monitor\",\n \"name\",\n \"revision_sha\",\n \"scm_subdirectory\",\n \"shutdown_timeout_seconds\",\n \"socket_path\",\n ]\n\n def core_agent_permissions(self):\n try:\n return int(str(self.value(\"core_agent_permissions\")), 8)\n except ValueError:\n logger.exception(\n \"Invalid core_agent_permissions value, using default of 0o700\"\n )\n return 0o700\n\n @classmethod\n def set(cls, **kwargs):\n \"\"\"\n Sets a configuration value for the Scout agent. Values set here will\n not override values set in ENV.\n \"\"\"\n for key, value in kwargs.items():\n SCOUT_PYTHON_VALUES[key] = value\n\n @classmethod\n def unset(cls, *keys):\n \"\"\"\n Removes a configuration value for the Scout agent.\n \"\"\"\n for key in keys:\n SCOUT_PYTHON_VALUES.pop(key, None)\n\n @classmethod\n def reset_all(cls):\n \"\"\"\n Remove all configuration settings set via `ScoutConfig.set(...)`.\n\n This is meant for use in testing.\n \"\"\"\n SCOUT_PYTHON_VALUES.clear()\n\n\n# Module-level data, the ScoutConfig.set(key=\"value\") adds to this\nSCOUT_PYTHON_VALUES = {}\n\n\nclass Python(object):\n \"\"\"\n A configuration overlay that lets other parts of python set values.\n \"\"\"\n\n def has_config(self, key):\n return key in SCOUT_PYTHON_VALUES\n\n def value(self, key):\n return SCOUT_PYTHON_VALUES[key]\n\n\nclass Env(object):\n \"\"\"\n Reads configuration from environment by prefixing the key\n requested with \"SCOUT_\"\n\n Example: the `key` config looks for SCOUT_KEY\n environment variable\n \"\"\"\n\n def has_config(self, key):\n env_key = self.modify_key(key)\n return env_key in os.environ\n\n def value(self, key):\n env_key = self.modify_key(key)\n return os.environ[env_key]\n\n def modify_key(self, key):\n env_key = (\"SCOUT_\" + key).upper()\n return env_key\n\n\nclass Derived(object):\n \"\"\"\n A configuration overlay that calculates from other values.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n config argument is the overall ScoutConfig var, so we can lookup the\n components of the derived info.\n \"\"\"\n self.config = config\n\n def has_config(self, key):\n return self.lookup_func(key) is not None\n\n def value(self, key):\n return self.lookup_func(key)()\n\n def lookup_func(self, key):\n \"\"\"\n Returns the derive_#{key} function, or None if it isn't defined\n \"\"\"\n func_name = \"derive_\" + key\n return getattr(self, 
func_name, None)\n\n def derive_socket_path(self):\n return \"{}/{}/scout-agent.sock\".format(\n self.config.value(\"core_agent_dir\"),\n self.config.value(\"core_agent_full_name\"),\n )\n\n def derive_core_agent_full_name(self):\n triple = self.config.value(\"core_agent_triple\")\n if not platform_detection.is_valid_triple(triple):\n warnings.warn(\"Invalid value for core_agent_triple: {}\".format(triple))\n return \"{name}-{version}-{triple}\".format(\n name=\"scout_apm_core\",\n version=self.config.value(\"core_agent_version\"),\n triple=triple,\n )\n\n def derive_core_agent_triple(self):\n return platform_detection.get_triple()\n\n\nclass Defaults(object):\n \"\"\"\n Provides default values for important configurations\n \"\"\"\n\n def __init__(self):\n self.defaults = {\n \"app_server\": \"\",\n \"application_root\": \"\",\n \"core_agent_dir\": \"/tmp/scout_apm_core\",\n \"core_agent_download\": True,\n \"core_agent_launch\": True,\n \"core_agent_log_level\": \"info\",\n \"core_agent_permissions\": 700,\n \"core_agent_version\": \"v1.2.6\", # can be an exact tag name, or 'latest'\n \"disabled_instruments\": [],\n \"download_url\": \"https://s3-us-west-1.amazonaws.com/scout-public-downloads/apm_core_agent/release\", # noqa: E501\n \"framework\": \"\",\n \"framework_version\": \"\",\n \"hostname\": None,\n \"key\": \"\",\n \"monitor\": False,\n \"name\": \"Python App\",\n \"revision_sha\": self._git_revision_sha(),\n \"scm_subdirectory\": \"\",\n \"shutdown_timeout_seconds\": 2.0,\n \"uri_reporting\": \"filtered_params\",\n }\n\n def _git_revision_sha(self):\n # N.B. The environment variable SCOUT_REVISION_SHA may also be used,\n # but that will be picked up by Env\n return os.environ.get(\"HEROKU_SLUG_COMMIT\", \"\")\n\n def has_config(self, key):\n return key in self.defaults\n\n def value(self, key):\n return self.defaults[key]\n\n\nclass Null(object):\n \"\"\"\n Always answers that a key is present, but the value is None\n\n Used as the last step of the layered configuration.\n \"\"\"\n\n def has_config(self, key):\n return True\n\n def value(self, key):\n return None\n\n\ndef convert_to_bool(value):\n if isinstance(value, bool):\n return value\n if isinstance(value, string_type):\n return value.lower() in (\"yes\", \"true\", \"t\", \"1\")\n # Unknown type - default to false?\n return False\n\n\ndef convert_to_float(value):\n try:\n return float(value)\n except ValueError:\n return 0.0\n\n\ndef convert_to_list(value):\n if isinstance(value, list):\n return value\n if isinstance(value, tuple):\n return list(value)\n if isinstance(value, string_type):\n # Split on commas\n return [item.strip() for item in value.split(\",\") if item]\n # Unknown type - default to empty?\n return []\n\n\nCONVERSIONS = {\n \"core_agent_download\": convert_to_bool,\n \"core_agent_launch\": convert_to_bool,\n \"disabled_instruments\": convert_to_list,\n \"ignore\": convert_to_list,\n \"monitor\": convert_to_bool,\n \"shutdown_timeout_seconds\": convert_to_float,\n}\n\n\nscout_config = ScoutConfig()\n", "path": "src/scout_apm/core/config.py"}]}
num_tokens: 2,851
num_tokens_diff: 120
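A minimal sketch (not part of the dataset row) of how the patched `Defaults` layer above surfaces through the layered lookup: an application that never configures a name now reports "Python App". The import path follows the row's own `src/scout_apm/core/config.py`.

```python
# Sketch assuming the patched Defaults layer from the diff above.
from scout_apm.core.config import ScoutConfig

config = ScoutConfig()
# With no SCOUT_NAME env var and no ScoutConfig.set(name=...) call, the
# lookup falls through Env -> Python -> Derived -> Defaults.
assert config.value("name") == "Python App"  # previously returned ""
```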
problem_id: gh_patches_debug_23602
source: rasdani/github-patches
task_type: git_diff
in_source_id: apluslms__a-plus-655
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Course points API endpoint should contain points for all submissions This comes from the IntelliJ project: https://github.com/Aalto-LeTech/intellij-plugin/issues/302 The `/api/v2/courses/<course-id>/points/me/` endpoint should be able to provide points for all submissions (for one student in all exercises of one course). We need to still consider if all points are always included in the output or only when some parameter is given in the request GET query parameters. All points should already be available in the points cache: https://github.com/apluslms/a-plus/blob/9e595a0a902d19bcadeeaff8b3160873b0265f43/exercise/cache/points.py#L98 Let's not modify the existing submissions URL list in order to preserve backwards-compatibility. A new key shall be added to the output. Example snippet for the output (the existing submissions list only contains the URLs of the submissions): ``` "submissions_and_points": [ { "url": "https://plus.cs.aalto.fi/api/v2/submissions/123/", "points": 10 }, { "url": "https://plus.cs.aalto.fi/api/v2/submissions/456/", "points": 5 } ] ``` Jaakko says that it could be best to add the grade field to the existing `SubmissionBriefSerializer` class. It could be more uniform with the rest of the API. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exercise/api/custom_serializers.py` Content: ``` 1 from rest_framework import serializers 2 from rest_framework.reverse import reverse 3 from course.api.serializers import CourseUsertagBriefSerializer 4 from lib.api.serializers import AlwaysListSerializer 5 from userprofile.api.serializers import UserBriefSerializer, UserListField 6 from ..cache.points import CachedPoints 7 from .full_serializers import SubmissionSerializer 8 9 10 class UserToTagSerializer(AlwaysListSerializer, CourseUsertagBriefSerializer): 11 12 class Meta(CourseUsertagBriefSerializer.Meta): 13 fields = CourseUsertagBriefSerializer.Meta.fields + ( 14 'name', 15 ) 16 17 18 class UserWithTagsSerializer(UserBriefSerializer): 19 tags = serializers.SerializerMethodField() 20 21 class Meta(UserBriefSerializer.Meta): 22 fields = UserBriefSerializer.Meta.fields + ( 23 'tags', 24 ) 25 26 def get_tags(self, obj): 27 view = self.context['view'] 28 ser = UserToTagSerializer( 29 obj.taggings.tags_for_instance(view.instance), 30 context=self.context 31 ) 32 return ser.data 33 34 35 class ExercisePointsSerializer(serializers.Serializer): 36 37 def to_representation(self, entry): 38 request = self.context['request'] 39 40 def exercise_url(exercise_id): 41 return reverse('api:exercise-detail', kwargs={ 42 'exercise_id': exercise_id, 43 }, request=request) 44 45 def submission_url(submission_id): 46 if submission_id is None: 47 return None 48 return reverse('api:submission-detail', kwargs={ 49 'submission_id': submission_id 50 }, request=request) 51 52 exercise_data = { 53 'url': exercise_url(entry['id']), 54 'best_submission': submission_url(entry['best_submission']), 55 'submissions': [submission_url(s['id']) for s in entry['submissions']], 56 } 57 for key in [ 58 # exercise 59 'id', 60 'name', 61 'difficulty', 62 'max_points', 63 'points_to_pass', 64 'submission_count', 65 # best submission 66 'points', 67 'passed', 68 # 'official', 69 ]: 70 exercise_data[key] = entry[key] 71 exercise_data['official'] = (entry['graded'] and 72 not entry.get('unconfirmed', False)) 73 return 
exercise_data 74 75 76 class UserPointsSerializer(UserWithTagsSerializer): 77 78 def to_representation(self, obj): 79 rep = super().to_representation(obj) 80 view = self.context['view'] 81 points = CachedPoints(view.instance, obj.user, view.content) 82 modules = [] 83 for module in points.modules_flatted(): 84 module_data = {} 85 for key in [ 86 'id', 'name', 87 'max_points', 'points_to_pass', 'submission_count', 88 'points', 'points_by_difficulty', 'passed', 89 ]: 90 module_data[key] = module[key] 91 92 exercises = [] 93 for entry in module['flatted']: 94 if entry['type'] == 'exercise' and entry['submittable']: 95 exercises.append( 96 ExercisePointsSerializer(entry, context=self.context).data 97 ) 98 module_data['exercises'] = exercises 99 modules.append(module_data) 100 101 total = points.total() 102 for key in ['submission_count', 'points', 'points_by_difficulty']: 103 rep[key] = total[key] 104 rep['modules'] = modules 105 106 return rep 107 108 109 class SubmitterStatsSerializer(UserWithTagsSerializer): 110 111 def to_representation(self, obj): 112 rep = super().to_representation(obj) 113 view = self.context['view'] 114 points = CachedPoints(view.instance, obj.user, view.content) 115 entry,_,_,_ = points.find(view.exercise) 116 data = ExercisePointsSerializer(entry, context=self.context).data 117 for key,value in data.items(): 118 rep[key] = value 119 return rep 120 ``` Path: `exercise/api/serializers.py` Content: ``` 1 from rest_framework import serializers 2 from rest_framework.reverse import reverse 3 4 from lib.api.fields import NestedHyperlinkedIdentityField 5 from lib.api.serializers import AplusModelSerializer, HtmlViewField 6 from userprofile.api.serializers import UserBriefSerializer 7 from ..models import Submission, SubmittedFile, BaseExercise 8 9 10 __all__ = [ 11 'ExerciseBriefSerializer', 12 'SubmissionBriefSerializer', 13 'SubmittedFileBriefSerializer', 14 'SubmitterStatsBriefSerializer', 15 ] 16 17 18 class ExerciseBriefSerializer(AplusModelSerializer): 19 url = NestedHyperlinkedIdentityField( 20 view_name='api:exercise-detail', 21 lookup_map='exercise.api.views.ExerciseViewSet', 22 ) 23 display_name = serializers.CharField(source='__str__') 24 25 class Meta(AplusModelSerializer.Meta): 26 model = BaseExercise 27 fields = ( 28 'url', 29 'html_url', 30 'display_name', 31 'max_points', 32 'max_submissions', 33 ) 34 35 36 class SubmissionBriefSerializer(AplusModelSerializer): 37 #display_name = serializers.CharField(source='__str__') 38 39 class Meta(AplusModelSerializer.Meta): 40 model = Submission 41 fields = ( 42 'submission_time', 43 ) 44 extra_kwargs = { 45 'url': { 46 'view_name': 'api:submission-detail', 47 'lookup_map': 'exercise.api.views.SubmissionViewSet', 48 } 49 } 50 51 52 class SubmittedFileBriefSerializer(AplusModelSerializer): 53 #url = HtmlViewField() 54 url = NestedHyperlinkedIdentityField( 55 view_name='api:submission-files-detail', 56 lookup_map='exercise.api.views.SubmissionFileViewSet', 57 ) 58 59 class Meta(AplusModelSerializer.Meta): 60 model = SubmittedFile 61 fields = ( 62 'url', 63 'filename', 64 'param_name', 65 ) 66 67 68 class SubmitterStatsBriefSerializer(UserBriefSerializer): 69 stats = serializers.SerializerMethodField() 70 71 def get_stats(self, profile): 72 return reverse( 73 'api:exercise-submitter_stats-detail', 74 kwargs={ 75 'exercise_id': self.context['view'].exercise.id, 76 'user_id': profile.user.id, 77 }, 78 request=self.context['request'] 79 ) 80 81 class Meta(UserBriefSerializer.Meta): 82 fields = UserBriefSerializer.Meta.fields 
+ ( 83 'stats', 84 ) 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exercise/api/custom_serializers.py b/exercise/api/custom_serializers.py --- a/exercise/api/custom_serializers.py +++ b/exercise/api/custom_serializers.py @@ -49,10 +49,21 @@ 'submission_id': submission_id }, request=request) + def submission_obj(submission_cached): + id_ = submission_cached['id'] + return { + 'id': id_, + 'url': submission_url(id_), + 'submission_time': submission_cached['date'], + 'grade': submission_cached['points'], + } + + submissions = [submission_obj(s) for s in entry['submissions']] exercise_data = { 'url': exercise_url(entry['id']), 'best_submission': submission_url(entry['best_submission']), - 'submissions': [submission_url(s['id']) for s in entry['submissions']], + 'submissions': [s['url'] for s in submissions], + 'submissions_with_points': submissions, } for key in [ # exercise diff --git a/exercise/api/serializers.py b/exercise/api/serializers.py --- a/exercise/api/serializers.py +++ b/exercise/api/serializers.py @@ -40,6 +40,7 @@ model = Submission fields = ( 'submission_time', + 'grade', ) extra_kwargs = { 'url': {
{"golden_diff": "diff --git a/exercise/api/custom_serializers.py b/exercise/api/custom_serializers.py\n--- a/exercise/api/custom_serializers.py\n+++ b/exercise/api/custom_serializers.py\n@@ -49,10 +49,21 @@\n 'submission_id': submission_id\n }, request=request)\n \n+ def submission_obj(submission_cached):\n+ id_ = submission_cached['id']\n+ return {\n+ 'id': id_,\n+ 'url': submission_url(id_),\n+ 'submission_time': submission_cached['date'],\n+ 'grade': submission_cached['points'],\n+ }\n+\n+ submissions = [submission_obj(s) for s in entry['submissions']]\n exercise_data = {\n 'url': exercise_url(entry['id']),\n 'best_submission': submission_url(entry['best_submission']),\n- 'submissions': [submission_url(s['id']) for s in entry['submissions']],\n+ 'submissions': [s['url'] for s in submissions],\n+ 'submissions_with_points': submissions,\n }\n for key in [\n # exercise\ndiff --git a/exercise/api/serializers.py b/exercise/api/serializers.py\n--- a/exercise/api/serializers.py\n+++ b/exercise/api/serializers.py\n@@ -40,6 +40,7 @@\n model = Submission\n fields = (\n 'submission_time',\n+ 'grade',\n )\n extra_kwargs = {\n 'url': {\n", "issue": "Course points API endpoint should contain points for all submissions\nThis comes from the IntelliJ project: https://github.com/Aalto-LeTech/intellij-plugin/issues/302\r\n\r\nThe `/api/v2/courses/<course-id>/points/me/` endpoint should be able to provide points for all submissions (for one student in all exercises of one course). We need to still consider if all points are always included in the output or only when some parameter is given in the request GET query parameters. All points should already be available in the points cache: https://github.com/apluslms/a-plus/blob/9e595a0a902d19bcadeeaff8b3160873b0265f43/exercise/cache/points.py#L98\r\n\r\nLet's not modify the existing submissions URL list in order to preserve backwards-compatibility. A new key shall be added to the output.\r\n\r\nExample snippet for the output (the existing submissions list only contains the URLs of the submissions):\r\n\r\n```\r\n\"submissions_and_points\": [\r\n {\r\n \"url\": \"https://plus.cs.aalto.fi/api/v2/submissions/123/\",\r\n \"points\": 10\r\n },\r\n {\r\n \"url\": \"https://plus.cs.aalto.fi/api/v2/submissions/456/\",\r\n \"points\": 5\r\n }\r\n]\r\n```\r\n\r\nJaakko says that it could be best to add the grade field to the existing `SubmissionBriefSerializer` class. 
It could be more uniform with the rest of the API.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\nfrom course.api.serializers import CourseUsertagBriefSerializer\nfrom lib.api.serializers import AlwaysListSerializer\nfrom userprofile.api.serializers import UserBriefSerializer, UserListField\nfrom ..cache.points import CachedPoints\nfrom .full_serializers import SubmissionSerializer\n\n\nclass UserToTagSerializer(AlwaysListSerializer, CourseUsertagBriefSerializer):\n\n class Meta(CourseUsertagBriefSerializer.Meta):\n fields = CourseUsertagBriefSerializer.Meta.fields + (\n 'name',\n )\n\n\nclass UserWithTagsSerializer(UserBriefSerializer):\n tags = serializers.SerializerMethodField()\n\n class Meta(UserBriefSerializer.Meta):\n fields = UserBriefSerializer.Meta.fields + (\n 'tags',\n )\n\n def get_tags(self, obj):\n view = self.context['view']\n ser = UserToTagSerializer(\n obj.taggings.tags_for_instance(view.instance),\n context=self.context\n )\n return ser.data\n\n\nclass ExercisePointsSerializer(serializers.Serializer):\n\n def to_representation(self, entry):\n request = self.context['request']\n\n def exercise_url(exercise_id):\n return reverse('api:exercise-detail', kwargs={\n 'exercise_id': exercise_id,\n }, request=request)\n\n def submission_url(submission_id):\n if submission_id is None:\n return None\n return reverse('api:submission-detail', kwargs={\n 'submission_id': submission_id\n }, request=request)\n\n exercise_data = {\n 'url': exercise_url(entry['id']),\n 'best_submission': submission_url(entry['best_submission']),\n 'submissions': [submission_url(s['id']) for s in entry['submissions']],\n }\n for key in [\n # exercise\n 'id',\n 'name',\n 'difficulty',\n 'max_points',\n 'points_to_pass',\n 'submission_count',\n # best submission\n 'points',\n 'passed',\n # 'official',\n ]:\n exercise_data[key] = entry[key]\n exercise_data['official'] = (entry['graded'] and\n not entry.get('unconfirmed', False))\n return exercise_data\n\n\nclass UserPointsSerializer(UserWithTagsSerializer):\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n view = self.context['view']\n points = CachedPoints(view.instance, obj.user, view.content)\n modules = []\n for module in points.modules_flatted():\n module_data = {}\n for key in [\n 'id', 'name',\n 'max_points', 'points_to_pass', 'submission_count',\n 'points', 'points_by_difficulty', 'passed',\n ]:\n module_data[key] = module[key]\n\n exercises = []\n for entry in module['flatted']:\n if entry['type'] == 'exercise' and entry['submittable']:\n exercises.append(\n ExercisePointsSerializer(entry, context=self.context).data\n )\n module_data['exercises'] = exercises\n modules.append(module_data)\n\n total = points.total()\n for key in ['submission_count', 'points', 'points_by_difficulty']:\n rep[key] = total[key]\n rep['modules'] = modules\n\n return rep\n\n\nclass SubmitterStatsSerializer(UserWithTagsSerializer):\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n view = self.context['view']\n points = CachedPoints(view.instance, obj.user, view.content)\n entry,_,_,_ = points.find(view.exercise)\n data = ExercisePointsSerializer(entry, context=self.context).data\n for key,value in data.items():\n rep[key] = value\n return rep\n", "path": "exercise/api/custom_serializers.py"}, {"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom lib.api.fields import 
NestedHyperlinkedIdentityField\nfrom lib.api.serializers import AplusModelSerializer, HtmlViewField\nfrom userprofile.api.serializers import UserBriefSerializer\nfrom ..models import Submission, SubmittedFile, BaseExercise\n\n\n__all__ = [\n 'ExerciseBriefSerializer',\n 'SubmissionBriefSerializer',\n 'SubmittedFileBriefSerializer',\n 'SubmitterStatsBriefSerializer',\n]\n\n\nclass ExerciseBriefSerializer(AplusModelSerializer):\n url = NestedHyperlinkedIdentityField(\n view_name='api:exercise-detail',\n lookup_map='exercise.api.views.ExerciseViewSet',\n )\n display_name = serializers.CharField(source='__str__')\n\n class Meta(AplusModelSerializer.Meta):\n model = BaseExercise\n fields = (\n 'url',\n 'html_url',\n 'display_name',\n 'max_points',\n 'max_submissions',\n )\n\n\nclass SubmissionBriefSerializer(AplusModelSerializer):\n #display_name = serializers.CharField(source='__str__')\n\n class Meta(AplusModelSerializer.Meta):\n model = Submission\n fields = (\n 'submission_time',\n )\n extra_kwargs = {\n 'url': {\n 'view_name': 'api:submission-detail',\n 'lookup_map': 'exercise.api.views.SubmissionViewSet',\n }\n }\n\n\nclass SubmittedFileBriefSerializer(AplusModelSerializer):\n #url = HtmlViewField()\n url = NestedHyperlinkedIdentityField(\n view_name='api:submission-files-detail',\n lookup_map='exercise.api.views.SubmissionFileViewSet',\n )\n\n class Meta(AplusModelSerializer.Meta):\n model = SubmittedFile\n fields = (\n 'url',\n 'filename',\n 'param_name',\n )\n\n\nclass SubmitterStatsBriefSerializer(UserBriefSerializer):\n stats = serializers.SerializerMethodField()\n\n def get_stats(self, profile):\n return reverse(\n 'api:exercise-submitter_stats-detail',\n kwargs={\n 'exercise_id': self.context['view'].exercise.id,\n 'user_id': profile.user.id,\n },\n request=self.context['request']\n )\n\n class Meta(UserBriefSerializer.Meta):\n fields = UserBriefSerializer.Meta.fields + (\n 'stats',\n )\n", "path": "exercise/api/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\nfrom course.api.serializers import CourseUsertagBriefSerializer\nfrom lib.api.serializers import AlwaysListSerializer\nfrom userprofile.api.serializers import UserBriefSerializer, UserListField\nfrom ..cache.points import CachedPoints\nfrom .full_serializers import SubmissionSerializer\n\n\nclass UserToTagSerializer(AlwaysListSerializer, CourseUsertagBriefSerializer):\n\n class Meta(CourseUsertagBriefSerializer.Meta):\n fields = CourseUsertagBriefSerializer.Meta.fields + (\n 'name',\n )\n\n\nclass UserWithTagsSerializer(UserBriefSerializer):\n tags = serializers.SerializerMethodField()\n\n class Meta(UserBriefSerializer.Meta):\n fields = UserBriefSerializer.Meta.fields + (\n 'tags',\n )\n\n def get_tags(self, obj):\n view = self.context['view']\n ser = UserToTagSerializer(\n obj.taggings.tags_for_instance(view.instance),\n context=self.context\n )\n return ser.data\n\n\nclass ExercisePointsSerializer(serializers.Serializer):\n\n def to_representation(self, entry):\n request = self.context['request']\n\n def exercise_url(exercise_id):\n return reverse('api:exercise-detail', kwargs={\n 'exercise_id': exercise_id,\n }, request=request)\n\n def submission_url(submission_id):\n if submission_id is None:\n return None\n return reverse('api:submission-detail', kwargs={\n 'submission_id': submission_id\n }, request=request)\n\n def submission_obj(submission_cached):\n id_ = submission_cached['id']\n return {\n 'id': id_,\n 'url': submission_url(id_),\n 
'submission_time': submission_cached['date'],\n 'grade': submission_cached['points'],\n }\n\n submissions = [submission_obj(s) for s in entry['submissions']]\n exercise_data = {\n 'url': exercise_url(entry['id']),\n 'best_submission': submission_url(entry['best_submission']),\n 'submissions': [s['url'] for s in submissions],\n 'submissions_with_points': submissions,\n }\n for key in [\n # exercise\n 'id',\n 'name',\n 'difficulty',\n 'max_points',\n 'points_to_pass',\n 'submission_count',\n # best submission\n 'points',\n 'passed',\n # 'official',\n ]:\n exercise_data[key] = entry[key]\n exercise_data['official'] = (entry['graded'] and\n not entry.get('unconfirmed', False))\n return exercise_data\n\n\nclass UserPointsSerializer(UserWithTagsSerializer):\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n view = self.context['view']\n points = CachedPoints(view.instance, obj.user, view.content)\n modules = []\n for module in points.modules_flatted():\n module_data = {}\n for key in [\n 'id', 'name',\n 'max_points', 'points_to_pass', 'submission_count',\n 'points', 'points_by_difficulty', 'passed',\n ]:\n module_data[key] = module[key]\n\n exercises = []\n for entry in module['flatted']:\n if entry['type'] == 'exercise' and entry['submittable']:\n exercises.append(\n ExercisePointsSerializer(entry, context=self.context).data\n )\n module_data['exercises'] = exercises\n modules.append(module_data)\n\n total = points.total()\n for key in ['submission_count', 'points', 'points_by_difficulty']:\n rep[key] = total[key]\n rep['modules'] = modules\n\n return rep\n\n\nclass SubmitterStatsSerializer(UserWithTagsSerializer):\n\n def to_representation(self, obj):\n rep = super().to_representation(obj)\n view = self.context['view']\n points = CachedPoints(view.instance, obj.user, view.content)\n entry,_,_,_ = points.find(view.exercise)\n data = ExercisePointsSerializer(entry, context=self.context).data\n for key,value in data.items():\n rep[key] = value\n return rep\n", "path": "exercise/api/custom_serializers.py"}, {"content": "from rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom lib.api.fields import NestedHyperlinkedIdentityField\nfrom lib.api.serializers import AplusModelSerializer, HtmlViewField\nfrom userprofile.api.serializers import UserBriefSerializer\nfrom ..models import Submission, SubmittedFile, BaseExercise\n\n\n__all__ = [\n 'ExerciseBriefSerializer',\n 'SubmissionBriefSerializer',\n 'SubmittedFileBriefSerializer',\n 'SubmitterStatsBriefSerializer',\n]\n\n\nclass ExerciseBriefSerializer(AplusModelSerializer):\n url = NestedHyperlinkedIdentityField(\n view_name='api:exercise-detail',\n lookup_map='exercise.api.views.ExerciseViewSet',\n )\n display_name = serializers.CharField(source='__str__')\n\n class Meta(AplusModelSerializer.Meta):\n model = BaseExercise\n fields = (\n 'url',\n 'html_url',\n 'display_name',\n 'max_points',\n 'max_submissions',\n )\n\n\nclass SubmissionBriefSerializer(AplusModelSerializer):\n #display_name = serializers.CharField(source='__str__')\n\n class Meta(AplusModelSerializer.Meta):\n model = Submission\n fields = (\n 'submission_time',\n 'grade',\n )\n extra_kwargs = {\n 'url': {\n 'view_name': 'api:submission-detail',\n 'lookup_map': 'exercise.api.views.SubmissionViewSet',\n }\n }\n\n\nclass SubmittedFileBriefSerializer(AplusModelSerializer):\n #url = HtmlViewField()\n url = NestedHyperlinkedIdentityField(\n view_name='api:submission-files-detail',\n 
lookup_map='exercise.api.views.SubmissionFileViewSet',\n )\n\n class Meta(AplusModelSerializer.Meta):\n model = SubmittedFile\n fields = (\n 'url',\n 'filename',\n 'param_name',\n )\n\n\nclass SubmitterStatsBriefSerializer(UserBriefSerializer):\n stats = serializers.SerializerMethodField()\n\n def get_stats(self, profile):\n return reverse(\n 'api:exercise-submitter_stats-detail',\n kwargs={\n 'exercise_id': self.context['view'].exercise.id,\n 'user_id': profile.user.id,\n },\n request=self.context['request']\n )\n\n class Meta(UserBriefSerializer.Meta):\n fields = UserBriefSerializer.Meta.fields + (\n 'stats',\n )\n", "path": "exercise/api/serializers.py"}]}
num_tokens: 2,288
num_tokens_diff: 318
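For readability, here is a small sketch of the per-exercise payload produced by the patched `ExercisePointsSerializer` in the row above. The field names come from the diff; the ids, URLs, timestamps, and grades are placeholders for illustration only.

```python
# Illustrative shape only -- ids, URLs, times and grades are made up.
exercise_entry = {
    "url": "https://example.org/api/v2/exercises/42/",
    "best_submission": "https://example.org/api/v2/submissions/456/",
    # unchanged list of submission URLs, kept for backwards compatibility
    "submissions": [
        "https://example.org/api/v2/submissions/123/",
        "https://example.org/api/v2/submissions/456/",
    ],
    # new key added by the patch: one dict per submission with its points
    "submissions_with_points": [
        {"id": 123, "url": "https://example.org/api/v2/submissions/123/",
         "submission_time": "2020-03-01T12:00:00Z", "grade": 5},
        {"id": 456, "url": "https://example.org/api/v2/submissions/456/",
         "submission_time": "2020-03-02T12:00:00Z", "grade": 10},
    ],
}
```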
problem_id: gh_patches_debug_35131
source: rasdani/github-patches
task_type: git_diff
in_source_id: tensorflow__addons-1230
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RSquare TypeError: tf__update_state() got an unexpected keyword argument 'sample_weight' **System information** - OS Platform and Distribution (e.g., Linux Ubuntu 16.04): ubuntu 18.04 - TensorFlow version and how it was installed (source or binary): 2.1.0 binary (conda) - TensorFlow-Addons version and how it was installed (source or binary): 0.8.3 binary(pip) - Python version: 3.7.6 - Is GPU used? (yes/no): yes **Describe the bug** The code goes wrong when I add "tfa.metrics.RSquare(dtype=tf.float32)" to model metrics. The exception is "TypeError: tf__update_state() got an unexpected keyword argument 'sample_weight'" And I don't see "sample_weight" parameter , what been added shown in #564 , of update_state() function in class RSquare in addons version 0.8.3. Are there something wrong with my installed tensorflow and addons package? **Code to reproduce the issue** Usage in my code: ``` python model.compile( loss='mse', optimizer=optimizer, metrics=['mae', 'mse', tfa.metrics.RSquare(dtype=tf.float32)] ) ``` **Other info / logs** Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tensorflow_addons/metrics/multilabel_confusion_matrix.py` Content: ``` 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Implements Multi-label confusion matrix scores.""" 16 17 import tensorflow as tf 18 from tensorflow.keras.metrics import Metric 19 import numpy as np 20 21 from typeguard import typechecked 22 from tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike 23 24 25 class MultiLabelConfusionMatrix(Metric): 26 """Computes Multi-label confusion matrix. 27 28 Class-wise confusion matrix is computed for the 29 evaluation of classification. 30 31 If multi-class input is provided, it will be treated 32 as multilabel data. 33 34 Consider classification problem with two classes 35 (i.e num_classes=2). 36 37 Resultant matrix `M` will be in the shape of (num_classes, 2, 2). 
38 39 Every class `i` has a dedicated 2*2 matrix that contains: 40 41 - true negatives for class i in M(0,0) 42 - false positives for class i in M(0,1) 43 - false negatives for class i in M(1,0) 44 - true positives for class i in M(1,1) 45 46 ```python 47 # multilabel confusion matrix 48 y_true = tf.constant([[1, 0, 1], [0, 1, 0]], 49 dtype=tf.int32) 50 y_pred = tf.constant([[1, 0, 0],[0, 1, 1]], 51 dtype=tf.int32) 52 output = MultiLabelConfusionMatrix(num_classes=3) 53 output.update_state(y_true, y_pred) 54 print('Confusion matrix:', output.result().numpy()) 55 56 # Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]] 57 [[0 1] [1 0]]] 58 59 # if multiclass input is provided 60 y_true = tf.constant([[1, 0, 0], [0, 1, 0]], 61 dtype=tf.int32) 62 y_pred = tf.constant([[1, 0, 0],[0, 0, 1]], 63 dtype=tf.int32) 64 output = MultiLabelConfusionMatrix(num_classes=3) 65 output.update_state(y_true, y_pred) 66 print('Confusion matrix:', output.result().numpy()) 67 68 # Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]] 69 ``` 70 """ 71 72 @typechecked 73 def __init__( 74 self, 75 num_classes: FloatTensorLike, 76 name: str = "Multilabel_confusion_matrix", 77 dtype: AcceptableDTypes = None, 78 **kwargs 79 ): 80 super().__init__(name=name, dtype=dtype) 81 self.num_classes = num_classes 82 self.true_positives = self.add_weight( 83 "true_positives", 84 shape=[self.num_classes], 85 initializer="zeros", 86 dtype=self.dtype, 87 ) 88 self.false_positives = self.add_weight( 89 "false_positives", 90 shape=[self.num_classes], 91 initializer="zeros", 92 dtype=self.dtype, 93 ) 94 self.false_negatives = self.add_weight( 95 "false_negatives", 96 shape=[self.num_classes], 97 initializer="zeros", 98 dtype=self.dtype, 99 ) 100 self.true_negatives = self.add_weight( 101 "true_negatives", 102 shape=[self.num_classes], 103 initializer="zeros", 104 dtype=self.dtype, 105 ) 106 107 def update_state(self, y_true, y_pred): 108 y_true = tf.cast(y_true, tf.int32) 109 y_pred = tf.cast(y_pred, tf.int32) 110 # true positive 111 true_positive = tf.math.count_nonzero(y_true * y_pred, 0) 112 # predictions sum 113 pred_sum = tf.math.count_nonzero(y_pred, 0) 114 # true labels sum 115 true_sum = tf.math.count_nonzero(y_true, 0) 116 false_positive = pred_sum - true_positive 117 false_negative = true_sum - true_positive 118 y_true_negative = tf.math.not_equal(y_true, 1) 119 y_pred_negative = tf.math.not_equal(y_pred, 1) 120 true_negative = tf.math.count_nonzero( 121 tf.math.logical_and(y_true_negative, y_pred_negative), axis=0 122 ) 123 124 # true positive state update 125 self.true_positives.assign_add(tf.cast(true_positive, self.dtype)) 126 # false positive state update 127 self.false_positives.assign_add(tf.cast(false_positive, self.dtype)) 128 # false negative state update 129 self.false_negatives.assign_add(tf.cast(false_negative, self.dtype)) 130 # true negative state update 131 self.true_negatives.assign_add(tf.cast(true_negative, self.dtype)) 132 133 def result(self): 134 flat_confusion_matrix = tf.convert_to_tensor( 135 [ 136 self.true_negatives, 137 self.false_positives, 138 self.false_negatives, 139 self.true_positives, 140 ] 141 ) 142 # reshape into 2*2 matrix 143 confusion_matrix = tf.reshape(tf.transpose(flat_confusion_matrix), [-1, 2, 2]) 144 145 return confusion_matrix 146 147 def get_config(self): 148 """Returns the serializable config of the metric.""" 149 150 config = { 151 "num_classes": self.num_classes, 152 } 153 base_config = super().get_config() 154 return {**base_config, **config} 155 156 def 
reset_states(self): 157 self.true_positives.assign(np.zeros(self.num_classes), np.int32) 158 self.false_positives.assign(np.zeros(self.num_classes), np.int32) 159 self.false_negatives.assign(np.zeros(self.num_classes), np.int32) 160 self.true_negatives.assign(np.zeros(self.num_classes), np.int32) 161 ``` Path: `tensorflow_addons/metrics/r_square.py` Content: ``` 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Implements R^2 scores.""" 16 17 import tensorflow as tf 18 from tensorflow.keras.metrics import Metric 19 20 from typeguard import typechecked 21 from tensorflow_addons.utils.types import AcceptableDTypes 22 23 24 class RSquare(Metric): 25 """Compute R^2 score. 26 27 This is also called as coefficient of determination. 28 It tells how close are data to the fitted regression line. 29 30 - Highest score can be 1.0 and it indicates that the predictors 31 perfectly accounts for variation in the target. 32 - Score 0.0 indicates that the predictors do not 33 account for variation in the target. 34 - It can also be negative if the model is worse. 35 36 Usage: 37 ```python 38 actuals = tf.constant([1, 4, 3], dtype=tf.float32) 39 preds = tf.constant([2, 4, 4], dtype=tf.float32) 40 result = tf.keras.metrics.RSquare() 41 result.update_state(actuals, preds) 42 print('R^2 score is: ', r1.result().numpy()) # 0.57142866 43 ``` 44 """ 45 46 @typechecked 47 def __init__( 48 self, name: str = "r_square", dtype: AcceptableDTypes = None, **kwargs 49 ): 50 super().__init__(name=name, dtype=dtype) 51 self.squared_sum = self.add_weight("squared_sum", initializer="zeros") 52 self.sum = self.add_weight("sum", initializer="zeros") 53 self.res = self.add_weight("residual", initializer="zeros") 54 self.count = self.add_weight("count", initializer="zeros") 55 56 def update_state(self, y_true, y_pred): 57 y_true = tf.convert_to_tensor(y_true, tf.float32) 58 y_pred = tf.convert_to_tensor(y_pred, tf.float32) 59 self.squared_sum.assign_add(tf.reduce_sum(y_true ** 2)) 60 self.sum.assign_add(tf.reduce_sum(y_true)) 61 self.res.assign_add(tf.reduce_sum(tf.square(tf.subtract(y_true, y_pred)))) 62 self.count.assign_add(tf.cast(tf.shape(y_true)[0], tf.float32)) 63 64 def result(self): 65 mean = self.sum / self.count 66 total = self.squared_sum - 2 * self.sum * mean + self.count * mean ** 2 67 return 1 - (self.res / total) 68 69 def reset_states(self): 70 # The state of the metric will be reset at the start of each epoch. 71 self.squared_sum.assign(0.0) 72 self.sum.assign(0.0) 73 self.res.assign(0.0) 74 self.count.assign(0.0) 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tensorflow_addons/metrics/multilabel_confusion_matrix.py b/tensorflow_addons/metrics/multilabel_confusion_matrix.py --- a/tensorflow_addons/metrics/multilabel_confusion_matrix.py +++ b/tensorflow_addons/metrics/multilabel_confusion_matrix.py @@ -14,6 +14,8 @@ # ============================================================================== """Implements Multi-label confusion matrix scores.""" +import warnings + import tensorflow as tf from tensorflow.keras.metrics import Metric import numpy as np @@ -104,7 +106,14 @@ dtype=self.dtype, ) - def update_state(self, y_true, y_pred): + def update_state(self, y_true, y_pred, sample_weight=None): + if sample_weight is not None: + warnings.warn( + "`sample_weight` is not None. Be aware that MultiLabelConfusionMatrix " + "does not take `sample_weight` into account when computing the metric " + "value." + ) + y_true = tf.cast(y_true, tf.int32) y_pred = tf.cast(y_pred, tf.int32) # true positive diff --git a/tensorflow_addons/metrics/r_square.py b/tensorflow_addons/metrics/r_square.py --- a/tensorflow_addons/metrics/r_square.py +++ b/tensorflow_addons/metrics/r_square.py @@ -14,6 +14,8 @@ # ============================================================================== """Implements R^2 scores.""" +import warnings + import tensorflow as tf from tensorflow.keras.metrics import Metric @@ -53,7 +55,13 @@ self.res = self.add_weight("residual", initializer="zeros") self.count = self.add_weight("count", initializer="zeros") - def update_state(self, y_true, y_pred): + def update_state(self, y_true, y_pred, sample_weight=None): + if sample_weight is not None: + warnings.warn( + "`sample_weight` is not None. Be aware that RSquare " + "does not take `sample_weight` into account when computing the metric " + "value." + ) y_true = tf.convert_to_tensor(y_true, tf.float32) y_pred = tf.convert_to_tensor(y_pred, tf.float32) self.squared_sum.assign_add(tf.reduce_sum(y_true ** 2))
{"golden_diff": "diff --git a/tensorflow_addons/metrics/multilabel_confusion_matrix.py b/tensorflow_addons/metrics/multilabel_confusion_matrix.py\n--- a/tensorflow_addons/metrics/multilabel_confusion_matrix.py\n+++ b/tensorflow_addons/metrics/multilabel_confusion_matrix.py\n@@ -14,6 +14,8 @@\n # ==============================================================================\n \"\"\"Implements Multi-label confusion matrix scores.\"\"\"\n \n+import warnings\n+\n import tensorflow as tf\n from tensorflow.keras.metrics import Metric\n import numpy as np\n@@ -104,7 +106,14 @@\n dtype=self.dtype,\n )\n \n- def update_state(self, y_true, y_pred):\n+ def update_state(self, y_true, y_pred, sample_weight=None):\n+ if sample_weight is not None:\n+ warnings.warn(\n+ \"`sample_weight` is not None. Be aware that MultiLabelConfusionMatrix \"\n+ \"does not take `sample_weight` into account when computing the metric \"\n+ \"value.\"\n+ )\n+\n y_true = tf.cast(y_true, tf.int32)\n y_pred = tf.cast(y_pred, tf.int32)\n # true positive\ndiff --git a/tensorflow_addons/metrics/r_square.py b/tensorflow_addons/metrics/r_square.py\n--- a/tensorflow_addons/metrics/r_square.py\n+++ b/tensorflow_addons/metrics/r_square.py\n@@ -14,6 +14,8 @@\n # ==============================================================================\n \"\"\"Implements R^2 scores.\"\"\"\n \n+import warnings\n+\n import tensorflow as tf\n from tensorflow.keras.metrics import Metric\n \n@@ -53,7 +55,13 @@\n self.res = self.add_weight(\"residual\", initializer=\"zeros\")\n self.count = self.add_weight(\"count\", initializer=\"zeros\")\n \n- def update_state(self, y_true, y_pred):\n+ def update_state(self, y_true, y_pred, sample_weight=None):\n+ if sample_weight is not None:\n+ warnings.warn(\n+ \"`sample_weight` is not None. Be aware that RSquare \"\n+ \"does not take `sample_weight` into account when computing the metric \"\n+ \"value.\"\n+ )\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n self.squared_sum.assign_add(tf.reduce_sum(y_true ** 2))\n", "issue": "RSquare TypeError: tf__update_state() got an unexpected keyword argument 'sample_weight'\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): ubuntu 18.04\r\n- TensorFlow version and how it was installed (source or binary): 2.1.0 binary (conda)\r\n- TensorFlow-Addons version and how it was installed (source or binary): 0.8.3 binary(pip)\r\n- Python version: 3.7.6\r\n- Is GPU used? (yes/no): yes\r\n\r\n**Describe the bug**\r\n\r\nThe code goes wrong when I add \"tfa.metrics.RSquare(dtype=tf.float32)\" to model metrics.\r\nThe exception is \"TypeError: tf__update_state() got an unexpected keyword argument 'sample_weight'\"\r\n\r\nAnd I don't see \"sample_weight\" parameter , what been added shown in #564 , of update_state() function in class RSquare in addons version 0.8.3.\r\n\r\nAre there something wrong with my installed tensorflow and addons package?\r\n\r\n\r\n**Code to reproduce the issue**\r\n\r\nUsage in my code:\r\n``` python\r\nmodel.compile(\r\n loss='mse',\r\n optimizer=optimizer,\r\n metrics=['mae', 'mse', tfa.metrics.RSquare(dtype=tf.float32)]\r\n )\r\n```\r\n\r\n**Other info / logs**\r\n\r\nInclude any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached.\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Multi-label confusion matrix scores.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras.metrics import Metric\nimport numpy as np\n\nfrom typeguard import typechecked\nfrom tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike\n\n\nclass MultiLabelConfusionMatrix(Metric):\n \"\"\"Computes Multi-label confusion matrix.\n\n Class-wise confusion matrix is computed for the\n evaluation of classification.\n\n If multi-class input is provided, it will be treated\n as multilabel data.\n\n Consider classification problem with two classes\n (i.e num_classes=2).\n\n Resultant matrix `M` will be in the shape of (num_classes, 2, 2).\n\n Every class `i` has a dedicated 2*2 matrix that contains:\n\n - true negatives for class i in M(0,0)\n - false positives for class i in M(0,1)\n - false negatives for class i in M(1,0)\n - true positives for class i in M(1,1)\n\n ```python\n # multilabel confusion matrix\n y_true = tf.constant([[1, 0, 1], [0, 1, 0]],\n dtype=tf.int32)\n y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],\n dtype=tf.int32)\n output = MultiLabelConfusionMatrix(num_classes=3)\n output.update_state(y_true, y_pred)\n print('Confusion matrix:', output.result().numpy())\n\n # Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]\n [[0 1] [1 0]]]\n\n # if multiclass input is provided\n y_true = tf.constant([[1, 0, 0], [0, 1, 0]],\n dtype=tf.int32)\n y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],\n dtype=tf.int32)\n output = MultiLabelConfusionMatrix(num_classes=3)\n output.update_state(y_true, y_pred)\n print('Confusion matrix:', output.result().numpy())\n\n # Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]\n ```\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n num_classes: FloatTensorLike,\n name: str = \"Multilabel_confusion_matrix\",\n dtype: AcceptableDTypes = None,\n **kwargs\n ):\n super().__init__(name=name, dtype=dtype)\n self.num_classes = num_classes\n self.true_positives = self.add_weight(\n \"true_positives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n self.false_positives = self.add_weight(\n \"false_positives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n self.false_negatives = self.add_weight(\n \"false_negatives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n self.true_negatives = self.add_weight(\n \"true_negatives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n\n def update_state(self, y_true, y_pred):\n y_true = tf.cast(y_true, tf.int32)\n y_pred = tf.cast(y_pred, tf.int32)\n # true positive\n true_positive = tf.math.count_nonzero(y_true * y_pred, 0)\n # predictions sum\n pred_sum = tf.math.count_nonzero(y_pred, 0)\n # true labels sum\n true_sum = tf.math.count_nonzero(y_true, 0)\n false_positive = pred_sum - true_positive\n false_negative = true_sum - 
true_positive\n y_true_negative = tf.math.not_equal(y_true, 1)\n y_pred_negative = tf.math.not_equal(y_pred, 1)\n true_negative = tf.math.count_nonzero(\n tf.math.logical_and(y_true_negative, y_pred_negative), axis=0\n )\n\n # true positive state update\n self.true_positives.assign_add(tf.cast(true_positive, self.dtype))\n # false positive state update\n self.false_positives.assign_add(tf.cast(false_positive, self.dtype))\n # false negative state update\n self.false_negatives.assign_add(tf.cast(false_negative, self.dtype))\n # true negative state update\n self.true_negatives.assign_add(tf.cast(true_negative, self.dtype))\n\n def result(self):\n flat_confusion_matrix = tf.convert_to_tensor(\n [\n self.true_negatives,\n self.false_positives,\n self.false_negatives,\n self.true_positives,\n ]\n )\n # reshape into 2*2 matrix\n confusion_matrix = tf.reshape(tf.transpose(flat_confusion_matrix), [-1, 2, 2])\n\n return confusion_matrix\n\n def get_config(self):\n \"\"\"Returns the serializable config of the metric.\"\"\"\n\n config = {\n \"num_classes\": self.num_classes,\n }\n base_config = super().get_config()\n return {**base_config, **config}\n\n def reset_states(self):\n self.true_positives.assign(np.zeros(self.num_classes), np.int32)\n self.false_positives.assign(np.zeros(self.num_classes), np.int32)\n self.false_negatives.assign(np.zeros(self.num_classes), np.int32)\n self.true_negatives.assign(np.zeros(self.num_classes), np.int32)\n", "path": "tensorflow_addons/metrics/multilabel_confusion_matrix.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements R^2 scores.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras.metrics import Metric\n\nfrom typeguard import typechecked\nfrom tensorflow_addons.utils.types import AcceptableDTypes\n\n\nclass RSquare(Metric):\n \"\"\"Compute R^2 score.\n\n This is also called as coefficient of determination.\n It tells how close are data to the fitted regression line.\n\n - Highest score can be 1.0 and it indicates that the predictors\n perfectly accounts for variation in the target.\n - Score 0.0 indicates that the predictors do not\n account for variation in the target.\n - It can also be negative if the model is worse.\n\n Usage:\n ```python\n actuals = tf.constant([1, 4, 3], dtype=tf.float32)\n preds = tf.constant([2, 4, 4], dtype=tf.float32)\n result = tf.keras.metrics.RSquare()\n result.update_state(actuals, preds)\n print('R^2 score is: ', r1.result().numpy()) # 0.57142866\n ```\n \"\"\"\n\n @typechecked\n def __init__(\n self, name: str = \"r_square\", dtype: AcceptableDTypes = None, **kwargs\n ):\n super().__init__(name=name, dtype=dtype)\n self.squared_sum = self.add_weight(\"squared_sum\", initializer=\"zeros\")\n self.sum = self.add_weight(\"sum\", initializer=\"zeros\")\n self.res = self.add_weight(\"residual\", initializer=\"zeros\")\n self.count = 
self.add_weight(\"count\", initializer=\"zeros\")\n\n def update_state(self, y_true, y_pred):\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n self.squared_sum.assign_add(tf.reduce_sum(y_true ** 2))\n self.sum.assign_add(tf.reduce_sum(y_true))\n self.res.assign_add(tf.reduce_sum(tf.square(tf.subtract(y_true, y_pred))))\n self.count.assign_add(tf.cast(tf.shape(y_true)[0], tf.float32))\n\n def result(self):\n mean = self.sum / self.count\n total = self.squared_sum - 2 * self.sum * mean + self.count * mean ** 2\n return 1 - (self.res / total)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.squared_sum.assign(0.0)\n self.sum.assign(0.0)\n self.res.assign(0.0)\n self.count.assign(0.0)\n", "path": "tensorflow_addons/metrics/r_square.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Multi-label confusion matrix scores.\"\"\"\n\nimport warnings\n\nimport tensorflow as tf\nfrom tensorflow.keras.metrics import Metric\nimport numpy as np\n\nfrom typeguard import typechecked\nfrom tensorflow_addons.utils.types import AcceptableDTypes, FloatTensorLike\n\n\nclass MultiLabelConfusionMatrix(Metric):\n \"\"\"Computes Multi-label confusion matrix.\n\n Class-wise confusion matrix is computed for the\n evaluation of classification.\n\n If multi-class input is provided, it will be treated\n as multilabel data.\n\n Consider classification problem with two classes\n (i.e num_classes=2).\n\n Resultant matrix `M` will be in the shape of (num_classes, 2, 2).\n\n Every class `i` has a dedicated 2*2 matrix that contains:\n\n - true negatives for class i in M(0,0)\n - false positives for class i in M(0,1)\n - false negatives for class i in M(1,0)\n - true positives for class i in M(1,1)\n\n ```python\n # multilabel confusion matrix\n y_true = tf.constant([[1, 0, 1], [0, 1, 0]],\n dtype=tf.int32)\n y_pred = tf.constant([[1, 0, 0],[0, 1, 1]],\n dtype=tf.int32)\n output = MultiLabelConfusionMatrix(num_classes=3)\n output.update_state(y_true, y_pred)\n print('Confusion matrix:', output.result().numpy())\n\n # Confusion matrix: [[[1 0] [0 1]] [[1 0] [0 1]]\n [[0 1] [1 0]]]\n\n # if multiclass input is provided\n y_true = tf.constant([[1, 0, 0], [0, 1, 0]],\n dtype=tf.int32)\n y_pred = tf.constant([[1, 0, 0],[0, 0, 1]],\n dtype=tf.int32)\n output = MultiLabelConfusionMatrix(num_classes=3)\n output.update_state(y_true, y_pred)\n print('Confusion matrix:', output.result().numpy())\n\n # Confusion matrix: [[[1 0] [0 1]] [[1 0] [1 0]] [[1 1] [0 0]]]\n ```\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n num_classes: FloatTensorLike,\n name: str = \"Multilabel_confusion_matrix\",\n dtype: AcceptableDTypes = None,\n **kwargs\n ):\n super().__init__(name=name, dtype=dtype)\n self.num_classes = num_classes\n 
self.true_positives = self.add_weight(\n \"true_positives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n self.false_positives = self.add_weight(\n \"false_positives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n self.false_negatives = self.add_weight(\n \"false_negatives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n self.true_negatives = self.add_weight(\n \"true_negatives\",\n shape=[self.num_classes],\n initializer=\"zeros\",\n dtype=self.dtype,\n )\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n if sample_weight is not None:\n warnings.warn(\n \"`sample_weight` is not None. Be aware that MultiLabelConfusionMatrix \"\n \"does not take `sample_weight` into account when computing the metric \"\n \"value.\"\n )\n\n y_true = tf.cast(y_true, tf.int32)\n y_pred = tf.cast(y_pred, tf.int32)\n # true positive\n true_positive = tf.math.count_nonzero(y_true * y_pred, 0)\n # predictions sum\n pred_sum = tf.math.count_nonzero(y_pred, 0)\n # true labels sum\n true_sum = tf.math.count_nonzero(y_true, 0)\n false_positive = pred_sum - true_positive\n false_negative = true_sum - true_positive\n y_true_negative = tf.math.not_equal(y_true, 1)\n y_pred_negative = tf.math.not_equal(y_pred, 1)\n true_negative = tf.math.count_nonzero(\n tf.math.logical_and(y_true_negative, y_pred_negative), axis=0\n )\n\n # true positive state update\n self.true_positives.assign_add(tf.cast(true_positive, self.dtype))\n # false positive state update\n self.false_positives.assign_add(tf.cast(false_positive, self.dtype))\n # false negative state update\n self.false_negatives.assign_add(tf.cast(false_negative, self.dtype))\n # true negative state update\n self.true_negatives.assign_add(tf.cast(true_negative, self.dtype))\n\n def result(self):\n flat_confusion_matrix = tf.convert_to_tensor(\n [\n self.true_negatives,\n self.false_positives,\n self.false_negatives,\n self.true_positives,\n ]\n )\n # reshape into 2*2 matrix\n confusion_matrix = tf.reshape(tf.transpose(flat_confusion_matrix), [-1, 2, 2])\n\n return confusion_matrix\n\n def get_config(self):\n \"\"\"Returns the serializable config of the metric.\"\"\"\n\n config = {\n \"num_classes\": self.num_classes,\n }\n base_config = super().get_config()\n return {**base_config, **config}\n\n def reset_states(self):\n self.true_positives.assign(np.zeros(self.num_classes), np.int32)\n self.false_positives.assign(np.zeros(self.num_classes), np.int32)\n self.false_negatives.assign(np.zeros(self.num_classes), np.int32)\n self.true_negatives.assign(np.zeros(self.num_classes), np.int32)\n", "path": "tensorflow_addons/metrics/multilabel_confusion_matrix.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements R^2 scores.\"\"\"\n\nimport warnings\n\nimport tensorflow as tf\nfrom tensorflow.keras.metrics import Metric\n\nfrom typeguard import typechecked\nfrom tensorflow_addons.utils.types import AcceptableDTypes\n\n\nclass RSquare(Metric):\n \"\"\"Compute R^2 score.\n\n This is also called as coefficient of determination.\n It tells how close are data to the fitted regression line.\n\n - Highest score can be 1.0 and it indicates that the predictors\n perfectly accounts for variation in the target.\n - Score 0.0 indicates that the predictors do not\n account for variation in the target.\n - It can also be negative if the model is worse.\n\n Usage:\n ```python\n actuals = tf.constant([1, 4, 3], dtype=tf.float32)\n preds = tf.constant([2, 4, 4], dtype=tf.float32)\n result = tf.keras.metrics.RSquare()\n result.update_state(actuals, preds)\n print('R^2 score is: ', r1.result().numpy()) # 0.57142866\n ```\n \"\"\"\n\n @typechecked\n def __init__(\n self, name: str = \"r_square\", dtype: AcceptableDTypes = None, **kwargs\n ):\n super().__init__(name=name, dtype=dtype)\n self.squared_sum = self.add_weight(\"squared_sum\", initializer=\"zeros\")\n self.sum = self.add_weight(\"sum\", initializer=\"zeros\")\n self.res = self.add_weight(\"residual\", initializer=\"zeros\")\n self.count = self.add_weight(\"count\", initializer=\"zeros\")\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n if sample_weight is not None:\n warnings.warn(\n \"`sample_weight` is not None. Be aware that RSquare \"\n \"does not take `sample_weight` into account when computing the metric \"\n \"value.\"\n )\n y_true = tf.convert_to_tensor(y_true, tf.float32)\n y_pred = tf.convert_to_tensor(y_pred, tf.float32)\n self.squared_sum.assign_add(tf.reduce_sum(y_true ** 2))\n self.sum.assign_add(tf.reduce_sum(y_true))\n self.res.assign_add(tf.reduce_sum(tf.square(tf.subtract(y_true, y_pred))))\n self.count.assign_add(tf.cast(tf.shape(y_true)[0], tf.float32))\n\n def result(self):\n mean = self.sum / self.count\n total = self.squared_sum - 2 * self.sum * mean + self.count * mean ** 2\n return 1 - (self.res / total)\n\n def reset_states(self):\n # The state of the metric will be reset at the start of each epoch.\n self.squared_sum.assign(0.0)\n self.sum.assign(0.0)\n self.res.assign(0.0)\n self.count.assign(0.0)\n", "path": "tensorflow_addons/metrics/r_square.py"}]}
3,192
533
gh_patches_debug_25327
rasdani/github-patches
git_diff
cocotb__cocotb-2134
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Global DUT handle Right now we supply a reference to the root `SimHandle` (aka the DUT object) as the sole argument to every test. There are some disadvantages to this, the biggest being the DUT object is not available from module-level code. I noticed this in https://github.com/cocotb/cocotb/pull/1502#discussion_r425582308, where information about the DUT is used at the module-level. I myself have had a need for this in the past, but simply worked around it. Instead, I propose we make the DUT object globally available from the cocotb module: `cocotb.dut`. We now have access to the DUT object at the module-level. This is safe because the DUT cannot change from test to test. We could eventually stop supplying the DUT as an argument to the test functions, which aligns our tests better with other testing frameworks like pytest. We would have to be careful because we could end up with module-level code modifying the DUT. We would want all tests in a module to run before loading the next module. That means we cannot pre-discover all functions to calculate the total test count. We could still do counts on a module level and have the summary print the total number of tests run. Or we could make the decision that we are not allowed to modify the DUT at the module-level, which can be enforced. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cocotb/__init__.py` Content: ``` 1 # Copyright (c) 2013 Potential Ventures Ltd 2 # Copyright (c) 2013 SolarFlare Communications Inc 3 # All rights reserved. 4 5 # Redistribution and use in source and binary forms, with or without 6 # modification, are permitted provided that the following conditions are met: 7 # * Redistributions of source code must retain the above copyright 8 # notice, this list of conditions and the following disclaimer. 9 # * Redistributions in binary form must reproduce the above copyright 10 # notice, this list of conditions and the following disclaimer in the 11 # documentation and/or other materials provided with the distribution. 12 # * Neither the name of Potential Ventures Ltd, 13 # SolarFlare Communications Inc nor the 14 # names of its contributors may be used to endorse or promote products 15 # derived from this software without specific prior written permission. 16 17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY 21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28 """ 29 Cocotb is a coroutine, cosimulation framework for writing testbenches in Python. 
30 31 See https://docs.cocotb.org for full documentation 32 """ 33 import os 34 import sys 35 import logging 36 import threading 37 import random 38 import time 39 import warnings 40 from typing import Dict, List, Union 41 from collections.abc import Coroutine 42 43 import cocotb.handle 44 import cocotb.log 45 from cocotb.scheduler import Scheduler 46 from cocotb.regression import RegressionManager 47 from cocotb.decorators import RunningTask 48 49 # Things we want in the cocotb namespace 50 from cocotb.decorators import test, coroutine, hook, function, external # noqa: F401 51 52 from ._version import __version__ 53 54 55 def _setup_logging(): 56 global log 57 58 def _reopen_stream_with_buffering(stream_name): 59 try: 60 if not getattr(sys, stream_name).isatty(): 61 setattr(sys, stream_name, os.fdopen(getattr(sys, stream_name).fileno(), 'w', 1)) 62 return True 63 return False 64 except Exception as e: 65 return e 66 67 # If stdout/stderr are not TTYs, Python may not have opened them with line 68 # buffering. In that case, try to reopen them with line buffering 69 # explicitly enabled. This ensures that prints such as stack traces always 70 # appear. Continue silently if this fails. 71 _stdout_buffer_result = _reopen_stream_with_buffering('stdout') 72 _stderr_buffer_result = _reopen_stream_with_buffering('stderr') 73 74 # Don't set the logging up until we've attempted to fix the standard IO, 75 # otherwise it will end up connected to the unfixed IO. 76 cocotb.log.default_config() 77 log = logging.getLogger(__name__) 78 79 # we can't log these things until the logging is set up! 80 if _stderr_buffer_result is True: 81 log.debug("Reopened stderr with line buffering") 82 if _stdout_buffer_result is True: 83 log.debug("Reopened stdout with line buffering") 84 if isinstance(_stdout_buffer_result, Exception) or isinstance(_stderr_buffer_result, Exception): 85 if isinstance(_stdout_buffer_result, Exception): 86 log.warning("Failed to ensure that stdout is line buffered", exc_info=_stdout_buffer_result) 87 if isinstance(_stderr_buffer_result, Exception): 88 log.warning("Failed to ensure that stderr is line buffered", exc_info=_stderr_buffer_result) 89 log.warning("Some stack traces may not appear because of this.") 90 91 del _stderr_buffer_result, _stdout_buffer_result 92 93 94 # Singleton scheduler instance 95 # NB this cheekily ensures a singleton since we're replacing the reference 96 # so that cocotb.scheduler gives you the singleton instance and not the 97 # scheduler package 98 99 scheduler = None # type: cocotb.scheduler.Scheduler 100 """The global scheduler instance.""" 101 102 regression_manager = None # type: cocotb.regression.RegressionManager 103 """The global regression manager instance.""" 104 105 argv = None # type: List[str] 106 """The argument list as seen by the simulator""" 107 108 argc = None # type: int 109 """The length of :data:`cocotb.argv`""" 110 111 plusargs = None # type: Dict[str, Union[bool, str]] 112 """A dictionary of "plusargs" handed to the simulation. See :make:var:`PLUSARGS` for details.""" 113 114 LANGUAGE = os.getenv("TOPLEVEL_LANG") # type: str 115 """The value of :make:var:`TOPLEVEL_LANG`""" 116 117 SIM_NAME = None # type: str 118 """The running simulator product information. ``None`` if :mod:`cocotb` was not loaded from a simulator""" 119 120 SIM_VERSION = None # type: str 121 """The version of the running simulator. 
``None`` if :mod:`cocotb` was not loaded from a simulator""" 122 123 RANDOM_SEED = None # type: int 124 """ 125 The value passed to the Python default random number generator. 126 See :envvar:`RANDOM_SEED` for details on how the value is computed. 127 """ 128 129 _library_coverage = None 130 """ used for cocotb library coverage """ 131 132 133 def fork(coro: Union[RunningTask, Coroutine]) -> RunningTask: 134 """ Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on its use. """ 135 return scheduler.add(coro) 136 137 138 # FIXME is this really required? 139 _rlock = threading.RLock() 140 141 142 def mem_debug(port): 143 import cocotb.memdebug 144 cocotb.memdebug.start(port) 145 146 147 def _initialise_testbench(argv_): 148 """Initialize testbench. 149 150 This function is called after the simulator has elaborated all 151 entities and is ready to run the test. 152 153 The test must be defined by the environment variables 154 :envvar:`MODULE` and :envvar:`TESTCASE`. 155 156 The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a 157 comma-separated list of modules to be executed before the first test. 158 """ 159 _rlock.acquire() 160 161 if "COCOTB_LIBRARY_COVERAGE" in os.environ: 162 import coverage 163 164 global _library_coverage 165 _library_coverage = coverage.coverage( 166 data_file=".coverage.cocotb", 167 branch=True, 168 include=["{}/*".format(os.path.dirname(__file__))]) 169 _library_coverage.start() 170 171 global argc, argv 172 argv = argv_ 173 argc = len(argv) 174 175 root_name = os.getenv("TOPLEVEL") 176 if root_name is not None: 177 if root_name == "": 178 root_name = None 179 elif '.' in root_name: 180 # Skip any library component of the toplevel 181 root_name = root_name.split(".", 1)[1] 182 183 # sys.path normally includes "" (the current directory), but does not appear to when python is embedded. 184 # Add it back because users expect to be able to import files in their test directory. 185 # TODO: move this to gpi_embed.cpp 186 sys.path.insert(0, "") 187 188 _setup_logging() 189 190 # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners 191 # If the user doesn't want to see these, they can always change the global 192 # warning settings in their test module. 
193 if not sys.warnoptions: 194 warnings.simplefilter("default") 195 196 from cocotb import simulator 197 198 global SIM_NAME, SIM_VERSION 199 SIM_NAME = simulator.get_simulator_product().strip() 200 SIM_VERSION = simulator.get_simulator_version().strip() 201 202 cocotb.log.info("Running on {} version {}".format(SIM_NAME, SIM_VERSION)) 203 204 memcheck_port = os.getenv('MEMCHECK') 205 if memcheck_port is not None: 206 mem_debug(int(memcheck_port)) 207 208 log.info("Running tests with cocotb v%s from %s" % 209 (__version__, os.path.dirname(__file__))) 210 211 # Create the base handle type 212 213 process_plusargs() 214 215 global scheduler 216 scheduler = Scheduler() 217 218 # Seed the Python random number generator to make this repeatable 219 global RANDOM_SEED 220 RANDOM_SEED = os.getenv('RANDOM_SEED') 221 222 if RANDOM_SEED is None: 223 if 'ntb_random_seed' in plusargs: 224 RANDOM_SEED = eval(plusargs['ntb_random_seed']) 225 elif 'seed' in plusargs: 226 RANDOM_SEED = eval(plusargs['seed']) 227 else: 228 RANDOM_SEED = int(time.time()) 229 log.info("Seeding Python random module with %d" % (RANDOM_SEED)) 230 else: 231 RANDOM_SEED = int(RANDOM_SEED) 232 log.info("Seeding Python random module with supplied seed %d" % (RANDOM_SEED)) 233 random.seed(RANDOM_SEED) 234 235 # Setup DUT object 236 from cocotb import simulator 237 238 handle = simulator.get_root_handle(root_name) 239 if not handle: 240 raise RuntimeError("Can not find root handle ({})".format(root_name)) 241 242 dut = cocotb.handle.SimHandle(handle) 243 244 # start Regression Manager 245 global regression_manager 246 regression_manager = RegressionManager.from_discovery(dut) 247 regression_manager.execute() 248 249 _rlock.release() 250 return True 251 252 253 def _sim_event(level, message): 254 """Function that can be called externally to signal an event.""" 255 # SIM_INFO = 0 256 SIM_TEST_FAIL = 1 257 SIM_FAIL = 2 258 from cocotb.result import TestFailure, SimFailure 259 260 if level is SIM_TEST_FAIL: 261 scheduler.log.error("Failing test at simulator request") 262 scheduler.finish_test(TestFailure("Failure from external source: %s" % 263 message)) 264 elif level is SIM_FAIL: 265 # We simply return here as the simulator will exit 266 # so no cleanup is needed 267 msg = ("Failing test at simulator request before test run completion: " 268 "%s" % message) 269 scheduler.log.error(msg) 270 scheduler.finish_scheduler(SimFailure(msg)) 271 else: 272 scheduler.log.error("Unsupported sim event") 273 274 return True 275 276 277 def process_plusargs(): 278 279 global plusargs 280 281 plusargs = {} 282 283 for option in cocotb.argv: 284 if option.startswith('+'): 285 if option.find('=') != -1: 286 (name, value) = option[1:].split('=') 287 plusargs[name] = value 288 else: 289 plusargs[option[1:]] = True 290 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cocotb/__init__.py b/cocotb/__init__.py --- a/cocotb/__init__.py +++ b/cocotb/__init__.py @@ -129,6 +129,16 @@ _library_coverage = None """ used for cocotb library coverage """ +top = None # type: cocotb.handle.SimHandleBase +r""" +A handle to the :envvar:`TOPLEVEL` entity/module. + +This is equivalent to the :term:`DUT` parameter given to cocotb tests, so it can be used wherever that variable can be used. +It is particularly useful for extracting information about the :term:`DUT` in module-level class and function definitions; +and in parameters to :class:`.TestFactory`\ s. +``None`` if :mod:`cocotb` was not loaded from a simulator. +""" + def fork(coro: Union[RunningTask, Coroutine]) -> RunningTask: """ Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on its use. """ @@ -239,11 +249,12 @@ if not handle: raise RuntimeError("Can not find root handle ({})".format(root_name)) - dut = cocotb.handle.SimHandle(handle) + global top + top = cocotb.handle.SimHandle(handle) # start Regression Manager global regression_manager - regression_manager = RegressionManager.from_discovery(dut) + regression_manager = RegressionManager.from_discovery(top) regression_manager.execute() _rlock.release()
{"golden_diff": "diff --git a/cocotb/__init__.py b/cocotb/__init__.py\n--- a/cocotb/__init__.py\n+++ b/cocotb/__init__.py\n@@ -129,6 +129,16 @@\n _library_coverage = None\n \"\"\" used for cocotb library coverage \"\"\"\n \n+top = None # type: cocotb.handle.SimHandleBase\n+r\"\"\"\n+A handle to the :envvar:`TOPLEVEL` entity/module.\n+\n+This is equivalent to the :term:`DUT` parameter given to cocotb tests, so it can be used wherever that variable can be used.\n+It is particularly useful for extracting information about the :term:`DUT` in module-level class and function definitions;\n+and in parameters to :class:`.TestFactory`\\ s.\n+``None`` if :mod:`cocotb` was not loaded from a simulator.\n+\"\"\"\n+\n \n def fork(coro: Union[RunningTask, Coroutine]) -> RunningTask:\n \"\"\" Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on its use. \"\"\"\n@@ -239,11 +249,12 @@\n if not handle:\n raise RuntimeError(\"Can not find root handle ({})\".format(root_name))\n \n- dut = cocotb.handle.SimHandle(handle)\n+ global top\n+ top = cocotb.handle.SimHandle(handle)\n \n # start Regression Manager\n global regression_manager\n- regression_manager = RegressionManager.from_discovery(dut)\n+ regression_manager = RegressionManager.from_discovery(top)\n regression_manager.execute()\n \n _rlock.release()\n", "issue": "Global DUT handle\nRight now we supply a reference to the root `SimHandle` (aka the DUT object) as the sole argument to every test. There are some disadvantages to this, the biggest being the DUT object is not available from module-level code. I noticed this in https://github.com/cocotb/cocotb/pull/1502#discussion_r425582308, where information about the DUT is used at the module-level. I myself have had a need for this in the past, but simply worked around it.\r\n\r\nInstead, I propose we make the DUT object globally available from the cocotb module: `cocotb.dut`. We now have access to the DUT object at the module-level. This is safe because the DUT cannot change from test to test. We could eventually stop supplying the DUT as an argument to the test functions, which aligns our tests better with other testing frameworks like pytest.\r\n\r\nWe would have to be careful because we could end up with module-level code modifying the DUT. We would want all tests in a module to run before loading the next module. That means we cannot pre-discover all functions to calculate the total test count. We could still do counts on a module level and have the summary print the total number of tests run. 
Or we could make the decision that we are not allowed to modify the DUT at the module-level, which can be enforced.\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nCocotb is a coroutine, cosimulation framework for writing testbenches in Python.\n\nSee https://docs.cocotb.org for full documentation\n\"\"\"\nimport os\nimport sys\nimport logging\nimport threading\nimport random\nimport time\nimport warnings\nfrom typing import Dict, List, Union\nfrom collections.abc import Coroutine\n\nimport cocotb.handle\nimport cocotb.log\nfrom cocotb.scheduler import Scheduler\nfrom cocotb.regression import RegressionManager\nfrom cocotb.decorators import RunningTask\n\n# Things we want in the cocotb namespace\nfrom cocotb.decorators import test, coroutine, hook, function, external # noqa: F401\n\nfrom ._version import __version__\n\n\ndef _setup_logging():\n global log\n\n def _reopen_stream_with_buffering(stream_name):\n try:\n if not getattr(sys, stream_name).isatty():\n setattr(sys, stream_name, os.fdopen(getattr(sys, stream_name).fileno(), 'w', 1))\n return True\n return False\n except Exception as e:\n return e\n\n # If stdout/stderr are not TTYs, Python may not have opened them with line\n # buffering. In that case, try to reopen them with line buffering\n # explicitly enabled. This ensures that prints such as stack traces always\n # appear. 
Continue silently if this fails.\n _stdout_buffer_result = _reopen_stream_with_buffering('stdout')\n _stderr_buffer_result = _reopen_stream_with_buffering('stderr')\n\n # Don't set the logging up until we've attempted to fix the standard IO,\n # otherwise it will end up connected to the unfixed IO.\n cocotb.log.default_config()\n log = logging.getLogger(__name__)\n\n # we can't log these things until the logging is set up!\n if _stderr_buffer_result is True:\n log.debug(\"Reopened stderr with line buffering\")\n if _stdout_buffer_result is True:\n log.debug(\"Reopened stdout with line buffering\")\n if isinstance(_stdout_buffer_result, Exception) or isinstance(_stderr_buffer_result, Exception):\n if isinstance(_stdout_buffer_result, Exception):\n log.warning(\"Failed to ensure that stdout is line buffered\", exc_info=_stdout_buffer_result)\n if isinstance(_stderr_buffer_result, Exception):\n log.warning(\"Failed to ensure that stderr is line buffered\", exc_info=_stderr_buffer_result)\n log.warning(\"Some stack traces may not appear because of this.\")\n\n del _stderr_buffer_result, _stdout_buffer_result\n\n\n# Singleton scheduler instance\n# NB this cheekily ensures a singleton since we're replacing the reference\n# so that cocotb.scheduler gives you the singleton instance and not the\n# scheduler package\n\nscheduler = None # type: cocotb.scheduler.Scheduler\n\"\"\"The global scheduler instance.\"\"\"\n\nregression_manager = None # type: cocotb.regression.RegressionManager\n\"\"\"The global regression manager instance.\"\"\"\n\nargv = None # type: List[str]\n\"\"\"The argument list as seen by the simulator\"\"\"\n\nargc = None # type: int\n\"\"\"The length of :data:`cocotb.argv`\"\"\"\n\nplusargs = None # type: Dict[str, Union[bool, str]]\n\"\"\"A dictionary of \"plusargs\" handed to the simulation. See :make:var:`PLUSARGS` for details.\"\"\"\n\nLANGUAGE = os.getenv(\"TOPLEVEL_LANG\") # type: str\n\"\"\"The value of :make:var:`TOPLEVEL_LANG`\"\"\"\n\nSIM_NAME = None # type: str\n\"\"\"The running simulator product information. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n\nSIM_VERSION = None # type: str\n\"\"\"The version of the running simulator. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n\nRANDOM_SEED = None # type: int\n\"\"\"\nThe value passed to the Python default random number generator.\nSee :envvar:`RANDOM_SEED` for details on how the value is computed.\n\"\"\"\n\n_library_coverage = None\n\"\"\" used for cocotb library coverage \"\"\"\n\n\ndef fork(coro: Union[RunningTask, Coroutine]) -> RunningTask:\n \"\"\" Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on its use. 
\"\"\"\n return scheduler.add(coro)\n\n\n# FIXME is this really required?\n_rlock = threading.RLock()\n\n\ndef mem_debug(port):\n import cocotb.memdebug\n cocotb.memdebug.start(port)\n\n\ndef _initialise_testbench(argv_):\n \"\"\"Initialize testbench.\n\n This function is called after the simulator has elaborated all\n entities and is ready to run the test.\n\n The test must be defined by the environment variables\n :envvar:`MODULE` and :envvar:`TESTCASE`.\n\n The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a\n comma-separated list of modules to be executed before the first test.\n \"\"\"\n _rlock.acquire()\n\n if \"COCOTB_LIBRARY_COVERAGE\" in os.environ:\n import coverage\n\n global _library_coverage\n _library_coverage = coverage.coverage(\n data_file=\".coverage.cocotb\",\n branch=True,\n include=[\"{}/*\".format(os.path.dirname(__file__))])\n _library_coverage.start()\n\n global argc, argv\n argv = argv_\n argc = len(argv)\n\n root_name = os.getenv(\"TOPLEVEL\")\n if root_name is not None:\n if root_name == \"\":\n root_name = None\n elif '.' in root_name:\n # Skip any library component of the toplevel\n root_name = root_name.split(\".\", 1)[1]\n\n # sys.path normally includes \"\" (the current directory), but does not appear to when python is embedded.\n # Add it back because users expect to be able to import files in their test directory.\n # TODO: move this to gpi_embed.cpp\n sys.path.insert(0, \"\")\n\n _setup_logging()\n\n # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners\n # If the user doesn't want to see these, they can always change the global\n # warning settings in their test module.\n if not sys.warnoptions:\n warnings.simplefilter(\"default\")\n\n from cocotb import simulator\n\n global SIM_NAME, SIM_VERSION\n SIM_NAME = simulator.get_simulator_product().strip()\n SIM_VERSION = simulator.get_simulator_version().strip()\n\n cocotb.log.info(\"Running on {} version {}\".format(SIM_NAME, SIM_VERSION))\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n log.info(\"Running tests with cocotb v%s from %s\" %\n (__version__, os.path.dirname(__file__)))\n\n # Create the base handle type\n\n process_plusargs()\n\n global scheduler\n scheduler = Scheduler()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n # Setup DUT object\n from cocotb import simulator\n\n handle = simulator.get_root_handle(root_name)\n if not handle:\n raise RuntimeError(\"Can not find root handle ({})\".format(root_name))\n\n dut = cocotb.handle.SimHandle(handle)\n\n # start Regression Manager\n global regression_manager\n regression_manager = RegressionManager.from_discovery(dut)\n regression_manager.execute()\n\n _rlock.release()\n return True\n\n\ndef _sim_event(level, message):\n \"\"\"Function that can be called externally to signal an event.\"\"\"\n # SIM_INFO = 0\n SIM_TEST_FAIL = 1\n SIM_FAIL = 2\n from cocotb.result import TestFailure, SimFailure\n\n if level is 
SIM_TEST_FAIL:\n scheduler.log.error(\"Failing test at simulator request\")\n scheduler.finish_test(TestFailure(\"Failure from external source: %s\" %\n message))\n elif level is SIM_FAIL:\n # We simply return here as the simulator will exit\n # so no cleanup is needed\n msg = (\"Failing test at simulator request before test run completion: \"\n \"%s\" % message)\n scheduler.log.error(msg)\n scheduler.finish_scheduler(SimFailure(msg))\n else:\n scheduler.log.error(\"Unsupported sim event\")\n\n return True\n\n\ndef process_plusargs():\n\n global plusargs\n\n plusargs = {}\n\n for option in cocotb.argv:\n if option.startswith('+'):\n if option.find('=') != -1:\n (name, value) = option[1:].split('=')\n plusargs[name] = value\n else:\n plusargs[option[1:]] = True\n", "path": "cocotb/__init__.py"}], "after_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nCocotb is a coroutine, cosimulation framework for writing testbenches in Python.\n\nSee https://docs.cocotb.org for full documentation\n\"\"\"\nimport os\nimport sys\nimport logging\nimport threading\nimport random\nimport time\nimport warnings\nfrom typing import Dict, List, Union\nfrom collections.abc import Coroutine\n\nimport cocotb.handle\nimport cocotb.log\nfrom cocotb.scheduler import Scheduler\nfrom cocotb.regression import RegressionManager\nfrom cocotb.decorators import RunningTask\n\n# Things we want in the cocotb namespace\nfrom cocotb.decorators import test, coroutine, hook, function, external # noqa: F401\n\nfrom ._version import __version__\n\n\ndef _setup_logging():\n global log\n\n def _reopen_stream_with_buffering(stream_name):\n try:\n if not getattr(sys, stream_name).isatty():\n setattr(sys, stream_name, os.fdopen(getattr(sys, stream_name).fileno(), 'w', 1))\n return True\n return False\n except Exception as e:\n return e\n\n # If stdout/stderr are not TTYs, Python may not have opened them with line\n # buffering. 
In that case, try to reopen them with line buffering\n # explicitly enabled. This ensures that prints such as stack traces always\n # appear. Continue silently if this fails.\n _stdout_buffer_result = _reopen_stream_with_buffering('stdout')\n _stderr_buffer_result = _reopen_stream_with_buffering('stderr')\n\n # Don't set the logging up until we've attempted to fix the standard IO,\n # otherwise it will end up connected to the unfixed IO.\n cocotb.log.default_config()\n log = logging.getLogger(__name__)\n\n # we can't log these things until the logging is set up!\n if _stderr_buffer_result is True:\n log.debug(\"Reopened stderr with line buffering\")\n if _stdout_buffer_result is True:\n log.debug(\"Reopened stdout with line buffering\")\n if isinstance(_stdout_buffer_result, Exception) or isinstance(_stderr_buffer_result, Exception):\n if isinstance(_stdout_buffer_result, Exception):\n log.warning(\"Failed to ensure that stdout is line buffered\", exc_info=_stdout_buffer_result)\n if isinstance(_stderr_buffer_result, Exception):\n log.warning(\"Failed to ensure that stderr is line buffered\", exc_info=_stderr_buffer_result)\n log.warning(\"Some stack traces may not appear because of this.\")\n\n del _stderr_buffer_result, _stdout_buffer_result\n\n\n# Singleton scheduler instance\n# NB this cheekily ensures a singleton since we're replacing the reference\n# so that cocotb.scheduler gives you the singleton instance and not the\n# scheduler package\n\nscheduler = None # type: cocotb.scheduler.Scheduler\n\"\"\"The global scheduler instance.\"\"\"\n\nregression_manager = None # type: cocotb.regression.RegressionManager\n\"\"\"The global regression manager instance.\"\"\"\n\nargv = None # type: List[str]\n\"\"\"The argument list as seen by the simulator\"\"\"\n\nargc = None # type: int\n\"\"\"The length of :data:`cocotb.argv`\"\"\"\n\nplusargs = None # type: Dict[str, Union[bool, str]]\n\"\"\"A dictionary of \"plusargs\" handed to the simulation. See :make:var:`PLUSARGS` for details.\"\"\"\n\nLANGUAGE = os.getenv(\"TOPLEVEL_LANG\") # type: str\n\"\"\"The value of :make:var:`TOPLEVEL_LANG`\"\"\"\n\nSIM_NAME = None # type: str\n\"\"\"The running simulator product information. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n\nSIM_VERSION = None # type: str\n\"\"\"The version of the running simulator. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n\nRANDOM_SEED = None # type: int\n\"\"\"\nThe value passed to the Python default random number generator.\nSee :envvar:`RANDOM_SEED` for details on how the value is computed.\n\"\"\"\n\n_library_coverage = None\n\"\"\" used for cocotb library coverage \"\"\"\n\ntop = None # type: cocotb.handle.SimHandleBase\nr\"\"\"\nA handle to the :envvar:`TOPLEVEL` entity/module.\n\nThis is equivalent to the :term:`DUT` parameter given to cocotb tests, so it can be used wherever that variable can be used.\nIt is particularly useful for extracting information about the :term:`DUT` in module-level class and function definitions;\nand in parameters to :class:`.TestFactory`\\ s.\n``None`` if :mod:`cocotb` was not loaded from a simulator.\n\"\"\"\n\n\ndef fork(coro: Union[RunningTask, Coroutine]) -> RunningTask:\n \"\"\" Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on its use. 
\"\"\"\n return scheduler.add(coro)\n\n\n# FIXME is this really required?\n_rlock = threading.RLock()\n\n\ndef mem_debug(port):\n import cocotb.memdebug\n cocotb.memdebug.start(port)\n\n\ndef _initialise_testbench(argv_):\n \"\"\"Initialize testbench.\n\n This function is called after the simulator has elaborated all\n entities and is ready to run the test.\n\n The test must be defined by the environment variables\n :envvar:`MODULE` and :envvar:`TESTCASE`.\n\n The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a\n comma-separated list of modules to be executed before the first test.\n \"\"\"\n _rlock.acquire()\n\n if \"COCOTB_LIBRARY_COVERAGE\" in os.environ:\n import coverage\n\n global _library_coverage\n _library_coverage = coverage.coverage(\n data_file=\".coverage.cocotb\",\n branch=True,\n include=[\"{}/*\".format(os.path.dirname(__file__))])\n _library_coverage.start()\n\n global argc, argv\n argv = argv_\n argc = len(argv)\n\n root_name = os.getenv(\"TOPLEVEL\")\n if root_name is not None:\n if root_name == \"\":\n root_name = None\n elif '.' in root_name:\n # Skip any library component of the toplevel\n root_name = root_name.split(\".\", 1)[1]\n\n # sys.path normally includes \"\" (the current directory), but does not appear to when python is embedded.\n # Add it back because users expect to be able to import files in their test directory.\n # TODO: move this to gpi_embed.cpp\n sys.path.insert(0, \"\")\n\n _setup_logging()\n\n # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners\n # If the user doesn't want to see these, they can always change the global\n # warning settings in their test module.\n if not sys.warnoptions:\n warnings.simplefilter(\"default\")\n\n from cocotb import simulator\n\n global SIM_NAME, SIM_VERSION\n SIM_NAME = simulator.get_simulator_product().strip()\n SIM_VERSION = simulator.get_simulator_version().strip()\n\n cocotb.log.info(\"Running on {} version {}\".format(SIM_NAME, SIM_VERSION))\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n log.info(\"Running tests with cocotb v%s from %s\" %\n (__version__, os.path.dirname(__file__)))\n\n # Create the base handle type\n\n process_plusargs()\n\n global scheduler\n scheduler = Scheduler()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n # Setup DUT object\n from cocotb import simulator\n\n handle = simulator.get_root_handle(root_name)\n if not handle:\n raise RuntimeError(\"Can not find root handle ({})\".format(root_name))\n\n global top\n top = cocotb.handle.SimHandle(handle)\n\n # start Regression Manager\n global regression_manager\n regression_manager = RegressionManager.from_discovery(top)\n regression_manager.execute()\n\n _rlock.release()\n return True\n\n\ndef _sim_event(level, message):\n \"\"\"Function that can be called externally to signal an event.\"\"\"\n # SIM_INFO = 0\n SIM_TEST_FAIL = 1\n SIM_FAIL = 2\n from cocotb.result import TestFailure, SimFailure\n\n 
if level is SIM_TEST_FAIL:\n scheduler.log.error(\"Failing test at simulator request\")\n scheduler.finish_test(TestFailure(\"Failure from external source: %s\" %\n message))\n elif level is SIM_FAIL:\n # We simply return here as the simulator will exit\n # so no cleanup is needed\n msg = (\"Failing test at simulator request before test run completion: \"\n \"%s\" % message)\n scheduler.log.error(msg)\n scheduler.finish_scheduler(SimFailure(msg))\n else:\n scheduler.log.error(\"Unsupported sim event\")\n\n return True\n\n\ndef process_plusargs():\n\n global plusargs\n\n plusargs = {}\n\n for option in cocotb.argv:\n if option.startswith('+'):\n if option.find('=') != -1:\n (name, value) = option[1:].split('=')\n plusargs[name] = value\n else:\n plusargs[option[1:]] = True\n", "path": "cocotb/__init__.py"}]}
3,711
360
gh_patches_debug_39385
rasdani/github-patches
git_diff
mabel-dev__opteryx-1383
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ✨ GCS improvements Create the client object once and reuse List blobs should only return the name of the blob and not any other details --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opteryx/connectors/gcp_cloudstorage_connector.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import os 14 from typing import Dict 15 from typing import List 16 17 import pyarrow 18 from orso.schema import FlatColumn 19 from orso.schema import RelationSchema 20 from orso.tools import single_item_cache 21 from orso.types import OrsoTypes 22 23 from opteryx.connectors.base.base_connector import BaseConnector 24 from opteryx.connectors.capabilities import Cacheable 25 from opteryx.connectors.capabilities import Partitionable 26 from opteryx.connectors.capabilities import PredicatePushable 27 from opteryx.exceptions import DatasetNotFoundError 28 from opteryx.exceptions import MissingDependencyError 29 from opteryx.exceptions import UnsupportedFileTypeError 30 from opteryx.utils import paths 31 from opteryx.utils.file_decoders import VALID_EXTENSIONS 32 from opteryx.utils.file_decoders import get_decoder 33 34 35 class GcpCloudStorageConnector(BaseConnector, Cacheable, Partitionable, PredicatePushable): 36 __mode__ = "Blob" 37 38 PUSHABLE_OPS: Dict[str, bool] = { 39 "Eq": True, 40 "NotEq": True, 41 "Gt": True, 42 "GtEq": True, 43 "Lt": True, 44 "LtEq": True, 45 } 46 47 PUSHABLE_TYPES = {OrsoTypes.BOOLEAN, OrsoTypes.DOUBLE, OrsoTypes.INTEGER, OrsoTypes.VARCHAR} 48 49 def __init__(self, credentials=None, **kwargs): 50 try: 51 from google.auth.credentials import AnonymousCredentials 52 from google.cloud import storage 53 except ImportError as err: 54 raise MissingDependencyError(err.name) from err 55 56 BaseConnector.__init__(self, **kwargs) 57 Partitionable.__init__(self, **kwargs) 58 Cacheable.__init__(self, **kwargs) 59 PredicatePushable.__init__(self, **kwargs) 60 61 self.dataset = self.dataset.replace(".", "/") 62 self.credentials = credentials 63 64 # we're going to cache the first blob as the schema and dataset reader 65 # sometimes both start here 66 self.cached_first_blob = None 67 self.client = self._get_storage_client() 68 69 def _get_storage_client(self): 70 from google.cloud import storage 71 72 if os.environ.get("STORAGE_EMULATOR_HOST"): 73 from google.auth.credentials import AnonymousCredentials 74 75 return storage.Client(credentials=AnonymousCredentials()) 76 else: # pragma: no cover 77 return storage.Client() 78 79 def _get_blob(self, bucket: str, blob_name: str): 80 gcs_bucket = self.client.get_bucket(bucket) 81 blob = gcs_bucket.get_blob(blob_name) 82 return blob 83 84 def read_blob(self, *, blob_name, **kwargs): 85 bucket, object_path, name, extension = paths.get_parts(blob_name) 86 87 bucket = bucket.replace("va_data", "va-data") 88 bucket = 
bucket.replace("data_", "data-") 89 90 blob = self._get_blob( 91 bucket=bucket, 92 blob_name=object_path + "/" + name + extension, 93 ) 94 return blob.download_as_bytes() 95 96 @single_item_cache 97 def get_list_of_blob_names(self, *, prefix: str) -> List[str]: 98 bucket, object_path, _, _ = paths.get_parts(prefix) 99 bucket = bucket.replace("va_data", "va-data") 100 bucket = bucket.replace("data_", "data-") 101 102 gcs_bucket = self.client.get_bucket(bucket) 103 blobs = self.client.list_blobs( 104 bucket_or_name=gcs_bucket, prefix=object_path, fields="items(name)" 105 ) 106 blobs = (bucket + "/" + blob.name for blob in blobs if not blob.name.endswith("/")) 107 return [blob for blob in blobs if ("." + blob.split(".")[-1].lower()) in VALID_EXTENSIONS] 108 109 def read_dataset( 110 self, columns: list = None, predicates: list = None, **kwargs 111 ) -> pyarrow.Table: 112 blob_names = self.partition_scheme.get_blobs_in_partition( 113 start_date=self.start_date, 114 end_date=self.end_date, 115 blob_list_getter=self.get_list_of_blob_names, 116 prefix=self.dataset, 117 ) 118 119 for blob_name in blob_names: 120 try: 121 decoder = get_decoder(blob_name) 122 blob_bytes = self.read_blob(blob_name=blob_name, statistics=self.statistics) 123 yield decoder(blob_bytes, projection=columns, selection=predicates) 124 except UnsupportedFileTypeError: 125 pass 126 127 def get_dataset_schema(self) -> RelationSchema: 128 # Try to read the schema from the metastore 129 self.schema = self.read_schema_from_metastore() 130 if self.schema: 131 return self.schema 132 133 # Read first blob for schema inference and cache it 134 record = next(self.read_dataset(), None) 135 self.cached_first_blob = record 136 137 if record is None: 138 raise DatasetNotFoundError(dataset=self.dataset) 139 140 arrow_schema = record.schema 141 142 self.schema = RelationSchema( 143 name=self.dataset, 144 columns=[FlatColumn.from_arrow(field) for field in arrow_schema], 145 ) 146 147 return self.schema 148 ``` Path: `opteryx/__version__.py` Content: ``` 1 __build__ = 203 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 Store the version here so: 17 1) we don't load dependencies by storing it in __init__.py 18 2) we can import it in setup.py for the same reason 19 """ 20 from enum import Enum # isort: skip 21 22 23 class VersionStatus(Enum): 24 ALPHA = "alpha" 25 BETA = "beta" 26 RELEASE = "release" 27 28 29 _major = 0 30 _minor = 12 31 _revision = 3 32 _status = VersionStatus.BETA 33 34 __version__ = f"{_major}.{_minor}.{_revision}" + ( 35 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else "" 36 ) 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opteryx/__version__.py b/opteryx/__version__.py --- a/opteryx/__version__.py +++ b/opteryx/__version__.py @@ -1,4 +1,4 @@ -__build__ = 203 +__build__ = 206 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/opteryx/connectors/gcp_cloudstorage_connector.py b/opteryx/connectors/gcp_cloudstorage_connector.py --- a/opteryx/connectors/gcp_cloudstorage_connector.py +++ b/opteryx/connectors/gcp_cloudstorage_connector.py @@ -11,10 +11,13 @@ # limitations under the License. import os +import urllib.request from typing import Dict from typing import List import pyarrow +import requests +from google.auth.transport.requests import Request from orso.schema import FlatColumn from orso.schema import RelationSchema from orso.tools import single_item_cache @@ -65,6 +68,17 @@ # sometimes both start here self.cached_first_blob = None self.client = self._get_storage_client() + self.client_credentials = self.client._credentials + + # Cache access tokens for accessing GCS + if not self.client_credentials.valid: + request = Request() + self.client_credentials.refresh(request) + self.access_token = self.client_credentials.token + + # Create a HTTP connection session to reduce effort for + # each fetch + self.session = requests.Session() def _get_storage_client(self): from google.cloud import storage @@ -76,22 +90,25 @@ else: # pragma: no cover return storage.Client() - def _get_blob(self, bucket: str, blob_name: str): - gcs_bucket = self.client.get_bucket(bucket) - blob = gcs_bucket.get_blob(blob_name) - return blob - def read_blob(self, *, blob_name, **kwargs): - bucket, object_path, name, extension = paths.get_parts(blob_name) + # For performance we use the GCS API directly, this is roughly 10% + # faster than using the SDK. + bucket, _, _, _ = paths.get_parts(blob_name) + + # Ensure the credentials are valid, refreshing them if necessary + if not self.client_credentials.valid: + request = Request() + self.client_credentials.refresh(request) + self.access_token = self.client_credentials.token bucket = bucket.replace("va_data", "va-data") bucket = bucket.replace("data_", "data-") + object_full_path = urllib.parse.quote(blob_name[(len(bucket) + 1) :], safe="") - blob = self._get_blob( - bucket=bucket, - blob_name=object_path + "/" + name + extension, - ) - return blob.download_as_bytes() + url = f"https://storage.googleapis.com/storage/v1/b/{bucket}/o/{object_full_path}?alt=media" + + response = self.session.get(url, headers={"Authorization": f"Bearer {self.access_token}"}) + return response.content @single_item_cache def get_list_of_blob_names(self, *, prefix: str) -> List[str]:
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 203\n+__build__ = 206\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/connectors/gcp_cloudstorage_connector.py b/opteryx/connectors/gcp_cloudstorage_connector.py\n--- a/opteryx/connectors/gcp_cloudstorage_connector.py\n+++ b/opteryx/connectors/gcp_cloudstorage_connector.py\n@@ -11,10 +11,13 @@\n # limitations under the License.\n \n import os\n+import urllib.request\n from typing import Dict\n from typing import List\n \n import pyarrow\n+import requests\n+from google.auth.transport.requests import Request\n from orso.schema import FlatColumn\n from orso.schema import RelationSchema\n from orso.tools import single_item_cache\n@@ -65,6 +68,17 @@\n # sometimes both start here\n self.cached_first_blob = None\n self.client = self._get_storage_client()\n+ self.client_credentials = self.client._credentials\n+\n+ # Cache access tokens for accessing GCS\n+ if not self.client_credentials.valid:\n+ request = Request()\n+ self.client_credentials.refresh(request)\n+ self.access_token = self.client_credentials.token\n+\n+ # Create a HTTP connection session to reduce effort for\n+ # each fetch\n+ self.session = requests.Session()\n \n def _get_storage_client(self):\n from google.cloud import storage\n@@ -76,22 +90,25 @@\n else: # pragma: no cover\n return storage.Client()\n \n- def _get_blob(self, bucket: str, blob_name: str):\n- gcs_bucket = self.client.get_bucket(bucket)\n- blob = gcs_bucket.get_blob(blob_name)\n- return blob\n-\n def read_blob(self, *, blob_name, **kwargs):\n- bucket, object_path, name, extension = paths.get_parts(blob_name)\n+ # For performance we use the GCS API directly, this is roughly 10%\n+ # faster than using the SDK.\n+ bucket, _, _, _ = paths.get_parts(blob_name)\n+\n+ # Ensure the credentials are valid, refreshing them if necessary\n+ if not self.client_credentials.valid:\n+ request = Request()\n+ self.client_credentials.refresh(request)\n+ self.access_token = self.client_credentials.token\n \n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n+ object_full_path = urllib.parse.quote(blob_name[(len(bucket) + 1) :], safe=\"\")\n \n- blob = self._get_blob(\n- bucket=bucket,\n- blob_name=object_path + \"/\" + name + extension,\n- )\n- return blob.download_as_bytes()\n+ url = f\"https://storage.googleapis.com/storage/v1/b/{bucket}/o/{object_full_path}?alt=media\"\n+\n+ response = self.session.get(url, headers={\"Authorization\": f\"Bearer {self.access_token}\"})\n+ return response.content\n \n @single_item_cache\n def get_list_of_blob_names(self, *, prefix: str) -> List[str]:\n", "issue": "\u2728 GCS improvements\nCreate the client object once and reuse\n\nList blobs should only return the name of the blob and not any other details \n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language 
governing permissions and\n# limitations under the License.\n\nimport os\nfrom typing import Dict\nfrom typing import List\n\nimport pyarrow\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.tools import single_item_cache\nfrom orso.types import OrsoTypes\n\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.connectors.capabilities import Cacheable\nfrom opteryx.connectors.capabilities import Partitionable\nfrom opteryx.connectors.capabilities import PredicatePushable\nfrom opteryx.exceptions import DatasetNotFoundError\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnsupportedFileTypeError\nfrom opteryx.utils import paths\nfrom opteryx.utils.file_decoders import VALID_EXTENSIONS\nfrom opteryx.utils.file_decoders import get_decoder\n\n\nclass GcpCloudStorageConnector(BaseConnector, Cacheable, Partitionable, PredicatePushable):\n __mode__ = \"Blob\"\n\n PUSHABLE_OPS: Dict[str, bool] = {\n \"Eq\": True,\n \"NotEq\": True,\n \"Gt\": True,\n \"GtEq\": True,\n \"Lt\": True,\n \"LtEq\": True,\n }\n\n PUSHABLE_TYPES = {OrsoTypes.BOOLEAN, OrsoTypes.DOUBLE, OrsoTypes.INTEGER, OrsoTypes.VARCHAR}\n\n def __init__(self, credentials=None, **kwargs):\n try:\n from google.auth.credentials import AnonymousCredentials\n from google.cloud import storage\n except ImportError as err:\n raise MissingDependencyError(err.name) from err\n\n BaseConnector.__init__(self, **kwargs)\n Partitionable.__init__(self, **kwargs)\n Cacheable.__init__(self, **kwargs)\n PredicatePushable.__init__(self, **kwargs)\n\n self.dataset = self.dataset.replace(\".\", \"/\")\n self.credentials = credentials\n\n # we're going to cache the first blob as the schema and dataset reader\n # sometimes both start here\n self.cached_first_blob = None\n self.client = self._get_storage_client()\n\n def _get_storage_client(self):\n from google.cloud import storage\n\n if os.environ.get(\"STORAGE_EMULATOR_HOST\"):\n from google.auth.credentials import AnonymousCredentials\n\n return storage.Client(credentials=AnonymousCredentials())\n else: # pragma: no cover\n return storage.Client()\n\n def _get_blob(self, bucket: str, blob_name: str):\n gcs_bucket = self.client.get_bucket(bucket)\n blob = gcs_bucket.get_blob(blob_name)\n return blob\n\n def read_blob(self, *, blob_name, **kwargs):\n bucket, object_path, name, extension = paths.get_parts(blob_name)\n\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n\n blob = self._get_blob(\n bucket=bucket,\n blob_name=object_path + \"/\" + name + extension,\n )\n return blob.download_as_bytes()\n\n @single_item_cache\n def get_list_of_blob_names(self, *, prefix: str) -> List[str]:\n bucket, object_path, _, _ = paths.get_parts(prefix)\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n\n gcs_bucket = self.client.get_bucket(bucket)\n blobs = self.client.list_blobs(\n bucket_or_name=gcs_bucket, prefix=object_path, fields=\"items(name)\"\n )\n blobs = (bucket + \"/\" + blob.name for blob in blobs if not blob.name.endswith(\"/\"))\n return [blob for blob in blobs if (\".\" + blob.split(\".\")[-1].lower()) in VALID_EXTENSIONS]\n\n def read_dataset(\n self, columns: list = None, predicates: list = None, **kwargs\n ) -> pyarrow.Table:\n blob_names = self.partition_scheme.get_blobs_in_partition(\n start_date=self.start_date,\n end_date=self.end_date,\n blob_list_getter=self.get_list_of_blob_names,\n 
prefix=self.dataset,\n )\n\n for blob_name in blob_names:\n try:\n decoder = get_decoder(blob_name)\n blob_bytes = self.read_blob(blob_name=blob_name, statistics=self.statistics)\n yield decoder(blob_bytes, projection=columns, selection=predicates)\n except UnsupportedFileTypeError:\n pass\n\n def get_dataset_schema(self) -> RelationSchema:\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # Read first blob for schema inference and cache it\n record = next(self.read_dataset(), None)\n self.cached_first_blob = record\n\n if record is None:\n raise DatasetNotFoundError(dataset=self.dataset)\n\n arrow_schema = record.schema\n\n self.schema = RelationSchema(\n name=self.dataset,\n columns=[FlatColumn.from_arrow(field) for field in arrow_schema],\n )\n\n return self.schema\n", "path": "opteryx/connectors/gcp_cloudstorage_connector.py"}, {"content": "__build__ = 203\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 12\n_revision = 3\n_status = VersionStatus.BETA\n\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport urllib.request\nfrom typing import Dict\nfrom typing import List\n\nimport pyarrow\nimport requests\nfrom google.auth.transport.requests import Request\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.tools import single_item_cache\nfrom orso.types import OrsoTypes\n\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.connectors.capabilities import Cacheable\nfrom opteryx.connectors.capabilities import Partitionable\nfrom opteryx.connectors.capabilities import PredicatePushable\nfrom opteryx.exceptions import DatasetNotFoundError\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnsupportedFileTypeError\nfrom opteryx.utils import paths\nfrom opteryx.utils.file_decoders import VALID_EXTENSIONS\nfrom opteryx.utils.file_decoders import get_decoder\n\n\nclass 
GcpCloudStorageConnector(BaseConnector, Cacheable, Partitionable, PredicatePushable):\n __mode__ = \"Blob\"\n\n PUSHABLE_OPS: Dict[str, bool] = {\n \"Eq\": True,\n \"NotEq\": True,\n \"Gt\": True,\n \"GtEq\": True,\n \"Lt\": True,\n \"LtEq\": True,\n }\n\n PUSHABLE_TYPES = {OrsoTypes.BOOLEAN, OrsoTypes.DOUBLE, OrsoTypes.INTEGER, OrsoTypes.VARCHAR}\n\n def __init__(self, credentials=None, **kwargs):\n try:\n from google.auth.credentials import AnonymousCredentials\n from google.cloud import storage\n except ImportError as err:\n raise MissingDependencyError(err.name) from err\n\n BaseConnector.__init__(self, **kwargs)\n Partitionable.__init__(self, **kwargs)\n Cacheable.__init__(self, **kwargs)\n PredicatePushable.__init__(self, **kwargs)\n\n self.dataset = self.dataset.replace(\".\", \"/\")\n self.credentials = credentials\n\n # we're going to cache the first blob as the schema and dataset reader\n # sometimes both start here\n self.cached_first_blob = None\n self.client = self._get_storage_client()\n self.client_credentials = self.client._credentials\n\n # Cache access tokens for accessing GCS\n if not self.client_credentials.valid:\n request = Request()\n self.client_credentials.refresh(request)\n self.access_token = self.client_credentials.token\n\n # Create a HTTP connection session to reduce effort for\n # each fetch\n self.session = requests.Session()\n\n def _get_storage_client(self):\n from google.cloud import storage\n\n if os.environ.get(\"STORAGE_EMULATOR_HOST\"):\n from google.auth.credentials import AnonymousCredentials\n\n return storage.Client(credentials=AnonymousCredentials())\n else: # pragma: no cover\n return storage.Client()\n\n def read_blob(self, *, blob_name, **kwargs):\n # For performance we use the GCS API directly, this is roughly 10%\n # faster than using the SDK.\n bucket, _, _, _ = paths.get_parts(blob_name)\n\n # Ensure the credentials are valid, refreshing them if necessary\n if not self.client_credentials.valid:\n request = Request()\n self.client_credentials.refresh(request)\n self.access_token = self.client_credentials.token\n\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n object_full_path = urllib.parse.quote(blob_name[(len(bucket) + 1) :], safe=\"\")\n\n url = f\"https://storage.googleapis.com/storage/v1/b/{bucket}/o/{object_full_path}?alt=media\"\n\n response = self.session.get(url, headers={\"Authorization\": f\"Bearer {self.access_token}\"})\n return response.content\n\n @single_item_cache\n def get_list_of_blob_names(self, *, prefix: str) -> List[str]:\n bucket, object_path, _, _ = paths.get_parts(prefix)\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n\n gcs_bucket = self.client.get_bucket(bucket)\n blobs = self.client.list_blobs(\n bucket_or_name=gcs_bucket, prefix=object_path, fields=\"items(name)\"\n )\n blobs = (bucket + \"/\" + blob.name for blob in blobs if not blob.name.endswith(\"/\"))\n return [blob for blob in blobs if (\".\" + blob.split(\".\")[-1].lower()) in VALID_EXTENSIONS]\n\n def read_dataset(\n self, columns: list = None, predicates: list = None, **kwargs\n ) -> pyarrow.Table:\n blob_names = self.partition_scheme.get_blobs_in_partition(\n start_date=self.start_date,\n end_date=self.end_date,\n blob_list_getter=self.get_list_of_blob_names,\n prefix=self.dataset,\n )\n\n for blob_name in blob_names:\n try:\n decoder = get_decoder(blob_name)\n blob_bytes = self.read_blob(blob_name=blob_name, statistics=self.statistics)\n yield 
decoder(blob_bytes, projection=columns, selection=predicates)\n except UnsupportedFileTypeError:\n pass\n\n def get_dataset_schema(self) -> RelationSchema:\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # Read first blob for schema inference and cache it\n record = next(self.read_dataset(), None)\n self.cached_first_blob = record\n\n if record is None:\n raise DatasetNotFoundError(dataset=self.dataset)\n\n arrow_schema = record.schema\n\n self.schema = RelationSchema(\n name=self.dataset,\n columns=[FlatColumn.from_arrow(field) for field in arrow_schema],\n )\n\n return self.schema\n", "path": "opteryx/connectors/gcp_cloudstorage_connector.py"}, {"content": "__build__ = 206\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 12\n_revision = 3\n_status = VersionStatus.BETA\n\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
2,169
745
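The record above swaps the google-cloud-storage SDK download for direct calls to the GCS JSON API, with a lazily refreshed OAuth token and a reused `requests.Session`. A minimal standalone sketch of that fetch pattern, assuming application-default credentials; the function name, bucket `example-bucket`, and object path are placeholders, not part of the Opteryx codebase:

```python
import urllib.parse

import requests
from google.auth.transport.requests import Request
from google.cloud import storage


def fetch_blob_bytes(client, session, bucket, object_name):
    """Download a blob through the GCS JSON API instead of the SDK helpers."""
    credentials = client._credentials
    if not credentials.valid:
        # Refresh the cached OAuth token only when it has expired.
        credentials.refresh(Request())
    # The object name must be percent-encoded as a single path segment.
    encoded = urllib.parse.quote(object_name, safe="")
    url = f"https://storage.googleapis.com/storage/v1/b/{bucket}/o/{encoded}?alt=media"
    response = session.get(url, headers={"Authorization": f"Bearer {credentials.token}"})
    response.raise_for_status()
    return response.content


if __name__ == "__main__":
    gcs_client = storage.Client()      # resolves application-default credentials
    http_session = requests.Session()  # reused so each fetch skips a fresh TLS handshake
    payload = fetch_blob_bytes(gcs_client, http_session, "example-bucket", "prefix/data.parquet")
    print(len(payload))
```

Reusing one session and refreshing the token only when it expires is where the record's reported speedup over per-call SDK downloads comes from.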
gh_patches_debug_5311
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-657
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Lower span limit to a debug level message Currently we log these as a warn message, and can be quite loud for logging https://github.com/scoutapp/scout_apm_python/blob/master/src/scout_apm/core/tracked_request.py#L92-L96 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/core/tracked_request.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import datetime as dt 5 import logging 6 from contextlib import contextmanager 7 from uuid import uuid4 8 9 from scout_apm.core import backtrace, objtrace 10 from scout_apm.core.agent.commands import BatchCommand 11 from scout_apm.core.agent.socket import CoreAgentSocketThread 12 from scout_apm.core.n_plus_one_tracker import NPlusOneTracker 13 from scout_apm.core.samplers.memory import get_rss_in_mb 14 from scout_apm.core.samplers.thread import SamplersThread 15 16 logger = logging.getLogger(__name__) 17 18 19 class TrackedRequest(object): 20 """ 21 This is a container which keeps track of all module instances for a single 22 request. For convenience they are made available as attributes based on 23 their keyname 24 """ 25 26 __slots__ = ( 27 "request_id", 28 "start_time", 29 "end_time", 30 "active_spans", 31 "complete_spans", 32 "tags", 33 "is_real_request", 34 "_memory_start", 35 "n_plus_one_tracker", 36 "hit_max", 37 ) 38 39 # Stop adding new spans at this point, to avoid exhausting memory 40 MAX_COMPLETE_SPANS = 1500 41 42 @classmethod 43 def instance(cls): 44 from scout_apm.core.context import context 45 46 return context.get_tracked_request() 47 48 def __init__(self): 49 self.request_id = "req-" + str(uuid4()) 50 self.start_time = dt.datetime.utcnow() 51 self.end_time = None 52 self.active_spans = [] 53 self.complete_spans = [] 54 self.tags = {} 55 self.is_real_request = False 56 self._memory_start = get_rss_in_mb() 57 self.n_plus_one_tracker = NPlusOneTracker() 58 self.hit_max = False 59 logger.debug("Starting request: %s", self.request_id) 60 61 def __repr__(self): 62 # Incomplete to avoid TMI 63 return "<TrackedRequest(request_id={}, tags={})>".format( 64 repr(self.request_id), repr(self.tags) 65 ) 66 67 def tag(self, key, value): 68 if key in self.tags: 69 logger.debug( 70 "Overwriting previously set tag for request %s: %s", 71 self.request_id, 72 key, 73 ) 74 self.tags[key] = value 75 76 def start_span( 77 self, 78 operation, 79 ignore=False, 80 ignore_children=False, 81 should_capture_backtrace=True, 82 ): 83 parent = self.current_span() 84 if parent is not None: 85 parent_id = parent.span_id 86 if parent.ignore_children: 87 ignore = True 88 ignore_children = True 89 else: 90 parent_id = None 91 92 if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS: 93 if not self.hit_max: 94 logger.warning( 95 "Hit the maximum number of spans, this trace will be incomplete." 
96 ) 97 self.hit_max = True 98 ignore = True 99 ignore_children = True 100 101 new_span = Span( 102 request_id=self.request_id, 103 operation=operation, 104 ignore=ignore, 105 ignore_children=ignore_children, 106 parent=parent_id, 107 should_capture_backtrace=should_capture_backtrace, 108 ) 109 self.active_spans.append(new_span) 110 return new_span 111 112 def stop_span(self): 113 try: 114 stopping_span = self.active_spans.pop() 115 except IndexError as exc: 116 logger.debug("Exception when stopping span", exc_info=exc) 117 else: 118 stopping_span.stop() 119 if not stopping_span.ignore: 120 stopping_span.annotate() 121 self.complete_spans.append(stopping_span) 122 123 if len(self.active_spans) == 0: 124 self.finish() 125 126 @contextmanager 127 def span(self, *args, **kwargs): 128 span = self.start_span(*args, **kwargs) 129 try: 130 yield span 131 finally: 132 self.stop_span() 133 134 def current_span(self): 135 if self.active_spans: 136 return self.active_spans[-1] 137 else: 138 return None 139 140 # Request is done, release any info we have about it. 141 def finish(self): 142 logger.debug("Stopping request: %s", self.request_id) 143 if self.end_time is None: 144 self.end_time = dt.datetime.utcnow() 145 if self.is_real_request: 146 self.tag("mem_delta", self._get_mem_delta()) 147 if not self.is_ignored(): 148 batch_command = BatchCommand.from_tracked_request(self) 149 CoreAgentSocketThread.send(batch_command) 150 SamplersThread.ensure_started() 151 152 details = " ".join( 153 "{}={}".format(key, value) 154 for key, value in [ 155 ("start_time", self.start_time), 156 ("end_time", self.end_time), 157 ("duration", (self.end_time - self.start_time).total_seconds()), 158 ("active_spans", len(self.active_spans)), 159 ("complete_spans", len(self.complete_spans)), 160 ("tags", len(self.tags)), 161 ("hit_max", self.hit_max), 162 ("is_real_request", self.is_real_request), 163 ] 164 ) 165 logger.debug("Request %s %s", self.request_id, details) 166 167 from scout_apm.core.context import context 168 169 context.clear_tracked_request(self) 170 171 def _get_mem_delta(self): 172 current_mem = get_rss_in_mb() 173 if current_mem > self._memory_start: 174 return current_mem - self._memory_start 175 return 0.0 176 177 # A request is ignored if the tag "ignore_transaction" is set to True 178 def is_ignored(self): 179 return self.tags.get("ignore_transaction", False) 180 181 182 class Span(object): 183 __slots__ = ( 184 "span_id", 185 "start_time", 186 "end_time", 187 "request_id", 188 "operation", 189 "ignore", 190 "ignore_children", 191 "parent", 192 "tags", 193 "start_objtrace_counts", 194 "end_objtrace_counts", 195 "should_capture_backtrace", 196 ) 197 198 def __init__( 199 self, 200 request_id=None, 201 operation=None, 202 ignore=False, 203 ignore_children=False, 204 parent=None, 205 should_capture_backtrace=True, 206 ): 207 self.span_id = "span-" + str(uuid4()) 208 self.start_time = dt.datetime.utcnow() 209 self.end_time = None 210 self.request_id = request_id 211 self.operation = operation 212 self.ignore = ignore 213 self.ignore_children = ignore_children 214 self.parent = parent 215 self.tags = {} 216 self.start_objtrace_counts = objtrace.get_counts() 217 self.end_objtrace_counts = (0, 0, 0, 0) 218 self.should_capture_backtrace = should_capture_backtrace 219 220 def __repr__(self): 221 # Incomplete to avoid TMI 222 return "<Span(span_id={}, operation={}, ignore={}, tags={})>".format( 223 repr(self.span_id), repr(self.operation), repr(self.ignore), repr(self.tags) 224 ) 225 226 def stop(self): 227 
self.end_time = dt.datetime.utcnow() 228 self.end_objtrace_counts = objtrace.get_counts() 229 230 def tag(self, key, value): 231 if key in self.tags: 232 logger.debug( 233 "Overwriting previously set tag for span %s: %s", self.span_id, key 234 ) 235 self.tags[key] = value 236 237 # In seconds 238 def duration(self): 239 if self.end_time is not None: 240 return (self.end_time - self.start_time).total_seconds() 241 else: 242 # Current, running duration 243 return (dt.datetime.utcnow() - self.start_time).total_seconds() 244 245 # Add any interesting annotations to the span. Assumes that we are in the 246 # process of stopping this span. 247 def annotate(self): 248 self.add_allocation_tags() 249 if not self.should_capture_backtrace: 250 return 251 slow_threshold = 0.5 252 if self.duration() > slow_threshold: 253 self.capture_backtrace() 254 255 def add_allocation_tags(self): 256 if not objtrace.is_extension: 257 return 258 259 start_allocs = ( 260 self.start_objtrace_counts[0] 261 + self.start_objtrace_counts[1] 262 + self.start_objtrace_counts[2] 263 ) 264 end_allocs = ( 265 self.end_objtrace_counts[0] 266 + self.end_objtrace_counts[1] 267 + self.end_objtrace_counts[2] 268 ) 269 270 # If even one of the counters rolled over, we're pretty much 271 # guaranteed to have end_allocs be less than start_allocs. 272 # This should rarely happen. Max Unsigned Long Long is a big number 273 if end_allocs - start_allocs < 0: 274 logger.debug( 275 "End allocation count smaller than start allocation " 276 "count for span %s: start = %d, end = %d", 277 self.span_id, 278 start_allocs, 279 end_allocs, 280 ) 281 return 282 283 self.tag("allocations", end_allocs - start_allocs) 284 self.tag("start_allocations", start_allocs) 285 self.tag("stop_allocations", end_allocs) 286 287 def capture_backtrace(self): 288 self.tag("stack", backtrace.capture()) 289 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/core/tracked_request.py b/src/scout_apm/core/tracked_request.py --- a/src/scout_apm/core/tracked_request.py +++ b/src/scout_apm/core/tracked_request.py @@ -91,7 +91,7 @@ if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS: if not self.hit_max: - logger.warning( + logger.debug( "Hit the maximum number of spans, this trace will be incomplete." ) self.hit_max = True
{"golden_diff": "diff --git a/src/scout_apm/core/tracked_request.py b/src/scout_apm/core/tracked_request.py\n--- a/src/scout_apm/core/tracked_request.py\n+++ b/src/scout_apm/core/tracked_request.py\n@@ -91,7 +91,7 @@\n \n if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS:\n if not self.hit_max:\n- logger.warning(\n+ logger.debug(\n \"Hit the maximum number of spans, this trace will be incomplete.\"\n )\n self.hit_max = True\n", "issue": "Lower span limit to a debug level message\nCurrently we log these as a warn message, and can be quite loud for logging\r\n\r\nhttps://github.com/scoutapp/scout_apm_python/blob/master/src/scout_apm/core/tracked_request.py#L92-L96\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\nfrom contextlib import contextmanager\nfrom uuid import uuid4\n\nfrom scout_apm.core import backtrace, objtrace\nfrom scout_apm.core.agent.commands import BatchCommand\nfrom scout_apm.core.agent.socket import CoreAgentSocketThread\nfrom scout_apm.core.n_plus_one_tracker import NPlusOneTracker\nfrom scout_apm.core.samplers.memory import get_rss_in_mb\nfrom scout_apm.core.samplers.thread import SamplersThread\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrackedRequest(object):\n \"\"\"\n This is a container which keeps track of all module instances for a single\n request. For convenience they are made available as attributes based on\n their keyname\n \"\"\"\n\n __slots__ = (\n \"request_id\",\n \"start_time\",\n \"end_time\",\n \"active_spans\",\n \"complete_spans\",\n \"tags\",\n \"is_real_request\",\n \"_memory_start\",\n \"n_plus_one_tracker\",\n \"hit_max\",\n )\n\n # Stop adding new spans at this point, to avoid exhausting memory\n MAX_COMPLETE_SPANS = 1500\n\n @classmethod\n def instance(cls):\n from scout_apm.core.context import context\n\n return context.get_tracked_request()\n\n def __init__(self):\n self.request_id = \"req-\" + str(uuid4())\n self.start_time = dt.datetime.utcnow()\n self.end_time = None\n self.active_spans = []\n self.complete_spans = []\n self.tags = {}\n self.is_real_request = False\n self._memory_start = get_rss_in_mb()\n self.n_plus_one_tracker = NPlusOneTracker()\n self.hit_max = False\n logger.debug(\"Starting request: %s\", self.request_id)\n\n def __repr__(self):\n # Incomplete to avoid TMI\n return \"<TrackedRequest(request_id={}, tags={})>\".format(\n repr(self.request_id), repr(self.tags)\n )\n\n def tag(self, key, value):\n if key in self.tags:\n logger.debug(\n \"Overwriting previously set tag for request %s: %s\",\n self.request_id,\n key,\n )\n self.tags[key] = value\n\n def start_span(\n self,\n operation,\n ignore=False,\n ignore_children=False,\n should_capture_backtrace=True,\n ):\n parent = self.current_span()\n if parent is not None:\n parent_id = parent.span_id\n if parent.ignore_children:\n ignore = True\n ignore_children = True\n else:\n parent_id = None\n\n if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS:\n if not self.hit_max:\n logger.warning(\n \"Hit the maximum number of spans, this trace will be incomplete.\"\n )\n self.hit_max = True\n ignore = True\n ignore_children = True\n\n new_span = Span(\n request_id=self.request_id,\n operation=operation,\n ignore=ignore,\n ignore_children=ignore_children,\n parent=parent_id,\n should_capture_backtrace=should_capture_backtrace,\n )\n self.active_spans.append(new_span)\n return new_span\n\n def stop_span(self):\n try:\n stopping_span = 
self.active_spans.pop()\n except IndexError as exc:\n logger.debug(\"Exception when stopping span\", exc_info=exc)\n else:\n stopping_span.stop()\n if not stopping_span.ignore:\n stopping_span.annotate()\n self.complete_spans.append(stopping_span)\n\n if len(self.active_spans) == 0:\n self.finish()\n\n @contextmanager\n def span(self, *args, **kwargs):\n span = self.start_span(*args, **kwargs)\n try:\n yield span\n finally:\n self.stop_span()\n\n def current_span(self):\n if self.active_spans:\n return self.active_spans[-1]\n else:\n return None\n\n # Request is done, release any info we have about it.\n def finish(self):\n logger.debug(\"Stopping request: %s\", self.request_id)\n if self.end_time is None:\n self.end_time = dt.datetime.utcnow()\n if self.is_real_request:\n self.tag(\"mem_delta\", self._get_mem_delta())\n if not self.is_ignored():\n batch_command = BatchCommand.from_tracked_request(self)\n CoreAgentSocketThread.send(batch_command)\n SamplersThread.ensure_started()\n\n details = \" \".join(\n \"{}={}\".format(key, value)\n for key, value in [\n (\"start_time\", self.start_time),\n (\"end_time\", self.end_time),\n (\"duration\", (self.end_time - self.start_time).total_seconds()),\n (\"active_spans\", len(self.active_spans)),\n (\"complete_spans\", len(self.complete_spans)),\n (\"tags\", len(self.tags)),\n (\"hit_max\", self.hit_max),\n (\"is_real_request\", self.is_real_request),\n ]\n )\n logger.debug(\"Request %s %s\", self.request_id, details)\n\n from scout_apm.core.context import context\n\n context.clear_tracked_request(self)\n\n def _get_mem_delta(self):\n current_mem = get_rss_in_mb()\n if current_mem > self._memory_start:\n return current_mem - self._memory_start\n return 0.0\n\n # A request is ignored if the tag \"ignore_transaction\" is set to True\n def is_ignored(self):\n return self.tags.get(\"ignore_transaction\", False)\n\n\nclass Span(object):\n __slots__ = (\n \"span_id\",\n \"start_time\",\n \"end_time\",\n \"request_id\",\n \"operation\",\n \"ignore\",\n \"ignore_children\",\n \"parent\",\n \"tags\",\n \"start_objtrace_counts\",\n \"end_objtrace_counts\",\n \"should_capture_backtrace\",\n )\n\n def __init__(\n self,\n request_id=None,\n operation=None,\n ignore=False,\n ignore_children=False,\n parent=None,\n should_capture_backtrace=True,\n ):\n self.span_id = \"span-\" + str(uuid4())\n self.start_time = dt.datetime.utcnow()\n self.end_time = None\n self.request_id = request_id\n self.operation = operation\n self.ignore = ignore\n self.ignore_children = ignore_children\n self.parent = parent\n self.tags = {}\n self.start_objtrace_counts = objtrace.get_counts()\n self.end_objtrace_counts = (0, 0, 0, 0)\n self.should_capture_backtrace = should_capture_backtrace\n\n def __repr__(self):\n # Incomplete to avoid TMI\n return \"<Span(span_id={}, operation={}, ignore={}, tags={})>\".format(\n repr(self.span_id), repr(self.operation), repr(self.ignore), repr(self.tags)\n )\n\n def stop(self):\n self.end_time = dt.datetime.utcnow()\n self.end_objtrace_counts = objtrace.get_counts()\n\n def tag(self, key, value):\n if key in self.tags:\n logger.debug(\n \"Overwriting previously set tag for span %s: %s\", self.span_id, key\n )\n self.tags[key] = value\n\n # In seconds\n def duration(self):\n if self.end_time is not None:\n return (self.end_time - self.start_time).total_seconds()\n else:\n # Current, running duration\n return (dt.datetime.utcnow() - self.start_time).total_seconds()\n\n # Add any interesting annotations to the span. 
Assumes that we are in the\n # process of stopping this span.\n def annotate(self):\n self.add_allocation_tags()\n if not self.should_capture_backtrace:\n return\n slow_threshold = 0.5\n if self.duration() > slow_threshold:\n self.capture_backtrace()\n\n def add_allocation_tags(self):\n if not objtrace.is_extension:\n return\n\n start_allocs = (\n self.start_objtrace_counts[0]\n + self.start_objtrace_counts[1]\n + self.start_objtrace_counts[2]\n )\n end_allocs = (\n self.end_objtrace_counts[0]\n + self.end_objtrace_counts[1]\n + self.end_objtrace_counts[2]\n )\n\n # If even one of the counters rolled over, we're pretty much\n # guaranteed to have end_allocs be less than start_allocs.\n # This should rarely happen. Max Unsigned Long Long is a big number\n if end_allocs - start_allocs < 0:\n logger.debug(\n \"End allocation count smaller than start allocation \"\n \"count for span %s: start = %d, end = %d\",\n self.span_id,\n start_allocs,\n end_allocs,\n )\n return\n\n self.tag(\"allocations\", end_allocs - start_allocs)\n self.tag(\"start_allocations\", start_allocs)\n self.tag(\"stop_allocations\", end_allocs)\n\n def capture_backtrace(self):\n self.tag(\"stack\", backtrace.capture())\n", "path": "src/scout_apm/core/tracked_request.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\nimport logging\nfrom contextlib import contextmanager\nfrom uuid import uuid4\n\nfrom scout_apm.core import backtrace, objtrace\nfrom scout_apm.core.agent.commands import BatchCommand\nfrom scout_apm.core.agent.socket import CoreAgentSocketThread\nfrom scout_apm.core.n_plus_one_tracker import NPlusOneTracker\nfrom scout_apm.core.samplers.memory import get_rss_in_mb\nfrom scout_apm.core.samplers.thread import SamplersThread\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrackedRequest(object):\n \"\"\"\n This is a container which keeps track of all module instances for a single\n request. 
For convenience they are made available as attributes based on\n their keyname\n \"\"\"\n\n __slots__ = (\n \"request_id\",\n \"start_time\",\n \"end_time\",\n \"active_spans\",\n \"complete_spans\",\n \"tags\",\n \"is_real_request\",\n \"_memory_start\",\n \"n_plus_one_tracker\",\n \"hit_max\",\n )\n\n # Stop adding new spans at this point, to avoid exhausting memory\n MAX_COMPLETE_SPANS = 1500\n\n @classmethod\n def instance(cls):\n from scout_apm.core.context import context\n\n return context.get_tracked_request()\n\n def __init__(self):\n self.request_id = \"req-\" + str(uuid4())\n self.start_time = dt.datetime.utcnow()\n self.end_time = None\n self.active_spans = []\n self.complete_spans = []\n self.tags = {}\n self.is_real_request = False\n self._memory_start = get_rss_in_mb()\n self.n_plus_one_tracker = NPlusOneTracker()\n self.hit_max = False\n logger.debug(\"Starting request: %s\", self.request_id)\n\n def __repr__(self):\n # Incomplete to avoid TMI\n return \"<TrackedRequest(request_id={}, tags={})>\".format(\n repr(self.request_id), repr(self.tags)\n )\n\n def tag(self, key, value):\n if key in self.tags:\n logger.debug(\n \"Overwriting previously set tag for request %s: %s\",\n self.request_id,\n key,\n )\n self.tags[key] = value\n\n def start_span(\n self,\n operation,\n ignore=False,\n ignore_children=False,\n should_capture_backtrace=True,\n ):\n parent = self.current_span()\n if parent is not None:\n parent_id = parent.span_id\n if parent.ignore_children:\n ignore = True\n ignore_children = True\n else:\n parent_id = None\n\n if len(self.complete_spans) >= self.MAX_COMPLETE_SPANS:\n if not self.hit_max:\n logger.debug(\n \"Hit the maximum number of spans, this trace will be incomplete.\"\n )\n self.hit_max = True\n ignore = True\n ignore_children = True\n\n new_span = Span(\n request_id=self.request_id,\n operation=operation,\n ignore=ignore,\n ignore_children=ignore_children,\n parent=parent_id,\n should_capture_backtrace=should_capture_backtrace,\n )\n self.active_spans.append(new_span)\n return new_span\n\n def stop_span(self):\n try:\n stopping_span = self.active_spans.pop()\n except IndexError as exc:\n logger.debug(\"Exception when stopping span\", exc_info=exc)\n else:\n stopping_span.stop()\n if not stopping_span.ignore:\n stopping_span.annotate()\n self.complete_spans.append(stopping_span)\n\n if len(self.active_spans) == 0:\n self.finish()\n\n @contextmanager\n def span(self, *args, **kwargs):\n span = self.start_span(*args, **kwargs)\n try:\n yield span\n finally:\n self.stop_span()\n\n def current_span(self):\n if self.active_spans:\n return self.active_spans[-1]\n else:\n return None\n\n # Request is done, release any info we have about it.\n def finish(self):\n logger.debug(\"Stopping request: %s\", self.request_id)\n if self.end_time is None:\n self.end_time = dt.datetime.utcnow()\n if self.is_real_request:\n self.tag(\"mem_delta\", self._get_mem_delta())\n if not self.is_ignored():\n batch_command = BatchCommand.from_tracked_request(self)\n CoreAgentSocketThread.send(batch_command)\n SamplersThread.ensure_started()\n\n details = \" \".join(\n \"{}={}\".format(key, value)\n for key, value in [\n (\"start_time\", self.start_time),\n (\"end_time\", self.end_time),\n (\"duration\", (self.end_time - self.start_time).total_seconds()),\n (\"active_spans\", len(self.active_spans)),\n (\"complete_spans\", len(self.complete_spans)),\n (\"tags\", len(self.tags)),\n (\"hit_max\", self.hit_max),\n (\"is_real_request\", self.is_real_request),\n ]\n )\n 
logger.debug(\"Request %s %s\", self.request_id, details)\n\n from scout_apm.core.context import context\n\n context.clear_tracked_request(self)\n\n def _get_mem_delta(self):\n current_mem = get_rss_in_mb()\n if current_mem > self._memory_start:\n return current_mem - self._memory_start\n return 0.0\n\n # A request is ignored if the tag \"ignore_transaction\" is set to True\n def is_ignored(self):\n return self.tags.get(\"ignore_transaction\", False)\n\n\nclass Span(object):\n __slots__ = (\n \"span_id\",\n \"start_time\",\n \"end_time\",\n \"request_id\",\n \"operation\",\n \"ignore\",\n \"ignore_children\",\n \"parent\",\n \"tags\",\n \"start_objtrace_counts\",\n \"end_objtrace_counts\",\n \"should_capture_backtrace\",\n )\n\n def __init__(\n self,\n request_id=None,\n operation=None,\n ignore=False,\n ignore_children=False,\n parent=None,\n should_capture_backtrace=True,\n ):\n self.span_id = \"span-\" + str(uuid4())\n self.start_time = dt.datetime.utcnow()\n self.end_time = None\n self.request_id = request_id\n self.operation = operation\n self.ignore = ignore\n self.ignore_children = ignore_children\n self.parent = parent\n self.tags = {}\n self.start_objtrace_counts = objtrace.get_counts()\n self.end_objtrace_counts = (0, 0, 0, 0)\n self.should_capture_backtrace = should_capture_backtrace\n\n def __repr__(self):\n # Incomplete to avoid TMI\n return \"<Span(span_id={}, operation={}, ignore={}, tags={})>\".format(\n repr(self.span_id), repr(self.operation), repr(self.ignore), repr(self.tags)\n )\n\n def stop(self):\n self.end_time = dt.datetime.utcnow()\n self.end_objtrace_counts = objtrace.get_counts()\n\n def tag(self, key, value):\n if key in self.tags:\n logger.debug(\n \"Overwriting previously set tag for span %s: %s\", self.span_id, key\n )\n self.tags[key] = value\n\n # In seconds\n def duration(self):\n if self.end_time is not None:\n return (self.end_time - self.start_time).total_seconds()\n else:\n # Current, running duration\n return (dt.datetime.utcnow() - self.start_time).total_seconds()\n\n # Add any interesting annotations to the span. Assumes that we are in the\n # process of stopping this span.\n def annotate(self):\n self.add_allocation_tags()\n if not self.should_capture_backtrace:\n return\n slow_threshold = 0.5\n if self.duration() > slow_threshold:\n self.capture_backtrace()\n\n def add_allocation_tags(self):\n if not objtrace.is_extension:\n return\n\n start_allocs = (\n self.start_objtrace_counts[0]\n + self.start_objtrace_counts[1]\n + self.start_objtrace_counts[2]\n )\n end_allocs = (\n self.end_objtrace_counts[0]\n + self.end_objtrace_counts[1]\n + self.end_objtrace_counts[2]\n )\n\n # If even one of the counters rolled over, we're pretty much\n # guaranteed to have end_allocs be less than start_allocs.\n # This should rarely happen. Max Unsigned Long Long is a big number\n if end_allocs - start_allocs < 0:\n logger.debug(\n \"End allocation count smaller than start allocation \"\n \"count for span %s: start = %d, end = %d\",\n self.span_id,\n start_allocs,\n end_allocs,\n )\n return\n\n self.tag(\"allocations\", end_allocs - start_allocs)\n self.tag(\"start_allocations\", start_allocs)\n self.tag(\"stop_allocations\", end_allocs)\n\n def capture_backtrace(self):\n self.tag(\"stack\", backtrace.capture())\n", "path": "src/scout_apm/core/tracked_request.py"}]}
3,049
121
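The patch above changes only the severity of the "hit the maximum number of spans" message from WARNING to DEBUG; the cap and the log-once guard are unchanged. A condensed, self-contained sketch of that pattern, where the `SpanTracker` class and operation names are illustrative rather than Scout's real API:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("span_tracker")

MAX_COMPLETE_SPANS = 1500  # same cap as the tracked request above


class SpanTracker:
    """Tracks spans and quietly stops recording once the cap is reached."""

    def __init__(self):
        self.complete_spans = []
        self.hit_max = False

    def record_span(self, operation):
        if len(self.complete_spans) >= MAX_COMPLETE_SPANS:
            if not self.hit_max:
                # Emitted once per request, at DEBUG so it no longer floods warn-level logs.
                logger.debug(
                    "Hit the maximum number of spans, this trace will be incomplete."
                )
                self.hit_max = True
            return False  # spans beyond the cap are ignored
        self.complete_spans.append(operation)
        return True


if __name__ == "__main__":
    tracker = SpanTracker()
    for i in range(MAX_COMPLETE_SPANS + 10):
        tracker.record_span(f"op-{i}")
    print(len(tracker.complete_spans))  # stays at 1500
```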
gh_patches_debug_5304
rasdani/github-patches
git_diff
getredash__redash-4682
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Clickhouse column name encoding problem <!-- We use GitHub only for bug reports πŸ› Anything else should be posted to https://discuss.redash.io πŸ‘« 🚨For support, help & questions use https://discuss.redash.io/c/support πŸ’‘For feature requests & ideas use https://discuss.redash.io/c/feature-requests **Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key. --> ### Issue Summary Alias column name contains non 'latin-1' characters return encoding error. For example : select count(*) as 'Χ›ΧžΧ•Χͺ'… **Error message**: 'latin-1' codec can't encode characters in position 285-288: Body ('Χ›ΧžΧ•Χͺ') is not valid Latin-1. Use body.encode('utf-8') if you want to send it encoded in UTF-8. BTW, This works fine with other data source like mysql. ### Steps to Reproduce 1. This is the first step Create a query with ClickHouse as the data source 2. This is the second step, etc. Add a column alias in utf8 chars like : select colum1 as 'Χ©' Any other info e.g. Why do you consider this to be a bug? What did you expect to happen instead? I expected to see the column alias like I do with mysql data source . No problem with the 'utf-8' data, so probably column names should support 'utf-8' charset as well. This used to work with the older version (v8) ### Technical details: * Redash Version: 9.0.0-alpha * Browser/OS: Β Chrome * How did you install Redash: Docker Based Developer Installation --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `redash/query_runner/clickhouse.py` Content: ``` 1 import logging 2 import re 3 4 import requests 5 6 from redash.query_runner import * 7 from redash.utils import json_dumps, json_loads 8 9 logger = logging.getLogger(__name__) 10 11 12 class ClickHouse(BaseSQLQueryRunner): 13 noop_query = "SELECT 1" 14 15 @classmethod 16 def configuration_schema(cls): 17 return { 18 "type": "object", 19 "properties": { 20 "url": {"type": "string", "default": "http://127.0.0.1:8123"}, 21 "user": {"type": "string", "default": "default"}, 22 "password": {"type": "string"}, 23 "dbname": {"type": "string", "title": "Database Name"}, 24 "timeout": { 25 "type": "number", 26 "title": "Request Timeout", 27 "default": 30, 28 }, 29 "verify": { 30 "type": "boolean", 31 "title": "Verify SSL certificate", 32 "default": True, 33 }, 34 }, 35 "order": ["url", "user", "password", "dbname"], 36 "required": ["dbname"], 37 "extra_options": ["timeout", "verify"], 38 "secret": ["password"], 39 } 40 41 @classmethod 42 def type(cls): 43 return "clickhouse" 44 45 def _get_tables(self, schema): 46 query = "SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')" 47 48 results, error = self.run_query(query, None) 49 50 if error is not None: 51 raise Exception("Failed getting schema.") 52 53 results = json_loads(results) 54 55 for row in results["rows"]: 56 table_name = "{}.{}".format(row["database"], row["table"]) 57 58 if table_name not in schema: 59 schema[table_name] = {"name": table_name, "columns": []} 60 61 schema[table_name]["columns"].append(row["name"]) 62 63 return list(schema.values()) 64 65 def _send_query(self, data, stream=False): 66 url = self.configuration.get("url", "http://127.0.0.1:8123") 67 try: 68 verify = self.configuration.get("verify", True) 69 r = requests.post( 70 url, 71 data=data, 72 stream=stream, 73 timeout=self.configuration.get("timeout", 30), 74 params={ 75 "user": self.configuration.get("user", "default"), 76 "password": self.configuration.get("password", ""), 77 "database": self.configuration["dbname"], 78 }, 79 verify=verify, 80 ) 81 if r.status_code != 200: 82 raise Exception(r.text) 83 # logging.warning(r.json()) 84 return r.json() 85 except requests.RequestException as e: 86 if e.response: 87 details = "({}, Status Code: {})".format( 88 e.__class__.__name__, e.response.status_code 89 ) 90 else: 91 details = "({})".format(e.__class__.__name__) 92 raise Exception("Connection error to: {} {}.".format(url, details)) 93 94 @staticmethod 95 def _define_column_type(column): 96 c = column.lower() 97 f = re.search(r"^nullable\((.*)\)$", c) 98 if f is not None: 99 c = f.group(1) 100 if c.startswith("int") or c.startswith("uint"): 101 return TYPE_INTEGER 102 elif c.startswith("float"): 103 return TYPE_FLOAT 104 elif c == "datetime": 105 return TYPE_DATETIME 106 elif c == "date": 107 return TYPE_DATE 108 else: 109 return TYPE_STRING 110 111 def _clickhouse_query(self, query): 112 query += "\nFORMAT JSON" 113 result = self._send_query(query) 114 columns = [] 115 columns_int64 = [] # db converts value to string if its type equals UInt64 116 columns_totals = {} 117 118 for r in result["meta"]: 119 column_name = r["name"] 120 column_type = self._define_column_type(r["type"]) 121 122 if r["type"] in ("Int64", "UInt64", "Nullable(Int64)", "Nullable(UInt64)"): 123 columns_int64.append(column_name) 124 else: 125 columns_totals[column_name] = ( 126 "Total" if column_type == TYPE_STRING else None 127 ) 128 129 columns.append( 130 {"name": column_name, 
"friendly_name": column_name, "type": column_type} 131 ) 132 133 rows = result["data"] 134 for row in rows: 135 for column in columns_int64: 136 try: 137 row[column] = int(row[column]) 138 except TypeError: 139 row[column] = None 140 141 if "totals" in result: 142 totals = result["totals"] 143 for column, value in columns_totals.items(): 144 totals[column] = value 145 rows.append(totals) 146 147 return {"columns": columns, "rows": rows} 148 149 def run_query(self, query, user): 150 logger.debug("Clickhouse is about to execute query: %s", query) 151 if query == "": 152 json_data = None 153 error = "Query is empty" 154 return json_data, error 155 try: 156 q = self._clickhouse_query(query) 157 data = json_dumps(q) 158 error = None 159 except Exception as e: 160 data = None 161 logging.exception(e) 162 error = str(e) 163 return data, error 164 165 166 register(ClickHouse) 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py --- a/redash/query_runner/clickhouse.py +++ b/redash/query_runner/clickhouse.py @@ -68,7 +68,7 @@ verify = self.configuration.get("verify", True) r = requests.post( url, - data=data, + data=data.encode("utf-8","ignore"), stream=stream, timeout=self.configuration.get("timeout", 30), params={
{"golden_diff": "diff --git a/redash/query_runner/clickhouse.py b/redash/query_runner/clickhouse.py\n--- a/redash/query_runner/clickhouse.py\n+++ b/redash/query_runner/clickhouse.py\n@@ -68,7 +68,7 @@\n verify = self.configuration.get(\"verify\", True)\n r = requests.post(\n url,\n- data=data,\n+ data=data.encode(\"utf-8\",\"ignore\"),\n stream=stream,\n timeout=self.configuration.get(\"timeout\", 30),\n params={\n", "issue": "Clickhouse column name encoding problem\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nAlias column name contains non 'latin-1' characters return encoding error.\r\nFor example : select count(*) as '\u05db\u05de\u05d5\u05ea'\u2026\r\n\r\n**Error message**: 'latin-1' codec can't encode characters in position 285-288: Body ('\u05db\u05de\u05d5\u05ea') is not valid Latin-1. Use body.encode('utf-8') if you want to send it encoded in UTF-8.\r\n\r\nBTW, This works fine with other data source like mysql.\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. This is the first step\r\nCreate a query with ClickHouse as the data source\r\n\r\n2. This is the second step, etc.\r\nAdd a column alias in utf8 chars like : select colum1 as '\u05e9'\r\n\r\nAny other info e.g. Why do you consider this to be a bug? 
What did you expect to happen instead?\r\n\r\nI expected to see the column alias like I do with mysql data source .\r\nNo problem with the 'utf-8' data, so probably column names should support 'utf-8' charset as well.\r\nThis used to work with the older version (v8)\r\n### Technical details:\r\n\r\n* Redash Version:\r\n9.0.0-alpha\r\n* Browser/OS:\r\n\u00a0Chrome\r\n* How did you install Redash:\r\nDocker Based Developer Installation\n", "before_files": [{"content": "import logging\nimport re\n\nimport requests\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClickHouse(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"url\": {\"type\": \"string\", \"default\": \"http://127.0.0.1:8123\"},\n \"user\": {\"type\": \"string\", \"default\": \"default\"},\n \"password\": {\"type\": \"string\"},\n \"dbname\": {\"type\": \"string\", \"title\": \"Database Name\"},\n \"timeout\": {\n \"type\": \"number\",\n \"title\": \"Request Timeout\",\n \"default\": 30,\n },\n \"verify\": {\n \"type\": \"boolean\",\n \"title\": \"Verify SSL certificate\",\n \"default\": True,\n },\n },\n \"order\": [\"url\", \"user\", \"password\", \"dbname\"],\n \"required\": [\"dbname\"],\n \"extra_options\": [\"timeout\", \"verify\"],\n \"secret\": [\"password\"],\n }\n\n @classmethod\n def type(cls):\n return \"clickhouse\"\n\n def _get_tables(self, schema):\n query = \"SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = \"{}.{}\".format(row[\"database\"], row[\"table\"])\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n schema[table_name][\"columns\"].append(row[\"name\"])\n\n return list(schema.values())\n\n def _send_query(self, data, stream=False):\n url = self.configuration.get(\"url\", \"http://127.0.0.1:8123\")\n try:\n verify = self.configuration.get(\"verify\", True)\n r = requests.post(\n url,\n data=data,\n stream=stream,\n timeout=self.configuration.get(\"timeout\", 30),\n params={\n \"user\": self.configuration.get(\"user\", \"default\"),\n \"password\": self.configuration.get(\"password\", \"\"),\n \"database\": self.configuration[\"dbname\"],\n },\n verify=verify,\n )\n if r.status_code != 200:\n raise Exception(r.text)\n # logging.warning(r.json())\n return r.json()\n except requests.RequestException as e:\n if e.response:\n details = \"({}, Status Code: {})\".format(\n e.__class__.__name__, e.response.status_code\n )\n else:\n details = \"({})\".format(e.__class__.__name__)\n raise Exception(\"Connection error to: {} {}.\".format(url, details))\n\n @staticmethod\n def _define_column_type(column):\n c = column.lower()\n f = re.search(r\"^nullable\\((.*)\\)$\", c)\n if f is not None:\n c = f.group(1)\n if c.startswith(\"int\") or c.startswith(\"uint\"):\n return TYPE_INTEGER\n elif c.startswith(\"float\"):\n return TYPE_FLOAT\n elif c == \"datetime\":\n return TYPE_DATETIME\n elif c == \"date\":\n return TYPE_DATE\n else:\n return TYPE_STRING\n\n def _clickhouse_query(self, query):\n query += \"\\nFORMAT JSON\"\n result = self._send_query(query)\n columns = []\n columns_int64 = [] # db converts value to string if its type equals UInt64\n 
columns_totals = {}\n\n for r in result[\"meta\"]:\n column_name = r[\"name\"]\n column_type = self._define_column_type(r[\"type\"])\n\n if r[\"type\"] in (\"Int64\", \"UInt64\", \"Nullable(Int64)\", \"Nullable(UInt64)\"):\n columns_int64.append(column_name)\n else:\n columns_totals[column_name] = (\n \"Total\" if column_type == TYPE_STRING else None\n )\n\n columns.append(\n {\"name\": column_name, \"friendly_name\": column_name, \"type\": column_type}\n )\n\n rows = result[\"data\"]\n for row in rows:\n for column in columns_int64:\n try:\n row[column] = int(row[column])\n except TypeError:\n row[column] = None\n\n if \"totals\" in result:\n totals = result[\"totals\"]\n for column, value in columns_totals.items():\n totals[column] = value\n rows.append(totals)\n\n return {\"columns\": columns, \"rows\": rows}\n\n def run_query(self, query, user):\n logger.debug(\"Clickhouse is about to execute query: %s\", query)\n if query == \"\":\n json_data = None\n error = \"Query is empty\"\n return json_data, error\n try:\n q = self._clickhouse_query(query)\n data = json_dumps(q)\n error = None\n except Exception as e:\n data = None\n logging.exception(e)\n error = str(e)\n return data, error\n\n\nregister(ClickHouse)\n", "path": "redash/query_runner/clickhouse.py"}], "after_files": [{"content": "import logging\nimport re\n\nimport requests\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClickHouse(BaseSQLQueryRunner):\n noop_query = \"SELECT 1\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"url\": {\"type\": \"string\", \"default\": \"http://127.0.0.1:8123\"},\n \"user\": {\"type\": \"string\", \"default\": \"default\"},\n \"password\": {\"type\": \"string\"},\n \"dbname\": {\"type\": \"string\", \"title\": \"Database Name\"},\n \"timeout\": {\n \"type\": \"number\",\n \"title\": \"Request Timeout\",\n \"default\": 30,\n },\n \"verify\": {\n \"type\": \"boolean\",\n \"title\": \"Verify SSL certificate\",\n \"default\": True,\n },\n },\n \"order\": [\"url\", \"user\", \"password\", \"dbname\"],\n \"required\": [\"dbname\"],\n \"extra_options\": [\"timeout\", \"verify\"],\n \"secret\": [\"password\"],\n }\n\n @classmethod\n def type(cls):\n return \"clickhouse\"\n\n def _get_tables(self, schema):\n query = \"SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')\"\n\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = \"{}.{}\".format(row[\"database\"], row[\"table\"])\n\n if table_name not in schema:\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n\n schema[table_name][\"columns\"].append(row[\"name\"])\n\n return list(schema.values())\n\n def _send_query(self, data, stream=False):\n url = self.configuration.get(\"url\", \"http://127.0.0.1:8123\")\n try:\n verify = self.configuration.get(\"verify\", True)\n r = requests.post(\n url,\n data=data.encode(\"utf-8\",\"ignore\"),\n stream=stream,\n timeout=self.configuration.get(\"timeout\", 30),\n params={\n \"user\": self.configuration.get(\"user\", \"default\"),\n \"password\": self.configuration.get(\"password\", \"\"),\n \"database\": self.configuration[\"dbname\"],\n },\n verify=verify,\n )\n if r.status_code != 200:\n raise Exception(r.text)\n # logging.warning(r.json())\n return r.json()\n except 
requests.RequestException as e:\n if e.response:\n details = \"({}, Status Code: {})\".format(\n e.__class__.__name__, e.response.status_code\n )\n else:\n details = \"({})\".format(e.__class__.__name__)\n raise Exception(\"Connection error to: {} {}.\".format(url, details))\n\n @staticmethod\n def _define_column_type(column):\n c = column.lower()\n f = re.search(r\"^nullable\\((.*)\\)$\", c)\n if f is not None:\n c = f.group(1)\n if c.startswith(\"int\") or c.startswith(\"uint\"):\n return TYPE_INTEGER\n elif c.startswith(\"float\"):\n return TYPE_FLOAT\n elif c == \"datetime\":\n return TYPE_DATETIME\n elif c == \"date\":\n return TYPE_DATE\n else:\n return TYPE_STRING\n\n def _clickhouse_query(self, query):\n query += \"\\nFORMAT JSON\"\n result = self._send_query(query)\n columns = []\n columns_int64 = [] # db converts value to string if its type equals UInt64\n columns_totals = {}\n\n for r in result[\"meta\"]:\n column_name = r[\"name\"]\n column_type = self._define_column_type(r[\"type\"])\n\n if r[\"type\"] in (\"Int64\", \"UInt64\", \"Nullable(Int64)\", \"Nullable(UInt64)\"):\n columns_int64.append(column_name)\n else:\n columns_totals[column_name] = (\n \"Total\" if column_type == TYPE_STRING else None\n )\n\n columns.append(\n {\"name\": column_name, \"friendly_name\": column_name, \"type\": column_type}\n )\n\n rows = result[\"data\"]\n for row in rows:\n for column in columns_int64:\n try:\n row[column] = int(row[column])\n except TypeError:\n row[column] = None\n\n if \"totals\" in result:\n totals = result[\"totals\"]\n for column, value in columns_totals.items():\n totals[column] = value\n rows.append(totals)\n\n return {\"columns\": columns, \"rows\": rows}\n\n def run_query(self, query, user):\n logger.debug(\"Clickhouse is about to execute query: %s\", query)\n if query == \"\":\n json_data = None\n error = \"Query is empty\"\n return json_data, error\n try:\n q = self._clickhouse_query(query)\n data = json_dumps(q)\n error = None\n except Exception as e:\n data = None\n logging.exception(e)\n error = str(e)\n return data, error\n\n\nregister(ClickHouse)\n", "path": "redash/query_runner/clickhouse.py"}]}
2,231
113
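The one-line fix above works because a plain `str` body handed to `requests.post` is Latin-1 encoded by Python's HTTP layer, which raises exactly the error quoted in the issue once the query contains characters outside that range. A hedged sketch of the same idea against ClickHouse's HTTP interface, separate from Redash's query runner; the endpoint, credentials, and sample query are placeholders that happen to mirror the runner's defaults:

```python
import requests


def run_clickhouse_query(url, query, user="default", password="", database="default"):
    """POST a query to ClickHouse's HTTP interface with an explicit UTF-8 body."""
    response = requests.post(
        url,
        # Encoding up front keeps the HTTP layer from attempting a Latin-1
        # encode of aliases such as 'כמות'.
        data=query.encode("utf-8"),
        params={"user": user, "password": password, "database": database},
        timeout=30,
    )
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    result = run_clickhouse_query(
        "http://127.0.0.1:8123",
        'SELECT count(*) AS "כמות" FORMAT JSON',
    )
    print([column["name"] for column in result["meta"]])
```

The golden patch additionally passes `"ignore"` as the error handler; a plain `.encode("utf-8")` is enough in this sketch because the query string is already valid UTF-8.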
gh_patches_debug_38354
rasdani/github-patches
git_diff
learningequality__kolibri-9047
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- videos not playing in mac app & Safari, but do in Firefox/Chrome `0.13.0b3` on Mojave 10.14.3. Both macApp and Safari seem to be having the same issue with playing videos, but Firefox & Chrome play just fine: ![2019-12-01_3-11-02](https://user-images.githubusercontent.com/1457929/69908512-586cf780-13eb-11ea-8b34-fd5f5457481b.jpg) ![2019-12-01_3-17-38](https://user-images.githubusercontent.com/1457929/69908505-3d01ec80-13eb-11ea-9b23-8a62ed5052cb.jpg) [logs.zip](https://github.com/learningequality/kolibri-installer-mac/files/3907784/logs.zip) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/utils/kolibri_whitenoise.py` Content: ``` 1 import os 2 import re 3 import stat 4 from collections import OrderedDict 5 6 from django.contrib.staticfiles import finders 7 from django.core.files.storage import FileSystemStorage 8 from django.utils._os import safe_join 9 from whitenoise import WhiteNoise 10 from whitenoise.httpstatus_backport import HTTPStatus 11 from whitenoise.responders import Response 12 from whitenoise.string_utils import decode_path_info 13 14 15 compressed_file_extensions = ("gz",) 16 17 not_found_status = HTTPStatus(404, "Not Found") 18 19 20 class NotFoundStaticFile(object): 21 """ 22 A special static file class to give a not found response, 23 rather than letting it be further handled by the wrapped WSGI server. 24 """ 25 26 def get_response(self, method, request_headers): 27 return Response(not_found_status, [], None) 28 29 30 NOT_FOUND = NotFoundStaticFile() 31 32 33 class FileFinder(finders.FileSystemFinder): 34 """ 35 A modified version of the Django FileSystemFinder class 36 which allows us to pass in arbitrary locations to find files 37 """ 38 39 def __init__(self, locations): 40 # List of locations with static files 41 self.locations = [] 42 self.prefixes = set() 43 # Maps dir paths to an appropriate storage instance 44 self.storages = OrderedDict() 45 if not isinstance(locations, (list, tuple)): 46 raise TypeError("locations argument is not a tuple or list") 47 for root in locations: 48 prefix, root = root 49 if not prefix: 50 raise ValueError( 51 "Cannot use unprefixed locations for dynamic locations" 52 ) 53 else: 54 prefix = prefix.rstrip("/") 55 if (prefix, root) not in self.locations: 56 self.locations.append((prefix, root)) 57 self.prefixes.add(prefix) 58 for prefix, root in self.locations: 59 filesystem_storage = FileSystemStorage(location=root) 60 filesystem_storage.prefix = prefix 61 self.storages[root] = filesystem_storage 62 63 def find_location(self, root, path, prefix=None): 64 """ 65 Finds a requested static file in a location, returning the found 66 absolute path (or ``None`` if no match). 67 Vendored from Django to handle being passed a URL path instead of a file path. 
68 """ 69 if prefix: 70 prefix = prefix + "/" 71 if not path.startswith(prefix): 72 return None 73 path = path[len(prefix) :] 74 path = safe_join(root, path) 75 if os.path.exists(path): 76 return path 77 78 79 class DynamicWhiteNoise(WhiteNoise): 80 index_file = "index.html" 81 82 def __init__( 83 self, application, dynamic_locations=None, static_prefix=None, **kwargs 84 ): 85 whitenoise_settings = { 86 # Use 120 seconds as the default cache time for static assets 87 "max_age": 120, 88 # Add a test for any file name that contains a semantic version number 89 # or a 32 digit number (assumed to be a file hash) 90 # these files will be cached indefinitely 91 "immutable_file_test": r"((0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)|[a-f0-9]{32})", 92 "autorefresh": os.environ.get("KOLIBRI_DEVELOPER_MODE", False), 93 } 94 kwargs.update(whitenoise_settings) 95 super(DynamicWhiteNoise, self).__init__(application, **kwargs) 96 self.dynamic_finder = FileFinder(dynamic_locations or []) 97 # Generate a regex to check if a path matches one of our dynamic 98 # location prefixes 99 self.dynamic_check = ( 100 re.compile("^({})".format("|".join(self.dynamic_finder.prefixes))) 101 if self.dynamic_finder.prefixes 102 else None 103 ) 104 if static_prefix is not None and not static_prefix.endswith("/"): 105 raise ValueError("Static prefix must end in '/'") 106 self.static_prefix = static_prefix 107 108 def __call__(self, environ, start_response): 109 path = decode_path_info(environ.get("PATH_INFO", "")) 110 if self.autorefresh: 111 static_file = self.find_file(path) 112 else: 113 static_file = self.files.get(path) 114 if static_file is None: 115 static_file = self.find_and_cache_dynamic_file(path) 116 if static_file is None: 117 return self.application(environ, start_response) 118 return self.serve(static_file, environ, start_response) 119 120 def find_and_cache_dynamic_file(self, url): 121 path = self.get_dynamic_path(url) 122 if path: 123 file_stat = os.stat(path) 124 # Only try to do matches for regular files. 125 if stat.S_ISREG(file_stat.st_mode): 126 stat_cache = {path: os.stat(path)} 127 for ext in compressed_file_extensions: 128 try: 129 comp_path = "{}.{}".format(path, ext) 130 stat_cache[comp_path] = os.stat(comp_path) 131 except (IOError, OSError): 132 pass 133 self.add_file_to_dictionary(url, path, stat_cache=stat_cache) 134 elif ( 135 path is None 136 and self.static_prefix is not None 137 and url.startswith(self.static_prefix) 138 ): 139 self.files[url] = NOT_FOUND 140 return self.files.get(url) 141 142 def get_dynamic_path(self, url): 143 if self.static_prefix is not None and url.startswith(self.static_prefix): 144 return finders.find(url[len(self.static_prefix) :]) 145 if self.dynamic_check is not None and self.dynamic_check.match(url): 146 return self.dynamic_finder.find(url) 147 148 def candidate_paths_for_url(self, url): 149 paths = super(DynamicWhiteNoise, self).candidate_paths_for_url(url) 150 for path in paths: 151 yield path 152 path = self.get_dynamic_path(url) 153 if path: 154 yield path 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/utils/kolibri_whitenoise.py b/kolibri/utils/kolibri_whitenoise.py
--- a/kolibri/utils/kolibri_whitenoise.py
+++ b/kolibri/utils/kolibri_whitenoise.py
@@ -2,13 +2,17 @@
 import re
 import stat
 from collections import OrderedDict
+from io import BufferedIOBase
+from wsgiref.headers import Headers
 
 from django.contrib.staticfiles import finders
 from django.core.files.storage import FileSystemStorage
 from django.utils._os import safe_join
 from whitenoise import WhiteNoise
 from whitenoise.httpstatus_backport import HTTPStatus
+from whitenoise.responders import MissingFileError
 from whitenoise.responders import Response
+from whitenoise.responders import StaticFile
 from whitenoise.string_utils import decode_path_info
 
 
@@ -76,6 +80,40 @@
 return path
 
 
+class SlicedFile(BufferedIOBase):
+ def __init__(self, fileobj, start, end):
+ fileobj.seek(start)
+ self.fileobj = fileobj
+ self.remaining = end - start + 1
+
+ def read(self, size=-1):
+ if self.remaining <= 0:
+ return b""
+ if size >= 0:
+ size = min(size, self.remaining)
+ data = self.fileobj.read(size)
+ self.remaining -= size
+ return data
+
+
+class EndRangeStaticFile(StaticFile):
+ def get_range_response(self, range_header, base_headers, file_handle):
+ headers = []
+ for item in base_headers:
+ if item[0] == "Content-Length":
+ size = int(item[1])
+ else:
+ headers.append(item)
+ start, end = self.get_byte_range(range_header, size)
+ if start >= end:
+ return self.get_range_not_satisfiable_response(file_handle, size)
+ if file_handle is not None:
+ file_handle = SlicedFile(file_handle, start, end)
+ headers.append(("Content-Range", "bytes {}-{}/{}".format(start, end, size)))
+ headers.append(("Content-Length", str(end - start + 1)))
+ return Response(HTTPStatus.PARTIAL_CONTENT, headers, file_handle)
+
+
 class DynamicWhiteNoise(WhiteNoise):
 index_file = "index.html"
 
@@ -152,3 +190,25 @@
 path = self.get_dynamic_path(url)
 if path:
 yield path
+
+ def get_static_file(self, path, url, stat_cache=None):
+ """
+ Vendor this function from source to substitute in our
+ own StaticFile class that can properly handle ranges.
+ """
+ # Optimization: bail early if file does not exist
+ if stat_cache is None and not os.path.exists(path):
+ raise MissingFileError(path)
+ headers = Headers([])
+ self.add_mime_headers(headers, path, url)
+ self.add_cache_headers(headers, path, url)
+ if self.allow_all_origins:
+ headers["Access-Control-Allow-Origin"] = "*"
+ if self.add_headers_function:
+ self.add_headers_function(headers, path, url)
+ return EndRangeStaticFile(
+ path,
+ headers.items(),
+ stat_cache=stat_cache,
+ encodings={"gzip": path + ".gz", "br": path + ".br"},
+ )
{"golden_diff": "diff --git a/kolibri/utils/kolibri_whitenoise.py b/kolibri/utils/kolibri_whitenoise.py\n--- a/kolibri/utils/kolibri_whitenoise.py\n+++ b/kolibri/utils/kolibri_whitenoise.py\n@@ -2,13 +2,17 @@\n import re\n import stat\n from collections import OrderedDict\n+from io import BufferedIOBase\n+from wsgiref.headers import Headers\n \n from django.contrib.staticfiles import finders\n from django.core.files.storage import FileSystemStorage\n from django.utils._os import safe_join\n from whitenoise import WhiteNoise\n from whitenoise.httpstatus_backport import HTTPStatus\n+from whitenoise.responders import MissingFileError\n from whitenoise.responders import Response\n+from whitenoise.responders import StaticFile\n from whitenoise.string_utils import decode_path_info\n \n \n@@ -76,6 +80,40 @@\n return path\n \n \n+class SlicedFile(BufferedIOBase):\n+ def __init__(self, fileobj, start, end):\n+ fileobj.seek(start)\n+ self.fileobj = fileobj\n+ self.remaining = end - start + 1\n+\n+ def read(self, size=-1):\n+ if self.remaining <= 0:\n+ return b\"\"\n+ if size >= 0:\n+ size = min(size, self.remaining)\n+ data = self.fileobj.read(size)\n+ self.remaining -= size\n+ return data\n+\n+\n+class EndRangeStaticFile(StaticFile):\n+ def get_range_response(self, range_header, base_headers, file_handle):\n+ headers = []\n+ for item in base_headers:\n+ if item[0] == \"Content-Length\":\n+ size = int(item[1])\n+ else:\n+ headers.append(item)\n+ start, end = self.get_byte_range(range_header, size)\n+ if start >= end:\n+ return self.get_range_not_satisfiable_response(file_handle, size)\n+ if file_handle is not None:\n+ file_handle = SlicedFile(file_handle, start, end)\n+ headers.append((\"Content-Range\", \"bytes {}-{}/{}\".format(start, end, size)))\n+ headers.append((\"Content-Length\", str(end - start + 1)))\n+ return Response(HTTPStatus.PARTIAL_CONTENT, headers, file_handle)\n+\n+\n class DynamicWhiteNoise(WhiteNoise):\n index_file = \"index.html\"\n \n@@ -152,3 +190,25 @@\n path = self.get_dynamic_path(url)\n if path:\n yield path\n+\n+ def get_static_file(self, path, url, stat_cache=None):\n+ \"\"\"\n+ Vendor this function from source to substitute in our\n+ own StaticFile class that can properly handle ranges.\n+ \"\"\"\n+ # Optimization: bail early if file does not exist\n+ if stat_cache is None and not os.path.exists(path):\n+ raise MissingFileError(path)\n+ headers = Headers([])\n+ self.add_mime_headers(headers, path, url)\n+ self.add_cache_headers(headers, path, url)\n+ if self.allow_all_origins:\n+ headers[\"Access-Control-Allow-Origin\"] = \"*\"\n+ if self.add_headers_function:\n+ self.add_headers_function(headers, path, url)\n+ return EndRangeStaticFile(\n+ path,\n+ headers.items(),\n+ stat_cache=stat_cache,\n+ encodings={\"gzip\": path + \".gz\", \"br\": path + \".br\"},\n+ )\n", "issue": "videos not playing in mac app & Safari, but do in Firefox/Chrome\n`0.13.0b3` on Mojave 10.14.3.\r\n\r\nBoth macApp and Safari seem to be having the same issue with playing videos, but Firefox & Chrome play just fine:\r\n\r\n![2019-12-01_3-11-02](https://user-images.githubusercontent.com/1457929/69908512-586cf780-13eb-11ea-8b34-fd5f5457481b.jpg)\r\n\r\n\r\n![2019-12-01_3-17-38](https://user-images.githubusercontent.com/1457929/69908505-3d01ec80-13eb-11ea-9b23-8a62ed5052cb.jpg)\r\n\r\n[logs.zip](https://github.com/learningequality/kolibri-installer-mac/files/3907784/logs.zip)\r\n\n", "before_files": [{"content": "import os\nimport re\nimport stat\nfrom collections import OrderedDict\n\nfrom 
django.contrib.staticfiles import finders\nfrom django.core.files.storage import FileSystemStorage\nfrom django.utils._os import safe_join\nfrom whitenoise import WhiteNoise\nfrom whitenoise.httpstatus_backport import HTTPStatus\nfrom whitenoise.responders import Response\nfrom whitenoise.string_utils import decode_path_info\n\n\ncompressed_file_extensions = (\"gz\",)\n\nnot_found_status = HTTPStatus(404, \"Not Found\")\n\n\nclass NotFoundStaticFile(object):\n \"\"\"\n A special static file class to give a not found response,\n rather than letting it be further handled by the wrapped WSGI server.\n \"\"\"\n\n def get_response(self, method, request_headers):\n return Response(not_found_status, [], None)\n\n\nNOT_FOUND = NotFoundStaticFile()\n\n\nclass FileFinder(finders.FileSystemFinder):\n \"\"\"\n A modified version of the Django FileSystemFinder class\n which allows us to pass in arbitrary locations to find files\n \"\"\"\n\n def __init__(self, locations):\n # List of locations with static files\n self.locations = []\n self.prefixes = set()\n # Maps dir paths to an appropriate storage instance\n self.storages = OrderedDict()\n if not isinstance(locations, (list, tuple)):\n raise TypeError(\"locations argument is not a tuple or list\")\n for root in locations:\n prefix, root = root\n if not prefix:\n raise ValueError(\n \"Cannot use unprefixed locations for dynamic locations\"\n )\n else:\n prefix = prefix.rstrip(\"/\")\n if (prefix, root) not in self.locations:\n self.locations.append((prefix, root))\n self.prefixes.add(prefix)\n for prefix, root in self.locations:\n filesystem_storage = FileSystemStorage(location=root)\n filesystem_storage.prefix = prefix\n self.storages[root] = filesystem_storage\n\n def find_location(self, root, path, prefix=None):\n \"\"\"\n Finds a requested static file in a location, returning the found\n absolute path (or ``None`` if no match).\n Vendored from Django to handle being passed a URL path instead of a file path.\n \"\"\"\n if prefix:\n prefix = prefix + \"/\"\n if not path.startswith(prefix):\n return None\n path = path[len(prefix) :]\n path = safe_join(root, path)\n if os.path.exists(path):\n return path\n\n\nclass DynamicWhiteNoise(WhiteNoise):\n index_file = \"index.html\"\n\n def __init__(\n self, application, dynamic_locations=None, static_prefix=None, **kwargs\n ):\n whitenoise_settings = {\n # Use 120 seconds as the default cache time for static assets\n \"max_age\": 120,\n # Add a test for any file name that contains a semantic version number\n # or a 32 digit number (assumed to be a file hash)\n # these files will be cached indefinitely\n \"immutable_file_test\": r\"((0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)|[a-f0-9]{32})\",\n \"autorefresh\": os.environ.get(\"KOLIBRI_DEVELOPER_MODE\", False),\n }\n kwargs.update(whitenoise_settings)\n super(DynamicWhiteNoise, self).__init__(application, **kwargs)\n self.dynamic_finder = FileFinder(dynamic_locations or [])\n # Generate a regex to check if a path matches one of our dynamic\n # location prefixes\n self.dynamic_check = (\n re.compile(\"^({})\".format(\"|\".join(self.dynamic_finder.prefixes)))\n if self.dynamic_finder.prefixes\n else None\n )\n if static_prefix is not None and not static_prefix.endswith(\"/\"):\n raise ValueError(\"Static prefix must end in '/'\")\n self.static_prefix = static_prefix\n\n def __call__(self, environ, start_response):\n path = decode_path_info(environ.get(\"PATH_INFO\", \"\"))\n if self.autorefresh:\n static_file = self.find_file(path)\n else:\n static_file = 
self.files.get(path)\n if static_file is None:\n static_file = self.find_and_cache_dynamic_file(path)\n if static_file is None:\n return self.application(environ, start_response)\n return self.serve(static_file, environ, start_response)\n\n def find_and_cache_dynamic_file(self, url):\n path = self.get_dynamic_path(url)\n if path:\n file_stat = os.stat(path)\n # Only try to do matches for regular files.\n if stat.S_ISREG(file_stat.st_mode):\n stat_cache = {path: os.stat(path)}\n for ext in compressed_file_extensions:\n try:\n comp_path = \"{}.{}\".format(path, ext)\n stat_cache[comp_path] = os.stat(comp_path)\n except (IOError, OSError):\n pass\n self.add_file_to_dictionary(url, path, stat_cache=stat_cache)\n elif (\n path is None\n and self.static_prefix is not None\n and url.startswith(self.static_prefix)\n ):\n self.files[url] = NOT_FOUND\n return self.files.get(url)\n\n def get_dynamic_path(self, url):\n if self.static_prefix is not None and url.startswith(self.static_prefix):\n return finders.find(url[len(self.static_prefix) :])\n if self.dynamic_check is not None and self.dynamic_check.match(url):\n return self.dynamic_finder.find(url)\n\n def candidate_paths_for_url(self, url):\n paths = super(DynamicWhiteNoise, self).candidate_paths_for_url(url)\n for path in paths:\n yield path\n path = self.get_dynamic_path(url)\n if path:\n yield path\n", "path": "kolibri/utils/kolibri_whitenoise.py"}], "after_files": [{"content": "import os\nimport re\nimport stat\nfrom collections import OrderedDict\nfrom io import BufferedIOBase\nfrom wsgiref.headers import Headers\n\nfrom django.contrib.staticfiles import finders\nfrom django.core.files.storage import FileSystemStorage\nfrom django.utils._os import safe_join\nfrom whitenoise import WhiteNoise\nfrom whitenoise.httpstatus_backport import HTTPStatus\nfrom whitenoise.responders import MissingFileError\nfrom whitenoise.responders import Response\nfrom whitenoise.responders import StaticFile\nfrom whitenoise.string_utils import decode_path_info\n\n\ncompressed_file_extensions = (\"gz\",)\n\nnot_found_status = HTTPStatus(404, \"Not Found\")\n\n\nclass NotFoundStaticFile(object):\n \"\"\"\n A special static file class to give a not found response,\n rather than letting it be further handled by the wrapped WSGI server.\n \"\"\"\n\n def get_response(self, method, request_headers):\n return Response(not_found_status, [], None)\n\n\nNOT_FOUND = NotFoundStaticFile()\n\n\nclass FileFinder(finders.FileSystemFinder):\n \"\"\"\n A modified version of the Django FileSystemFinder class\n which allows us to pass in arbitrary locations to find files\n \"\"\"\n\n def __init__(self, locations):\n # List of locations with static files\n self.locations = []\n self.prefixes = set()\n # Maps dir paths to an appropriate storage instance\n self.storages = OrderedDict()\n if not isinstance(locations, (list, tuple)):\n raise TypeError(\"locations argument is not a tuple or list\")\n for root in locations:\n prefix, root = root\n if not prefix:\n raise ValueError(\n \"Cannot use unprefixed locations for dynamic locations\"\n )\n else:\n prefix = prefix.rstrip(\"/\")\n if (prefix, root) not in self.locations:\n self.locations.append((prefix, root))\n self.prefixes.add(prefix)\n for prefix, root in self.locations:\n filesystem_storage = FileSystemStorage(location=root)\n filesystem_storage.prefix = prefix\n self.storages[root] = filesystem_storage\n\n def find_location(self, root, path, prefix=None):\n \"\"\"\n Finds a requested static file in a location, returning the 
found\n absolute path (or ``None`` if no match).\n Vendored from Django to handle being passed a URL path instead of a file path.\n \"\"\"\n if prefix:\n prefix = prefix + \"/\"\n if not path.startswith(prefix):\n return None\n path = path[len(prefix) :]\n path = safe_join(root, path)\n if os.path.exists(path):\n return path\n\n\nclass SlicedFile(BufferedIOBase):\n def __init__(self, fileobj, start, end):\n fileobj.seek(start)\n self.fileobj = fileobj\n self.remaining = end - start + 1\n\n def read(self, size=-1):\n if self.remaining <= 0:\n return b\"\"\n if size >= 0:\n size = min(size, self.remaining)\n data = self.fileobj.read(size)\n self.remaining -= size\n return data\n\n\nclass EndRangeStaticFile(StaticFile):\n def get_range_response(self, range_header, base_headers, file_handle):\n headers = []\n for item in base_headers:\n if item[0] == \"Content-Length\":\n size = int(item[1])\n else:\n headers.append(item)\n start, end = self.get_byte_range(range_header, size)\n if start >= end:\n return self.get_range_not_satisfiable_response(file_handle, size)\n if file_handle is not None:\n file_handle = SlicedFile(file_handle, start, end)\n headers.append((\"Content-Range\", \"bytes {}-{}/{}\".format(start, end, size)))\n headers.append((\"Content-Length\", str(end - start + 1)))\n return Response(HTTPStatus.PARTIAL_CONTENT, headers, file_handle)\n\n\nclass DynamicWhiteNoise(WhiteNoise):\n index_file = \"index.html\"\n\n def __init__(\n self, application, dynamic_locations=None, static_prefix=None, **kwargs\n ):\n whitenoise_settings = {\n # Use 120 seconds as the default cache time for static assets\n \"max_age\": 120,\n # Add a test for any file name that contains a semantic version number\n # or a 32 digit number (assumed to be a file hash)\n # these files will be cached indefinitely\n \"immutable_file_test\": r\"((0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)|[a-f0-9]{32})\",\n \"autorefresh\": os.environ.get(\"KOLIBRI_DEVELOPER_MODE\", False),\n }\n kwargs.update(whitenoise_settings)\n super(DynamicWhiteNoise, self).__init__(application, **kwargs)\n self.dynamic_finder = FileFinder(dynamic_locations or [])\n # Generate a regex to check if a path matches one of our dynamic\n # location prefixes\n self.dynamic_check = (\n re.compile(\"^({})\".format(\"|\".join(self.dynamic_finder.prefixes)))\n if self.dynamic_finder.prefixes\n else None\n )\n if static_prefix is not None and not static_prefix.endswith(\"/\"):\n raise ValueError(\"Static prefix must end in '/'\")\n self.static_prefix = static_prefix\n\n def __call__(self, environ, start_response):\n path = decode_path_info(environ.get(\"PATH_INFO\", \"\"))\n if self.autorefresh:\n static_file = self.find_file(path)\n else:\n static_file = self.files.get(path)\n if static_file is None:\n static_file = self.find_and_cache_dynamic_file(path)\n if static_file is None:\n return self.application(environ, start_response)\n return self.serve(static_file, environ, start_response)\n\n def find_and_cache_dynamic_file(self, url):\n path = self.get_dynamic_path(url)\n if path:\n file_stat = os.stat(path)\n # Only try to do matches for regular files.\n if stat.S_ISREG(file_stat.st_mode):\n stat_cache = {path: os.stat(path)}\n for ext in compressed_file_extensions:\n try:\n comp_path = \"{}.{}\".format(path, ext)\n stat_cache[comp_path] = os.stat(comp_path)\n except (IOError, OSError):\n pass\n self.add_file_to_dictionary(url, path, stat_cache=stat_cache)\n elif (\n path is None\n and self.static_prefix is not None\n and 
url.startswith(self.static_prefix)\n ):\n self.files[url] = NOT_FOUND\n return self.files.get(url)\n\n def get_dynamic_path(self, url):\n if self.static_prefix is not None and url.startswith(self.static_prefix):\n return finders.find(url[len(self.static_prefix) :])\n if self.dynamic_check is not None and self.dynamic_check.match(url):\n return self.dynamic_finder.find(url)\n\n def candidate_paths_for_url(self, url):\n paths = super(DynamicWhiteNoise, self).candidate_paths_for_url(url)\n for path in paths:\n yield path\n path = self.get_dynamic_path(url)\n if path:\n yield path\n\n def get_static_file(self, path, url, stat_cache=None):\n \"\"\"\n Vendor this function from source to substitute in our\n own StaticFile class that can properly handle ranges.\n \"\"\"\n # Optimization: bail early if file does not exist\n if stat_cache is None and not os.path.exists(path):\n raise MissingFileError(path)\n headers = Headers([])\n self.add_mime_headers(headers, path, url)\n self.add_cache_headers(headers, path, url)\n if self.allow_all_origins:\n headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if self.add_headers_function:\n self.add_headers_function(headers, path, url)\n return EndRangeStaticFile(\n path,\n headers.items(),\n stat_cache=stat_cache,\n encodings={\"gzip\": path + \".gz\", \"br\": path + \".br\"},\n )\n", "path": "kolibri/utils/kolibri_whitenoise.py"}]}
2,093
769
gh_patches_debug_14540
rasdani/github-patches
git_diff
ckan__ckan-6088
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow uploaders to only override asset / resource uploading **CKAN version** 2.8.7, 2.9.2 **Describe the bug** Currently, CKAN allows handling both image / asset uploading and resource uploading via the `IUploader` plugin interface. That interface allows plugins to return `None` from `get_uploader()` / `get_resource_uploader()` so that to signify that this uploader in not meant to handle that type of upload. However, when trying to use two different plugins each implementing a different type of uploader, one may encounter a problem where depending on the order of loading, there could be conflicts or CKAN might fall back to it's built-in uploader for no good reason. This is because of this logic in https://github.com/ckan/ckan/blob/5a4af6fc3bee81331fbc19c632bf6089dd1aa18a/ckan/lib/uploader.py#L52-L57 - it will iterate over *all* uploaders and call `get_uploader()`. If the *last one* returns None, it will fall back to CKAN's built in. It should pick either the first or last uploader to return a non-`None` value instead. Same happens for `get_resource_uploader()` in https://github.com/ckan/ckan/blob/5a4af6fc3bee81331fbc19c632bf6089dd1aa18a/ckan/lib/uploader.py#L65-L70. This is an easy fix, but I'm not sure if the preferred logic is to pick the *first* or *last* plugin to implement an uploader for that file type. **Steps to reproduce** Install two plugins, each supporting a different kind of uploader, and see them conflict, and see CKAN fall back to it's internal uploader for one of the file types. **Expected behavior** CKAN picks the first / last uploader plugin that supports this file type. **Additional details** I have encountered this when working on ckanext-asset-storage, but I suppose it can affect other uploaders / custom built ones as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `ckan/lib/uploader.py` Content: ``` 1 # encoding: utf-8 2 3 import os 4 import cgi 5 import datetime 6 import logging 7 import magic 8 import mimetypes 9 from six.moves.urllib.parse import urlparse 10 11 from werkzeug.datastructures import FileStorage as FlaskFileStorage 12 13 import ckan.lib.munge as munge 14 import ckan.logic as logic 15 import ckan.plugins as plugins 16 from ckan.common import config 17 18 ALLOWED_UPLOAD_TYPES = (cgi.FieldStorage, FlaskFileStorage) 19 MB = 1 << 20 20 21 log = logging.getLogger(__name__) 22 23 _storage_path = None 24 _max_resource_size = None 25 _max_image_size = None 26 27 28 def _copy_file(input_file, output_file, max_size): 29 input_file.seek(0) 30 current_size = 0 31 while True: 32 current_size = current_size + 1 33 # MB chunks 34 data = input_file.read(MB) 35 36 if not data: 37 break 38 output_file.write(data) 39 if current_size > max_size: 40 raise logic.ValidationError({'upload': ['File upload too large']}) 41 42 43 def _get_underlying_file(wrapper): 44 if isinstance(wrapper, FlaskFileStorage): 45 return wrapper.stream 46 return wrapper.file 47 48 49 def get_uploader(upload_to, old_filename=None): 50 '''Query IUploader plugins and return an uploader instance for general 51 files.''' 52 upload = None 53 for plugin in plugins.PluginImplementations(plugins.IUploader): 54 upload = plugin.get_uploader(upload_to, old_filename) 55 56 # default uploader 57 if upload is None: 58 upload = Upload(upload_to, old_filename) 59 60 return upload 61 62 63 def get_resource_uploader(data_dict): 64 '''Query IUploader plugins and return a resource uploader instance.''' 65 upload = None 66 for plugin in plugins.PluginImplementations(plugins.IUploader): 67 upload = plugin.get_resource_uploader(data_dict) 68 69 # default uploader 70 if upload is None: 71 upload = ResourceUpload(data_dict) 72 73 return upload 74 75 76 def get_storage_path(): 77 '''Function to cache storage path''' 78 global _storage_path 79 80 # None means it has not been set. False means not in config. 81 if _storage_path is None: 82 storage_path = config.get('ckan.storage_path') 83 if storage_path: 84 _storage_path = storage_path 85 else: 86 log.critical('''Please specify a ckan.storage_path in your config 87 for your uploads''') 88 _storage_path = False 89 90 return _storage_path 91 92 93 def get_max_image_size(): 94 global _max_image_size 95 if _max_image_size is None: 96 _max_image_size = int(config.get('ckan.max_image_size', 2)) 97 return _max_image_size 98 99 100 def get_max_resource_size(): 101 global _max_resource_size 102 if _max_resource_size is None: 103 _max_resource_size = int(config.get('ckan.max_resource_size', 10)) 104 return _max_resource_size 105 106 107 class Upload(object): 108 def __init__(self, object_type, old_filename=None): 109 ''' Setup upload by creating a subdirectory of the storage directory 110 of name object_type. 
old_filename is the name of the file in the url 111 field last time''' 112 113 self.storage_path = None 114 self.filename = None 115 self.filepath = None 116 path = get_storage_path() 117 if not path: 118 return 119 self.storage_path = os.path.join(path, 'storage', 120 'uploads', object_type) 121 try: 122 os.makedirs(self.storage_path) 123 except OSError as e: 124 # errno 17 is file already exists 125 if e.errno != 17: 126 raise 127 self.object_type = object_type 128 self.old_filename = old_filename 129 if old_filename: 130 self.old_filepath = os.path.join(self.storage_path, old_filename) 131 132 def update_data_dict(self, data_dict, url_field, file_field, clear_field): 133 ''' Manipulate data from the data_dict. url_field is the name of the 134 field where the upload is going to be. file_field is name of the key 135 where the FieldStorage is kept (i.e the field where the file data 136 actually is). clear_field is the name of a boolean field which 137 requests the upload to be deleted. This needs to be called before 138 it reaches any validators''' 139 140 self.url = data_dict.get(url_field, '') 141 self.clear = data_dict.pop(clear_field, None) 142 self.file_field = file_field 143 self.upload_field_storage = data_dict.pop(file_field, None) 144 145 if not self.storage_path: 146 return 147 148 if isinstance(self.upload_field_storage, ALLOWED_UPLOAD_TYPES): 149 if self.upload_field_storage.filename: 150 self.filename = self.upload_field_storage.filename 151 self.filename = str(datetime.datetime.utcnow()) + self.filename 152 self.filename = munge.munge_filename_legacy(self.filename) 153 self.filepath = os.path.join(self.storage_path, self.filename) 154 data_dict[url_field] = self.filename 155 self.upload_file = _get_underlying_file( 156 self.upload_field_storage) 157 self.tmp_filepath = self.filepath + '~' 158 # keep the file if there has been no change 159 elif self.old_filename and not self.old_filename.startswith('http'): 160 if not self.clear: 161 data_dict[url_field] = self.old_filename 162 if self.clear and self.url == self.old_filename: 163 data_dict[url_field] = '' 164 165 def upload(self, max_size=2): 166 ''' Actually upload the file. 167 This should happen just before a commit but after the data has 168 been validated and flushed to the db. This is so we do not store 169 anything unless the request is actually good. 
170 max_size is size in MB maximum of the file''' 171 172 if self.filename: 173 with open(self.tmp_filepath, 'wb+') as output_file: 174 try: 175 _copy_file(self.upload_file, output_file, max_size) 176 except logic.ValidationError: 177 os.remove(self.tmp_filepath) 178 raise 179 finally: 180 self.upload_file.close() 181 os.rename(self.tmp_filepath, self.filepath) 182 self.clear = True 183 184 if (self.clear and self.old_filename 185 and not self.old_filename.startswith('http')): 186 try: 187 os.remove(self.old_filepath) 188 except OSError: 189 pass 190 191 192 class ResourceUpload(object): 193 def __init__(self, resource): 194 path = get_storage_path() 195 config_mimetype_guess = config.get('ckan.mimetype_guess', 'file_ext') 196 197 if not path: 198 self.storage_path = None 199 return 200 self.storage_path = os.path.join(path, 'resources') 201 try: 202 os.makedirs(self.storage_path) 203 except OSError as e: 204 # errno 17 is file already exists 205 if e.errno != 17: 206 raise 207 self.filename = None 208 self.mimetype = None 209 210 url = resource.get('url') 211 212 upload_field_storage = resource.pop('upload', None) 213 self.clear = resource.pop('clear_upload', None) 214 215 if url and config_mimetype_guess == 'file_ext' and urlparse(url).path: 216 self.mimetype = mimetypes.guess_type(url)[0] 217 218 if bool(upload_field_storage) and \ 219 isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES): 220 self.filesize = 0 # bytes 221 222 self.filename = upload_field_storage.filename 223 self.filename = munge.munge_filename(self.filename) 224 resource['url'] = self.filename 225 resource['url_type'] = 'upload' 226 resource['last_modified'] = datetime.datetime.utcnow() 227 self.upload_file = _get_underlying_file(upload_field_storage) 228 self.upload_file.seek(0, os.SEEK_END) 229 self.filesize = self.upload_file.tell() 230 # go back to the beginning of the file buffer 231 self.upload_file.seek(0, os.SEEK_SET) 232 233 # check if the mimetype failed from guessing with the url 234 if not self.mimetype and config_mimetype_guess == 'file_ext': 235 self.mimetype = mimetypes.guess_type(self.filename)[0] 236 237 if not self.mimetype and config_mimetype_guess == 'file_contents': 238 try: 239 self.mimetype = magic.from_buffer(self.upload_file.read(), 240 mime=True) 241 self.upload_file.seek(0, os.SEEK_SET) 242 except IOError as e: 243 # Not that important if call above fails 244 self.mimetype = None 245 246 elif self.clear: 247 resource['url_type'] = '' 248 249 def get_directory(self, id): 250 directory = os.path.join(self.storage_path, 251 id[0:3], id[3:6]) 252 return directory 253 254 def get_path(self, id): 255 directory = self.get_directory(id) 256 filepath = os.path.join(directory, id[6:]) 257 return filepath 258 259 def upload(self, id, max_size=10): 260 '''Actually upload the file. 261 262 :returns: ``'file uploaded'`` if a new file was successfully uploaded 263 (whether it overwrote a previously uploaded file or not), 264 ``'file deleted'`` if an existing uploaded file was deleted, 265 or ``None`` if nothing changed 266 :rtype: ``string`` or ``None`` 267 268 ''' 269 if not self.storage_path: 270 return 271 272 # Get directory and filepath on the system 273 # where the file for this resource will be stored 274 directory = self.get_directory(id) 275 filepath = self.get_path(id) 276 277 # If a filename has been provided (a file is being uploaded) 278 # we write it to the filepath (and overwrite it if it already 279 # exists). 
This way the uploaded file will always be stored 280 # in the same location 281 if self.filename: 282 try: 283 os.makedirs(directory) 284 except OSError as e: 285 # errno 17 is file already exists 286 if e.errno != 17: 287 raise 288 tmp_filepath = filepath + '~' 289 with open(tmp_filepath, 'wb+') as output_file: 290 try: 291 _copy_file(self.upload_file, output_file, max_size) 292 except logic.ValidationError: 293 os.remove(tmp_filepath) 294 raise 295 finally: 296 self.upload_file.close() 297 os.rename(tmp_filepath, filepath) 298 return 299 300 # The resource form only sets self.clear (via the input clear_upload) 301 # to True when an uploaded file is not replaced by another uploaded 302 # file, only if it is replaced by a link to file. 303 # If the uploaded file is replaced by a link, we should remove the 304 # previously uploaded file to clean up the file system. 305 if self.clear: 306 try: 307 os.remove(filepath) 308 except OSError as e: 309 pass 310 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/lib/uploader.py b/ckan/lib/uploader.py
--- a/ckan/lib/uploader.py
+++ b/ckan/lib/uploader.py
@@ -52,6 +52,8 @@
 upload = None
 for plugin in plugins.PluginImplementations(plugins.IUploader):
 upload = plugin.get_uploader(upload_to, old_filename)
+ if upload:
+ break
 
 # default uploader
 if upload is None:
@@ -65,6 +67,8 @@
 upload = None
 for plugin in plugins.PluginImplementations(plugins.IUploader):
 upload = plugin.get_resource_uploader(data_dict)
+ if upload:
+ break
 
 # default uploader
 if upload is None:
{"golden_diff": "diff --git a/ckan/lib/uploader.py b/ckan/lib/uploader.py\n--- a/ckan/lib/uploader.py\n+++ b/ckan/lib/uploader.py\n@@ -52,6 +52,8 @@\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_uploader(upload_to, old_filename)\n+ if upload:\n+ break\n \n # default uploader\n if upload is None:\n@@ -65,6 +67,8 @@\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_resource_uploader(data_dict)\n+ if upload:\n+ break\n \n # default uploader\n if upload is None:\n", "issue": "Allow uploaders to only override asset / resource uploading\n**CKAN version**\r\n2.8.7, 2.9.2\r\n\r\n**Describe the bug**\r\nCurrently, CKAN allows handling both image / asset uploading and resource uploading via the `IUploader` plugin interface. That interface allows plugins to return `None` from `get_uploader()` / `get_resource_uploader()` so that to signify that this uploader in not meant to handle that type of upload. \r\n\r\nHowever, when trying to use two different plugins each implementing a different type of uploader, one may encounter a problem where depending on the order of loading, there could be conflicts or CKAN might fall back to it's built-in uploader for no good reason. \r\n\r\nThis is because of this logic in https://github.com/ckan/ckan/blob/5a4af6fc3bee81331fbc19c632bf6089dd1aa18a/ckan/lib/uploader.py#L52-L57 - it will iterate over *all* uploaders and call `get_uploader()`. If the *last one* returns None, it will fall back to CKAN's built in. It should pick either the first or last uploader to return a non-`None` value instead. \r\n\r\nSame happens for `get_resource_uploader()` in https://github.com/ckan/ckan/blob/5a4af6fc3bee81331fbc19c632bf6089dd1aa18a/ckan/lib/uploader.py#L65-L70.\r\n\r\nThis is an easy fix, but I'm not sure if the preferred logic is to pick the *first* or *last* plugin to implement an uploader for that file type. \r\n\r\n**Steps to reproduce**\r\nInstall two plugins, each supporting a different kind of uploader, and see them conflict, and see CKAN fall back to it's internal uploader for one of the file types.\r\n\r\n**Expected behavior**\r\nCKAN picks the first / last uploader plugin that supports this file type. 
\r\n\r\n**Additional details**\r\nI have encountered this when working on ckanext-asset-storage, but I suppose it can affect other uploaders / custom built ones as well.\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\nimport cgi\nimport datetime\nimport logging\nimport magic\nimport mimetypes\nfrom six.moves.urllib.parse import urlparse\n\nfrom werkzeug.datastructures import FileStorage as FlaskFileStorage\n\nimport ckan.lib.munge as munge\nimport ckan.logic as logic\nimport ckan.plugins as plugins\nfrom ckan.common import config\n\nALLOWED_UPLOAD_TYPES = (cgi.FieldStorage, FlaskFileStorage)\nMB = 1 << 20\n\nlog = logging.getLogger(__name__)\n\n_storage_path = None\n_max_resource_size = None\n_max_image_size = None\n\n\ndef _copy_file(input_file, output_file, max_size):\n input_file.seek(0)\n current_size = 0\n while True:\n current_size = current_size + 1\n # MB chunks\n data = input_file.read(MB)\n\n if not data:\n break\n output_file.write(data)\n if current_size > max_size:\n raise logic.ValidationError({'upload': ['File upload too large']})\n\n\ndef _get_underlying_file(wrapper):\n if isinstance(wrapper, FlaskFileStorage):\n return wrapper.stream\n return wrapper.file\n\n\ndef get_uploader(upload_to, old_filename=None):\n '''Query IUploader plugins and return an uploader instance for general\n files.'''\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_uploader(upload_to, old_filename)\n\n # default uploader\n if upload is None:\n upload = Upload(upload_to, old_filename)\n\n return upload\n\n\ndef get_resource_uploader(data_dict):\n '''Query IUploader plugins and return a resource uploader instance.'''\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_resource_uploader(data_dict)\n\n # default uploader\n if upload is None:\n upload = ResourceUpload(data_dict)\n\n return upload\n\n\ndef get_storage_path():\n '''Function to cache storage path'''\n global _storage_path\n\n # None means it has not been set. False means not in config.\n if _storage_path is None:\n storage_path = config.get('ckan.storage_path')\n if storage_path:\n _storage_path = storage_path\n else:\n log.critical('''Please specify a ckan.storage_path in your config\n for your uploads''')\n _storage_path = False\n\n return _storage_path\n\n\ndef get_max_image_size():\n global _max_image_size\n if _max_image_size is None:\n _max_image_size = int(config.get('ckan.max_image_size', 2))\n return _max_image_size\n\n\ndef get_max_resource_size():\n global _max_resource_size\n if _max_resource_size is None:\n _max_resource_size = int(config.get('ckan.max_resource_size', 10))\n return _max_resource_size\n\n\nclass Upload(object):\n def __init__(self, object_type, old_filename=None):\n ''' Setup upload by creating a subdirectory of the storage directory\n of name object_type. 
old_filename is the name of the file in the url\n field last time'''\n\n self.storage_path = None\n self.filename = None\n self.filepath = None\n path = get_storage_path()\n if not path:\n return\n self.storage_path = os.path.join(path, 'storage',\n 'uploads', object_type)\n try:\n os.makedirs(self.storage_path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n self.object_type = object_type\n self.old_filename = old_filename\n if old_filename:\n self.old_filepath = os.path.join(self.storage_path, old_filename)\n\n def update_data_dict(self, data_dict, url_field, file_field, clear_field):\n ''' Manipulate data from the data_dict. url_field is the name of the\n field where the upload is going to be. file_field is name of the key\n where the FieldStorage is kept (i.e the field where the file data\n actually is). clear_field is the name of a boolean field which\n requests the upload to be deleted. This needs to be called before\n it reaches any validators'''\n\n self.url = data_dict.get(url_field, '')\n self.clear = data_dict.pop(clear_field, None)\n self.file_field = file_field\n self.upload_field_storage = data_dict.pop(file_field, None)\n\n if not self.storage_path:\n return\n\n if isinstance(self.upload_field_storage, ALLOWED_UPLOAD_TYPES):\n if self.upload_field_storage.filename:\n self.filename = self.upload_field_storage.filename\n self.filename = str(datetime.datetime.utcnow()) + self.filename\n self.filename = munge.munge_filename_legacy(self.filename)\n self.filepath = os.path.join(self.storage_path, self.filename)\n data_dict[url_field] = self.filename\n self.upload_file = _get_underlying_file(\n self.upload_field_storage)\n self.tmp_filepath = self.filepath + '~'\n # keep the file if there has been no change\n elif self.old_filename and not self.old_filename.startswith('http'):\n if not self.clear:\n data_dict[url_field] = self.old_filename\n if self.clear and self.url == self.old_filename:\n data_dict[url_field] = ''\n\n def upload(self, max_size=2):\n ''' Actually upload the file.\n This should happen just before a commit but after the data has\n been validated and flushed to the db. 
This is so we do not store\n anything unless the request is actually good.\n max_size is size in MB maximum of the file'''\n\n if self.filename:\n with open(self.tmp_filepath, 'wb+') as output_file:\n try:\n _copy_file(self.upload_file, output_file, max_size)\n except logic.ValidationError:\n os.remove(self.tmp_filepath)\n raise\n finally:\n self.upload_file.close()\n os.rename(self.tmp_filepath, self.filepath)\n self.clear = True\n\n if (self.clear and self.old_filename\n and not self.old_filename.startswith('http')):\n try:\n os.remove(self.old_filepath)\n except OSError:\n pass\n\n\nclass ResourceUpload(object):\n def __init__(self, resource):\n path = get_storage_path()\n config_mimetype_guess = config.get('ckan.mimetype_guess', 'file_ext')\n\n if not path:\n self.storage_path = None\n return\n self.storage_path = os.path.join(path, 'resources')\n try:\n os.makedirs(self.storage_path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n self.filename = None\n self.mimetype = None\n\n url = resource.get('url')\n\n upload_field_storage = resource.pop('upload', None)\n self.clear = resource.pop('clear_upload', None)\n\n if url and config_mimetype_guess == 'file_ext' and urlparse(url).path:\n self.mimetype = mimetypes.guess_type(url)[0]\n\n if bool(upload_field_storage) and \\\n isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):\n self.filesize = 0 # bytes\n\n self.filename = upload_field_storage.filename\n self.filename = munge.munge_filename(self.filename)\n resource['url'] = self.filename\n resource['url_type'] = 'upload'\n resource['last_modified'] = datetime.datetime.utcnow()\n self.upload_file = _get_underlying_file(upload_field_storage)\n self.upload_file.seek(0, os.SEEK_END)\n self.filesize = self.upload_file.tell()\n # go back to the beginning of the file buffer\n self.upload_file.seek(0, os.SEEK_SET)\n\n # check if the mimetype failed from guessing with the url\n if not self.mimetype and config_mimetype_guess == 'file_ext':\n self.mimetype = mimetypes.guess_type(self.filename)[0]\n\n if not self.mimetype and config_mimetype_guess == 'file_contents':\n try:\n self.mimetype = magic.from_buffer(self.upload_file.read(),\n mime=True)\n self.upload_file.seek(0, os.SEEK_SET)\n except IOError as e:\n # Not that important if call above fails\n self.mimetype = None\n\n elif self.clear:\n resource['url_type'] = ''\n\n def get_directory(self, id):\n directory = os.path.join(self.storage_path,\n id[0:3], id[3:6])\n return directory\n\n def get_path(self, id):\n directory = self.get_directory(id)\n filepath = os.path.join(directory, id[6:])\n return filepath\n\n def upload(self, id, max_size=10):\n '''Actually upload the file.\n\n :returns: ``'file uploaded'`` if a new file was successfully uploaded\n (whether it overwrote a previously uploaded file or not),\n ``'file deleted'`` if an existing uploaded file was deleted,\n or ``None`` if nothing changed\n :rtype: ``string`` or ``None``\n\n '''\n if not self.storage_path:\n return\n\n # Get directory and filepath on the system\n # where the file for this resource will be stored\n directory = self.get_directory(id)\n filepath = self.get_path(id)\n\n # If a filename has been provided (a file is being uploaded)\n # we write it to the filepath (and overwrite it if it already\n # exists). 
This way the uploaded file will always be stored\n # in the same location\n if self.filename:\n try:\n os.makedirs(directory)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n tmp_filepath = filepath + '~'\n with open(tmp_filepath, 'wb+') as output_file:\n try:\n _copy_file(self.upload_file, output_file, max_size)\n except logic.ValidationError:\n os.remove(tmp_filepath)\n raise\n finally:\n self.upload_file.close()\n os.rename(tmp_filepath, filepath)\n return\n\n # The resource form only sets self.clear (via the input clear_upload)\n # to True when an uploaded file is not replaced by another uploaded\n # file, only if it is replaced by a link to file.\n # If the uploaded file is replaced by a link, we should remove the\n # previously uploaded file to clean up the file system.\n if self.clear:\n try:\n os.remove(filepath)\n except OSError as e:\n pass\n", "path": "ckan/lib/uploader.py"}], "after_files": [{"content": "# encoding: utf-8\n\nimport os\nimport cgi\nimport datetime\nimport logging\nimport magic\nimport mimetypes\nfrom six.moves.urllib.parse import urlparse\n\nfrom werkzeug.datastructures import FileStorage as FlaskFileStorage\n\nimport ckan.lib.munge as munge\nimport ckan.logic as logic\nimport ckan.plugins as plugins\nfrom ckan.common import config\n\nALLOWED_UPLOAD_TYPES = (cgi.FieldStorage, FlaskFileStorage)\nMB = 1 << 20\n\nlog = logging.getLogger(__name__)\n\n_storage_path = None\n_max_resource_size = None\n_max_image_size = None\n\n\ndef _copy_file(input_file, output_file, max_size):\n input_file.seek(0)\n current_size = 0\n while True:\n current_size = current_size + 1\n # MB chunks\n data = input_file.read(MB)\n\n if not data:\n break\n output_file.write(data)\n if current_size > max_size:\n raise logic.ValidationError({'upload': ['File upload too large']})\n\n\ndef _get_underlying_file(wrapper):\n if isinstance(wrapper, FlaskFileStorage):\n return wrapper.stream\n return wrapper.file\n\n\ndef get_uploader(upload_to, old_filename=None):\n '''Query IUploader plugins and return an uploader instance for general\n files.'''\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_uploader(upload_to, old_filename)\n if upload:\n break\n\n # default uploader\n if upload is None:\n upload = Upload(upload_to, old_filename)\n\n return upload\n\n\ndef get_resource_uploader(data_dict):\n '''Query IUploader plugins and return a resource uploader instance.'''\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_resource_uploader(data_dict)\n if upload:\n break\n\n # default uploader\n if upload is None:\n upload = ResourceUpload(data_dict)\n\n return upload\n\n\ndef get_storage_path():\n '''Function to cache storage path'''\n global _storage_path\n\n # None means it has not been set. 
False means not in config.\n if _storage_path is None:\n storage_path = config.get('ckan.storage_path')\n if storage_path:\n _storage_path = storage_path\n else:\n log.critical('''Please specify a ckan.storage_path in your config\n for your uploads''')\n _storage_path = False\n\n return _storage_path\n\n\ndef get_max_image_size():\n global _max_image_size\n if _max_image_size is None:\n _max_image_size = int(config.get('ckan.max_image_size', 2))\n return _max_image_size\n\n\ndef get_max_resource_size():\n global _max_resource_size\n if _max_resource_size is None:\n _max_resource_size = int(config.get('ckan.max_resource_size', 10))\n return _max_resource_size\n\n\nclass Upload(object):\n def __init__(self, object_type, old_filename=None):\n ''' Setup upload by creating a subdirectory of the storage directory\n of name object_type. old_filename is the name of the file in the url\n field last time'''\n\n self.storage_path = None\n self.filename = None\n self.filepath = None\n path = get_storage_path()\n if not path:\n return\n self.storage_path = os.path.join(path, 'storage',\n 'uploads', object_type)\n try:\n os.makedirs(self.storage_path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n self.object_type = object_type\n self.old_filename = old_filename\n if old_filename:\n self.old_filepath = os.path.join(self.storage_path, old_filename)\n\n def update_data_dict(self, data_dict, url_field, file_field, clear_field):\n ''' Manipulate data from the data_dict. url_field is the name of the\n field where the upload is going to be. file_field is name of the key\n where the FieldStorage is kept (i.e the field where the file data\n actually is). clear_field is the name of a boolean field which\n requests the upload to be deleted. This needs to be called before\n it reaches any validators'''\n\n self.url = data_dict.get(url_field, '')\n self.clear = data_dict.pop(clear_field, None)\n self.file_field = file_field\n self.upload_field_storage = data_dict.pop(file_field, None)\n\n if not self.storage_path:\n return\n\n if isinstance(self.upload_field_storage, ALLOWED_UPLOAD_TYPES):\n if self.upload_field_storage.filename:\n self.filename = self.upload_field_storage.filename\n self.filename = str(datetime.datetime.utcnow()) + self.filename\n self.filename = munge.munge_filename_legacy(self.filename)\n self.filepath = os.path.join(self.storage_path, self.filename)\n data_dict[url_field] = self.filename\n self.upload_file = _get_underlying_file(\n self.upload_field_storage)\n self.tmp_filepath = self.filepath + '~'\n # keep the file if there has been no change\n elif self.old_filename and not self.old_filename.startswith('http'):\n if not self.clear:\n data_dict[url_field] = self.old_filename\n if self.clear and self.url == self.old_filename:\n data_dict[url_field] = ''\n\n def upload(self, max_size=2):\n ''' Actually upload the file.\n This should happen just before a commit but after the data has\n been validated and flushed to the db. 
This is so we do not store\n anything unless the request is actually good.\n max_size is size in MB maximum of the file'''\n\n if self.filename:\n with open(self.tmp_filepath, 'wb+') as output_file:\n try:\n _copy_file(self.upload_file, output_file, max_size)\n except logic.ValidationError:\n os.remove(self.tmp_filepath)\n raise\n finally:\n self.upload_file.close()\n os.rename(self.tmp_filepath, self.filepath)\n self.clear = True\n\n if (self.clear and self.old_filename\n and not self.old_filename.startswith('http')):\n try:\n os.remove(self.old_filepath)\n except OSError:\n pass\n\n\nclass ResourceUpload(object):\n def __init__(self, resource):\n path = get_storage_path()\n config_mimetype_guess = config.get('ckan.mimetype_guess', 'file_ext')\n\n if not path:\n self.storage_path = None\n return\n self.storage_path = os.path.join(path, 'resources')\n try:\n os.makedirs(self.storage_path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n self.filename = None\n self.mimetype = None\n\n url = resource.get('url')\n\n upload_field_storage = resource.pop('upload', None)\n self.clear = resource.pop('clear_upload', None)\n\n if url and config_mimetype_guess == 'file_ext' and urlparse(url).path:\n self.mimetype = mimetypes.guess_type(url)[0]\n\n if bool(upload_field_storage) and \\\n isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):\n self.filesize = 0 # bytes\n\n self.filename = upload_field_storage.filename\n self.filename = munge.munge_filename(self.filename)\n resource['url'] = self.filename\n resource['url_type'] = 'upload'\n resource['last_modified'] = datetime.datetime.utcnow()\n self.upload_file = _get_underlying_file(upload_field_storage)\n self.upload_file.seek(0, os.SEEK_END)\n self.filesize = self.upload_file.tell()\n # go back to the beginning of the file buffer\n self.upload_file.seek(0, os.SEEK_SET)\n\n # check if the mimetype failed from guessing with the url\n if not self.mimetype and config_mimetype_guess == 'file_ext':\n self.mimetype = mimetypes.guess_type(self.filename)[0]\n\n if not self.mimetype and config_mimetype_guess == 'file_contents':\n try:\n self.mimetype = magic.from_buffer(self.upload_file.read(),\n mime=True)\n self.upload_file.seek(0, os.SEEK_SET)\n except IOError as e:\n # Not that important if call above fails\n self.mimetype = None\n\n elif self.clear:\n resource['url_type'] = ''\n\n def get_directory(self, id):\n directory = os.path.join(self.storage_path,\n id[0:3], id[3:6])\n return directory\n\n def get_path(self, id):\n directory = self.get_directory(id)\n filepath = os.path.join(directory, id[6:])\n return filepath\n\n def upload(self, id, max_size=10):\n '''Actually upload the file.\n\n :returns: ``'file uploaded'`` if a new file was successfully uploaded\n (whether it overwrote a previously uploaded file or not),\n ``'file deleted'`` if an existing uploaded file was deleted,\n or ``None`` if nothing changed\n :rtype: ``string`` or ``None``\n\n '''\n if not self.storage_path:\n return\n\n # Get directory and filepath on the system\n # where the file for this resource will be stored\n directory = self.get_directory(id)\n filepath = self.get_path(id)\n\n # If a filename has been provided (a file is being uploaded)\n # we write it to the filepath (and overwrite it if it already\n # exists). 
This way the uploaded file will always be stored\n # in the same location\n if self.filename:\n try:\n os.makedirs(directory)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n tmp_filepath = filepath + '~'\n with open(tmp_filepath, 'wb+') as output_file:\n try:\n _copy_file(self.upload_file, output_file, max_size)\n except logic.ValidationError:\n os.remove(tmp_filepath)\n raise\n finally:\n self.upload_file.close()\n os.rename(tmp_filepath, filepath)\n return\n\n # The resource form only sets self.clear (via the input clear_upload)\n # to True when an uploaded file is not replaced by another uploaded\n # file, only if it is replaced by a link to file.\n # If the uploaded file is replaced by a link, we should remove the\n # previously uploaded file to clean up the file system.\n if self.clear:\n try:\n os.remove(filepath)\n except OSError as e:\n pass\n", "path": "ckan/lib/uploader.py"}]}
3,853
164
gh_patches_debug_22181
rasdani/github-patches
git_diff
fossasia__open-event-server-6129
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- order_expiry_time migration error **Describe the bug** The latest migration, `2c7ff9781032` had a bug where the server_default was supposed to be a string. **Additional context** Working on it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `migrations/versions/2c7ff9781032_.py` Content: ``` 1 """empty message 2 3 Revision ID: 2c7ff9781032 4 Revises: b08a4ffff5dd 5 Create Date: 2019-06-21 04:33:59.021835 6 7 """ 8 9 from alembic import op 10 import sqlalchemy as sa 11 import sqlalchemy_utils 12 13 14 # revision identifiers, used by Alembic. 15 revision = '2c7ff9781032' 16 down_revision = 'b08a4ffff5dd' 17 18 19 def upgrade(): 20 # ### commands auto generated by Alembic - please adjust! ### 21 op.drop_column('events', 'order_expiry_time') 22 op.drop_column('events_version', 'order_expiry_time') 23 op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False)) 24 # ### end Alembic commands ### 25 26 27 def downgrade(): 28 # ### commands auto generated by Alembic - please adjust! ### 29 op.drop_column('settings', 'order_expiry_time') 30 op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False)) 31 op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False)) 32 # ### end Alembic commands ### 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/migrations/versions/2c7ff9781032_.py b/migrations/versions/2c7ff9781032_.py
--- a/migrations/versions/2c7ff9781032_.py
+++ b/migrations/versions/2c7ff9781032_.py
@@ -20,13 +20,13 @@
 # ### commands auto generated by Alembic - please adjust! ###
 op.drop_column('events', 'order_expiry_time')
 op.drop_column('events_version', 'order_expiry_time')
- op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))
+ op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), server_default='15', nullable=False))
 # ### end Alembic commands ###
 
 
 def downgrade():
 # ### commands auto generated by Alembic - please adjust! ###
 op.drop_column('settings', 'order_expiry_time')
- op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))
- op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))
+ op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))
+ op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))
 # ### end Alembic commands ###
{"golden_diff": "diff --git a/migrations/versions/2c7ff9781032_.py b/migrations/versions/2c7ff9781032_.py\n--- a/migrations/versions/2c7ff9781032_.py\n+++ b/migrations/versions/2c7ff9781032_.py\n@@ -20,13 +20,13 @@\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('events', 'order_expiry_time')\n op.drop_column('events_version', 'order_expiry_time')\n- op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))\n+ op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), server_default='15', nullable=False))\n # ### end Alembic commands ###\n \n \n def downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('settings', 'order_expiry_time')\n- op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n- op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n+ op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n+ op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n", "issue": "order_expiry_time migration error\n**Describe the bug**\r\nThe latest migration, `2c7ff9781032` had a bug where the server_default was supposed to be a string. \r\n\r\n\r\n**Additional context**\r\nWorking on it.\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: 2c7ff9781032\nRevises: b08a4ffff5dd\nCreate Date: 2019-06-21 04:33:59.021835\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c7ff9781032'\ndown_revision = 'b08a4ffff5dd'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('events', 'order_expiry_time')\n op.drop_column('events_version', 'order_expiry_time')\n op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), default=15, nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('settings', 'order_expiry_time')\n op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=False))\n # ### end Alembic commands ###\n", "path": "migrations/versions/2c7ff9781032_.py"}], "after_files": [{"content": "\"\"\"empty message\n\nRevision ID: 2c7ff9781032\nRevises: b08a4ffff5dd\nCreate Date: 2019-06-21 04:33:59.021835\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '2c7ff9781032'\ndown_revision = 'b08a4ffff5dd'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('events', 'order_expiry_time')\n op.drop_column('events_version', 'order_expiry_time')\n op.add_column('settings', sa.Column('order_expiry_time', sa.Integer(), server_default='15', nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('settings', 'order_expiry_time')\n op.add_column('events_version', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('events', sa.Column('order_expiry_time', sa.INTEGER(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n", "path": "migrations/versions/2c7ff9781032_.py"}]}
674
342
gh_patches_debug_33020
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-2827
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When validating the form for adding book manually, some fields are emptied if a field is not valid **Describe the bug** When adding manually an item (book), if a field is not valid, after hitting enter some fields are emptied (values removed), such as the author, the subject, the cover, etc. Steps to reproduce the behavior: 1. Add manually a book. 2. Fill the form with a publication date having only the year. 3. Add an author, subjects and a cover. 4. Save the form. 5. See the emptied fields. **Expected behavior** All filled in fields should keep their value after validating the form. **Instance** https://bw.diaspodon.fr **Additional context** Add any other context about the problem here. --- **Desktop (please complete the following information):** - OS: Archlinux - Browser: Firefox - Version: 108 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/views/books/edit_book.py` Content: ``` 1 """ the good stuff! the books! """ 2 from re import sub, findall 3 from django.contrib.auth.decorators import login_required, permission_required 4 from django.contrib.postgres.search import SearchRank, SearchVector 5 from django.db import transaction 6 from django.http import HttpResponseBadRequest 7 from django.shortcuts import get_object_or_404, redirect 8 from django.template.response import TemplateResponse 9 from django.utils.decorators import method_decorator 10 from django.views.decorators.http import require_POST 11 from django.views import View 12 13 from bookwyrm import book_search, forms, models 14 15 # from bookwyrm.activitypub.base_activity import ActivityObject 16 from bookwyrm.utils.isni import ( 17 find_authors_by_name, 18 build_author_from_isni, 19 augment_author_metadata, 20 ) 21 from bookwyrm.views.helpers import get_edition 22 from .books import set_cover_from_url 23 24 # pylint: disable=no-self-use 25 @method_decorator(login_required, name="dispatch") 26 @method_decorator( 27 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch" 28 ) 29 class EditBook(View): 30 """edit a book""" 31 32 def get(self, request, book_id): 33 """info about a book""" 34 book = get_edition(book_id) 35 if not book.description: 36 book.description = book.parent_work.description 37 data = {"book": book, "form": forms.EditionForm(instance=book)} 38 return TemplateResponse(request, "book/edit/edit_book.html", data) 39 40 def post(self, request, book_id): 41 """edit a book cool""" 42 book = get_object_or_404(models.Edition, id=book_id) 43 form = forms.EditionForm(request.POST, request.FILES, instance=book) 44 45 data = {"book": book, "form": form} 46 ensure_transient_values_persist(request, data) 47 if not form.is_valid(): 48 return TemplateResponse(request, "book/edit/edit_book.html", data) 49 50 data = add_authors(request, data) 51 52 # either of the above cases requires additional confirmation 53 if data.get("add_author"): 54 return TemplateResponse(request, "book/edit/edit_book.html", data) 55 56 remove_authors = request.POST.getlist("remove_authors") 57 for author_id in remove_authors: 58 book.authors.remove(author_id) 59 60 book = form.save(request, commit=False) 61 62 url = request.POST.get("cover-url") 63 if url: 64 image = set_cover_from_url(url) 65 if image: 66 book.cover.save(*image, save=False) 67 68 book.save() 69 return redirect(f"/book/{book.id}") 70 71 72 
@method_decorator(login_required, name="dispatch") 73 @method_decorator( 74 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch" 75 ) 76 class CreateBook(View): 77 """brand new book""" 78 79 def get(self, request): 80 """info about a book""" 81 data = {"form": forms.EditionForm()} 82 return TemplateResponse(request, "book/edit/edit_book.html", data) 83 84 # pylint: disable=too-many-locals 85 def post(self, request): 86 """create a new book""" 87 # returns None if no match is found 88 form = forms.EditionForm(request.POST, request.FILES) 89 data = {"form": form} 90 91 # collect data provided by the work or import item 92 parent_work_id = request.POST.get("parent_work") 93 authors = None 94 if request.POST.get("authors"): 95 author_ids = findall(r"\d+", request.POST["authors"]) 96 authors = models.Author.objects.filter(id__in=author_ids) 97 98 # fake book in case we need to keep editing 99 if parent_work_id: 100 data["book"] = { 101 "parent_work": {"id": parent_work_id}, 102 "authors": authors, 103 } 104 105 ensure_transient_values_persist(request, data) 106 107 if not form.is_valid(): 108 return TemplateResponse(request, "book/edit/edit_book.html", data) 109 110 data = add_authors(request, data) 111 112 # check if this is an edition of an existing work 113 author_text = ", ".join(data.get("add_author", [])) 114 data["book_matches"] = book_search.search( 115 f'{form.cleaned_data.get("title")} {author_text}', 116 min_confidence=0.1, 117 )[:5] 118 119 # go to confirm mode 120 if not parent_work_id or data.get("add_author"): 121 data["confirm_mode"] = True 122 return TemplateResponse(request, "book/edit/edit_book.html", data) 123 124 with transaction.atomic(): 125 book = form.save(request) 126 parent_work = get_object_or_404(models.Work, id=parent_work_id) 127 book.parent_work = parent_work 128 129 if authors: 130 book.authors.add(*authors) 131 132 url = request.POST.get("cover-url") 133 if url: 134 image = set_cover_from_url(url) 135 if image: 136 book.cover.save(*image, save=False) 137 138 book.save() 139 return redirect(f"/book/{book.id}") 140 141 142 def ensure_transient_values_persist(request, data): 143 """ensure that values of transient form fields persist when re-rendering the form""" 144 data["cover_url"] = request.POST.get("cover-url") 145 146 147 def add_authors(request, data): 148 """helper for adding authors""" 149 add_author = [author for author in request.POST.getlist("add_author") if author] 150 if not add_author: 151 return data 152 153 data["add_author"] = add_author 154 data["author_matches"] = [] 155 data["isni_matches"] = [] 156 157 # creating a book or adding an author to a book needs another step 158 data["confirm_mode"] = True 159 # this isn't preserved because it isn't part of the form obj 160 data["remove_authors"] = request.POST.getlist("remove_authors") 161 162 for author in add_author: 163 # filter out empty author fields 164 if not author: 165 continue 166 # check for existing authors 167 vector = SearchVector("name", weight="A") + SearchVector("aliases", weight="B") 168 169 author_matches = ( 170 models.Author.objects.annotate(search=vector) 171 .annotate(rank=SearchRank(vector, author)) 172 .filter(rank__gt=0.4) 173 .order_by("-rank")[:5] 174 ) 175 176 isni_authors = find_authors_by_name( 177 author, description=True 178 ) # find matches from ISNI API 179 180 # dedupe isni authors we already have in the DB 181 exists = [ 182 i 183 for i in isni_authors 184 for a in author_matches 185 if sub(r"\D", "", str(i.isni)) == sub(r"\D", "", 
str(a.isni)) 186 ] 187 188 # pylint: disable=cell-var-from-loop 189 matches = list(filter(lambda x: x not in exists, isni_authors)) 190 # combine existing and isni authors 191 matches.extend(author_matches) 192 193 data["author_matches"].append( 194 { 195 "name": author.strip(), 196 "matches": matches, 197 "existing_isnis": exists, 198 } 199 ) 200 return data 201 202 203 @require_POST 204 @permission_required("bookwyrm.edit_book", raise_exception=True) 205 def create_book_from_data(request): 206 """create a book with starter data""" 207 author_ids = findall(r"\d+", request.POST.get("authors")) 208 book = { 209 "parent_work": {"id": request.POST.get("parent_work")}, 210 "authors": models.Author.objects.filter(id__in=author_ids).all(), 211 "subjects": request.POST.getlist("subjects"), 212 } 213 214 data = {"book": book, "form": forms.EditionForm(request.POST)} 215 return TemplateResponse(request, "book/edit/edit_book.html", data) 216 217 218 @method_decorator(login_required, name="dispatch") 219 @method_decorator( 220 permission_required("bookwyrm.edit_book", raise_exception=True), name="dispatch" 221 ) 222 class ConfirmEditBook(View): 223 """confirm edits to a book""" 224 225 # pylint: disable=too-many-locals 226 # pylint: disable=too-many-branches 227 def post(self, request, book_id=None): 228 """edit a book cool""" 229 # returns None if no match is found 230 book = models.Edition.objects.filter(id=book_id).first() 231 form = forms.EditionForm(request.POST, request.FILES, instance=book) 232 233 data = {"book": book, "form": form} 234 if not form.is_valid(): 235 return TemplateResponse(request, "book/edit/edit_book.html", data) 236 237 with transaction.atomic(): 238 # save book 239 book = form.save(request) 240 241 # add known authors 242 authors = None 243 if request.POST.get("authors"): 244 author_ids = findall(r"\d+", request.POST["authors"]) 245 authors = models.Author.objects.filter(id__in=author_ids) 246 book.authors.add(*authors) 247 248 # get or create author as needed 249 for i in range(int(request.POST.get("author-match-count", 0))): 250 match = request.POST.get(f"author_match-{i}") 251 if not match: 252 return HttpResponseBadRequest() 253 try: 254 # if it's an int, it's an ID 255 match = int(match) 256 author = get_object_or_404( 257 models.Author, id=request.POST[f"author_match-{i}"] 258 ) 259 # update author metadata if the ISNI record is more complete 260 isni = request.POST.get(f"isni-for-{match}", None) 261 if isni is not None: 262 augment_author_metadata(author, isni) 263 except ValueError: 264 # otherwise it's a new author 265 isni_match = request.POST.get(f"author_match-{i}") 266 author_object = build_author_from_isni(isni_match) 267 # with author data class from isni id 268 if "author" in author_object: 269 skeleton = models.Author.objects.create( 270 name=author_object["author"].name 271 ) 272 author = author_object["author"].to_model( 273 model=models.Author, overwrite=True, instance=skeleton 274 ) 275 else: 276 # or it's just a name 277 author = models.Author.objects.create(name=match) 278 book.authors.add(author) 279 280 # create work, if needed 281 if not book.parent_work: 282 work_match = request.POST.get("parent_work") 283 if work_match and work_match != "0": 284 work = get_object_or_404(models.Work, id=work_match) 285 else: 286 work = models.Work.objects.create(title=form.cleaned_data["title"]) 287 work.authors.set(book.authors.all()) 288 book.parent_work = work 289 290 for author_id in request.POST.getlist("remove_authors"): 291 book.authors.remove(author_id) 
292 293 # import cover, if requested 294 url = request.POST.get("cover-url") 295 if url: 296 image = set_cover_from_url(url) 297 if image: 298 book.cover.save(*image, save=False) 299 300 # we don't tell the world when creating a book 301 book.save(broadcast=False) 302 303 return redirect(f"/book/{book.id}") 304 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/views/books/edit_book.py b/bookwyrm/views/books/edit_book.py
--- a/bookwyrm/views/books/edit_book.py
+++ b/bookwyrm/views/books/edit_book.py
@@ -45,6 +45,7 @@
         data = {"book": book, "form": form}
         ensure_transient_values_persist(request, data)
         if not form.is_valid():
+            ensure_transient_values_persist(request, data, add_author=True)
             return TemplateResponse(request, "book/edit/edit_book.html", data)
 
         data = add_authors(request, data)
@@ -102,11 +103,13 @@
                 "authors": authors,
             }
 
-        ensure_transient_values_persist(request, data)
-
         if not form.is_valid():
+            ensure_transient_values_persist(request, data, form=form)
             return TemplateResponse(request, "book/edit/edit_book.html", data)
 
+        # we have to call this twice because it requires form.cleaned_data
+        # which only exists after we validate the form
+        ensure_transient_values_persist(request, data, form=form)
         data = add_authors(request, data)
 
         # check if this is an edition of an existing work
@@ -139,9 +142,15 @@
         return redirect(f"/book/{book.id}")
 
 
-def ensure_transient_values_persist(request, data):
+def ensure_transient_values_persist(request, data, **kwargs):
    """ensure that values of transient form fields persist when re-rendering the form"""
    data["cover_url"] = request.POST.get("cover-url")
+    if kwargs and kwargs.get("form"):
+        data["book"] = data.get("book") or {}
+        data["book"]["subjects"] = kwargs["form"].cleaned_data["subjects"]
+        data["add_author"] = request.POST.getlist("add_author")
+    elif kwargs and kwargs.get("add_author") is True:
+        data["add_author"] = request.POST.getlist("add_author")
 
 
 def add_authors(request, data):
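The core idea of this patch is to copy transient, non-model form values (cover URL, pending author names, subjects) back into the template context before every early `return TemplateResponse(...)`, so an invalid submission does not silently drop them. A rough sketch of that same pattern in a generic Django view, with hypothetical field names used only for illustration (this is not bookwyrm's actual code):

```python
# Sketch: when validation fails, copy transient POST values back into the
# context so the re-rendered form still shows what the user typed
# (field names here are illustrative, not bookwyrm's real ones).
from django.shortcuts import redirect
from django.template.response import TemplateResponse


def edit_item(request, form_class, template="edit_item.html"):
    form = form_class(request.POST, request.FILES)
    context = {"form": form}
    # transient values that are not ModelForm fields and would otherwise be lost
    context["cover_url"] = request.POST.get("cover-url")
    context["add_author"] = request.POST.getlist("add_author")
    if not form.is_valid():
        # re-render with the transient values still in the context
        return TemplateResponse(request, template, context)
    obj = form.save()
    return redirect(f"/item/{obj.id}")
```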
{"golden_diff": "diff --git a/bookwyrm/views/books/edit_book.py b/bookwyrm/views/books/edit_book.py\n--- a/bookwyrm/views/books/edit_book.py\n+++ b/bookwyrm/views/books/edit_book.py\n@@ -45,6 +45,7 @@\n data = {\"book\": book, \"form\": form}\n ensure_transient_values_persist(request, data)\n if not form.is_valid():\n+ ensure_transient_values_persist(request, data, add_author=True)\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n \n data = add_authors(request, data)\n@@ -102,11 +103,13 @@\n \"authors\": authors,\n }\n \n- ensure_transient_values_persist(request, data)\n-\n if not form.is_valid():\n+ ensure_transient_values_persist(request, data, form=form)\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n \n+ # we have to call this twice because it requires form.cleaned_data\n+ # which only exists after we validate the form\n+ ensure_transient_values_persist(request, data, form=form)\n data = add_authors(request, data)\n \n # check if this is an edition of an existing work\n@@ -139,9 +142,15 @@\n return redirect(f\"/book/{book.id}\")\n \n \n-def ensure_transient_values_persist(request, data):\n+def ensure_transient_values_persist(request, data, **kwargs):\n \"\"\"ensure that values of transient form fields persist when re-rendering the form\"\"\"\n data[\"cover_url\"] = request.POST.get(\"cover-url\")\n+ if kwargs and kwargs.get(\"form\"):\n+ data[\"book\"] = data.get(\"book\") or {}\n+ data[\"book\"][\"subjects\"] = kwargs[\"form\"].cleaned_data[\"subjects\"]\n+ data[\"add_author\"] = request.POST.getlist(\"add_author\")\n+ elif kwargs and kwargs.get(\"add_author\") is True:\n+ data[\"add_author\"] = request.POST.getlist(\"add_author\")\n \n \n def add_authors(request, data):\n", "issue": "When validating the form for adding book manually, some fields are emptied if a field is not valid\n**Describe the bug**\r\nWhen adding manually an item (book), if a field is not valid, after hitting enter some fields are emptied (values removed), such as the author, the subject, the cover, etc.\r\n\r\nSteps to reproduce the behavior:\r\n1. Add manually a book.\r\n2. Fill the form with a publication date having only the year.\r\n3. Add an author, subjects and a cover.\r\n4. Save the form.\r\n5. See the emptied fields.\r\n\r\n**Expected behavior**\r\nAll filled in fields should keep their value after validating the form.\r\n\r\n**Instance**\r\nhttps://bw.diaspodon.fr\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Archlinux\r\n - Browser: Firefox\r\n - Version: 108\r\n\n", "before_files": [{"content": "\"\"\" the good stuff! the books! 
\"\"\"\nfrom re import sub, findall\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.postgres.search import SearchRank, SearchVector\nfrom django.db import transaction\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_POST\nfrom django.views import View\n\nfrom bookwyrm import book_search, forms, models\n\n# from bookwyrm.activitypub.base_activity import ActivityObject\nfrom bookwyrm.utils.isni import (\n find_authors_by_name,\n build_author_from_isni,\n augment_author_metadata,\n)\nfrom bookwyrm.views.helpers import get_edition\nfrom .books import set_cover_from_url\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass EditBook(View):\n \"\"\"edit a book\"\"\"\n\n def get(self, request, book_id):\n \"\"\"info about a book\"\"\"\n book = get_edition(book_id)\n if not book.description:\n book.description = book.parent_work.description\n data = {\"book\": book, \"form\": forms.EditionForm(instance=book)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n def post(self, request, book_id):\n \"\"\"edit a book cool\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n ensure_transient_values_persist(request, data)\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # either of the above cases requires additional confirmation\n if data.get(\"add_author\"):\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n remove_authors = request.POST.getlist(\"remove_authors\")\n for author_id in remove_authors:\n book.authors.remove(author_id)\n\n book = form.save(request, commit=False)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass CreateBook(View):\n \"\"\"brand new book\"\"\"\n\n def get(self, request):\n \"\"\"info about a book\"\"\"\n data = {\"form\": forms.EditionForm()}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n # pylint: disable=too-many-locals\n def post(self, request):\n \"\"\"create a new book\"\"\"\n # returns None if no match is found\n form = forms.EditionForm(request.POST, request.FILES)\n data = {\"form\": form}\n\n # collect data provided by the work or import item\n parent_work_id = request.POST.get(\"parent_work\")\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n\n # fake book in case we need to keep editing\n if parent_work_id:\n data[\"book\"] = {\n \"parent_work\": {\"id\": parent_work_id},\n \"authors\": authors,\n }\n\n ensure_transient_values_persist(request, data)\n\n if not form.is_valid():\n return TemplateResponse(request, 
\"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # check if this is an edition of an existing work\n author_text = \", \".join(data.get(\"add_author\", []))\n data[\"book_matches\"] = book_search.search(\n f'{form.cleaned_data.get(\"title\")} {author_text}',\n min_confidence=0.1,\n )[:5]\n\n # go to confirm mode\n if not parent_work_id or data.get(\"add_author\"):\n data[\"confirm_mode\"] = True\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n book = form.save(request)\n parent_work = get_object_or_404(models.Work, id=parent_work_id)\n book.parent_work = parent_work\n\n if authors:\n book.authors.add(*authors)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\ndef ensure_transient_values_persist(request, data):\n \"\"\"ensure that values of transient form fields persist when re-rendering the form\"\"\"\n data[\"cover_url\"] = request.POST.get(\"cover-url\")\n\n\ndef add_authors(request, data):\n \"\"\"helper for adding authors\"\"\"\n add_author = [author for author in request.POST.getlist(\"add_author\") if author]\n if not add_author:\n return data\n\n data[\"add_author\"] = add_author\n data[\"author_matches\"] = []\n data[\"isni_matches\"] = []\n\n # creating a book or adding an author to a book needs another step\n data[\"confirm_mode\"] = True\n # this isn't preserved because it isn't part of the form obj\n data[\"remove_authors\"] = request.POST.getlist(\"remove_authors\")\n\n for author in add_author:\n # filter out empty author fields\n if not author:\n continue\n # check for existing authors\n vector = SearchVector(\"name\", weight=\"A\") + SearchVector(\"aliases\", weight=\"B\")\n\n author_matches = (\n models.Author.objects.annotate(search=vector)\n .annotate(rank=SearchRank(vector, author))\n .filter(rank__gt=0.4)\n .order_by(\"-rank\")[:5]\n )\n\n isni_authors = find_authors_by_name(\n author, description=True\n ) # find matches from ISNI API\n\n # dedupe isni authors we already have in the DB\n exists = [\n i\n for i in isni_authors\n for a in author_matches\n if sub(r\"\\D\", \"\", str(i.isni)) == sub(r\"\\D\", \"\", str(a.isni))\n ]\n\n # pylint: disable=cell-var-from-loop\n matches = list(filter(lambda x: x not in exists, isni_authors))\n # combine existing and isni authors\n matches.extend(author_matches)\n\n data[\"author_matches\"].append(\n {\n \"name\": author.strip(),\n \"matches\": matches,\n \"existing_isnis\": exists,\n }\n )\n return data\n\n\n@require_POST\n@permission_required(\"bookwyrm.edit_book\", raise_exception=True)\ndef create_book_from_data(request):\n \"\"\"create a book with starter data\"\"\"\n author_ids = findall(r\"\\d+\", request.POST.get(\"authors\"))\n book = {\n \"parent_work\": {\"id\": request.POST.get(\"parent_work\")},\n \"authors\": models.Author.objects.filter(id__in=author_ids).all(),\n \"subjects\": request.POST.getlist(\"subjects\"),\n }\n\n data = {\"book\": book, \"form\": forms.EditionForm(request.POST)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass ConfirmEditBook(View):\n \"\"\"confirm edits to a book\"\"\"\n\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n def post(self, request, 
book_id=None):\n \"\"\"edit a book cool\"\"\"\n # returns None if no match is found\n book = models.Edition.objects.filter(id=book_id).first()\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n # save book\n book = form.save(request)\n\n # add known authors\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n book.authors.add(*authors)\n\n # get or create author as needed\n for i in range(int(request.POST.get(\"author-match-count\", 0))):\n match = request.POST.get(f\"author_match-{i}\")\n if not match:\n return HttpResponseBadRequest()\n try:\n # if it's an int, it's an ID\n match = int(match)\n author = get_object_or_404(\n models.Author, id=request.POST[f\"author_match-{i}\"]\n )\n # update author metadata if the ISNI record is more complete\n isni = request.POST.get(f\"isni-for-{match}\", None)\n if isni is not None:\n augment_author_metadata(author, isni)\n except ValueError:\n # otherwise it's a new author\n isni_match = request.POST.get(f\"author_match-{i}\")\n author_object = build_author_from_isni(isni_match)\n # with author data class from isni id\n if \"author\" in author_object:\n skeleton = models.Author.objects.create(\n name=author_object[\"author\"].name\n )\n author = author_object[\"author\"].to_model(\n model=models.Author, overwrite=True, instance=skeleton\n )\n else:\n # or it's just a name\n author = models.Author.objects.create(name=match)\n book.authors.add(author)\n\n # create work, if needed\n if not book.parent_work:\n work_match = request.POST.get(\"parent_work\")\n if work_match and work_match != \"0\":\n work = get_object_or_404(models.Work, id=work_match)\n else:\n work = models.Work.objects.create(title=form.cleaned_data[\"title\"])\n work.authors.set(book.authors.all())\n book.parent_work = work\n\n for author_id in request.POST.getlist(\"remove_authors\"):\n book.authors.remove(author_id)\n\n # import cover, if requested\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n # we don't tell the world when creating a book\n book.save(broadcast=False)\n\n return redirect(f\"/book/{book.id}\")\n", "path": "bookwyrm/views/books/edit_book.py"}], "after_files": [{"content": "\"\"\" the good stuff! the books! 
\"\"\"\nfrom re import sub, findall\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.postgres.search import SearchRank, SearchVector\nfrom django.db import transaction\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.http import require_POST\nfrom django.views import View\n\nfrom bookwyrm import book_search, forms, models\n\n# from bookwyrm.activitypub.base_activity import ActivityObject\nfrom bookwyrm.utils.isni import (\n find_authors_by_name,\n build_author_from_isni,\n augment_author_metadata,\n)\nfrom bookwyrm.views.helpers import get_edition\nfrom .books import set_cover_from_url\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass EditBook(View):\n \"\"\"edit a book\"\"\"\n\n def get(self, request, book_id):\n \"\"\"info about a book\"\"\"\n book = get_edition(book_id)\n if not book.description:\n book.description = book.parent_work.description\n data = {\"book\": book, \"form\": forms.EditionForm(instance=book)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n def post(self, request, book_id):\n \"\"\"edit a book cool\"\"\"\n book = get_object_or_404(models.Edition, id=book_id)\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n ensure_transient_values_persist(request, data)\n if not form.is_valid():\n ensure_transient_values_persist(request, data, add_author=True)\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n data = add_authors(request, data)\n\n # either of the above cases requires additional confirmation\n if data.get(\"add_author\"):\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n remove_authors = request.POST.getlist(\"remove_authors\")\n for author_id in remove_authors:\n book.authors.remove(author_id)\n\n book = form.save(request, commit=False)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass CreateBook(View):\n \"\"\"brand new book\"\"\"\n\n def get(self, request):\n \"\"\"info about a book\"\"\"\n data = {\"form\": forms.EditionForm()}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n # pylint: disable=too-many-locals\n def post(self, request):\n \"\"\"create a new book\"\"\"\n # returns None if no match is found\n form = forms.EditionForm(request.POST, request.FILES)\n data = {\"form\": form}\n\n # collect data provided by the work or import item\n parent_work_id = request.POST.get(\"parent_work\")\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n\n # fake book in case we need to keep editing\n if parent_work_id:\n data[\"book\"] = {\n \"parent_work\": {\"id\": parent_work_id},\n \"authors\": authors,\n }\n\n if not form.is_valid():\n ensure_transient_values_persist(request, 
data, form=form)\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n # we have to call this twice because it requires form.cleaned_data\n # which only exists after we validate the form\n ensure_transient_values_persist(request, data, form=form)\n data = add_authors(request, data)\n\n # check if this is an edition of an existing work\n author_text = \", \".join(data.get(\"add_author\", []))\n data[\"book_matches\"] = book_search.search(\n f'{form.cleaned_data.get(\"title\")} {author_text}',\n min_confidence=0.1,\n )[:5]\n\n # go to confirm mode\n if not parent_work_id or data.get(\"add_author\"):\n data[\"confirm_mode\"] = True\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n book = form.save(request)\n parent_work = get_object_or_404(models.Work, id=parent_work_id)\n book.parent_work = parent_work\n\n if authors:\n book.authors.add(*authors)\n\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n book.save()\n return redirect(f\"/book/{book.id}\")\n\n\ndef ensure_transient_values_persist(request, data, **kwargs):\n \"\"\"ensure that values of transient form fields persist when re-rendering the form\"\"\"\n data[\"cover_url\"] = request.POST.get(\"cover-url\")\n if kwargs and kwargs.get(\"form\"):\n data[\"book\"] = data.get(\"book\") or {}\n data[\"book\"][\"subjects\"] = kwargs[\"form\"].cleaned_data[\"subjects\"]\n data[\"add_author\"] = request.POST.getlist(\"add_author\")\n elif kwargs and kwargs.get(\"add_author\") is True:\n data[\"add_author\"] = request.POST.getlist(\"add_author\")\n\n\ndef add_authors(request, data):\n \"\"\"helper for adding authors\"\"\"\n add_author = [author for author in request.POST.getlist(\"add_author\") if author]\n if not add_author:\n return data\n\n data[\"add_author\"] = add_author\n data[\"author_matches\"] = []\n data[\"isni_matches\"] = []\n\n # creating a book or adding an author to a book needs another step\n data[\"confirm_mode\"] = True\n # this isn't preserved because it isn't part of the form obj\n data[\"remove_authors\"] = request.POST.getlist(\"remove_authors\")\n\n for author in add_author:\n # filter out empty author fields\n if not author:\n continue\n # check for existing authors\n vector = SearchVector(\"name\", weight=\"A\") + SearchVector(\"aliases\", weight=\"B\")\n\n author_matches = (\n models.Author.objects.annotate(search=vector)\n .annotate(rank=SearchRank(vector, author))\n .filter(rank__gt=0.4)\n .order_by(\"-rank\")[:5]\n )\n\n isni_authors = find_authors_by_name(\n author, description=True\n ) # find matches from ISNI API\n\n # dedupe isni authors we already have in the DB\n exists = [\n i\n for i in isni_authors\n for a in author_matches\n if sub(r\"\\D\", \"\", str(i.isni)) == sub(r\"\\D\", \"\", str(a.isni))\n ]\n\n # pylint: disable=cell-var-from-loop\n matches = list(filter(lambda x: x not in exists, isni_authors))\n # combine existing and isni authors\n matches.extend(author_matches)\n\n data[\"author_matches\"].append(\n {\n \"name\": author.strip(),\n \"matches\": matches,\n \"existing_isnis\": exists,\n }\n )\n return data\n\n\n@require_POST\n@permission_required(\"bookwyrm.edit_book\", raise_exception=True)\ndef create_book_from_data(request):\n \"\"\"create a book with starter data\"\"\"\n author_ids = findall(r\"\\d+\", request.POST.get(\"authors\"))\n book = {\n \"parent_work\": {\"id\": request.POST.get(\"parent_work\")},\n \"authors\": 
models.Author.objects.filter(id__in=author_ids).all(),\n \"subjects\": request.POST.getlist(\"subjects\"),\n }\n\n data = {\"book\": book, \"form\": forms.EditionForm(request.POST)}\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_book\", raise_exception=True), name=\"dispatch\"\n)\nclass ConfirmEditBook(View):\n \"\"\"confirm edits to a book\"\"\"\n\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n def post(self, request, book_id=None):\n \"\"\"edit a book cool\"\"\"\n # returns None if no match is found\n book = models.Edition.objects.filter(id=book_id).first()\n form = forms.EditionForm(request.POST, request.FILES, instance=book)\n\n data = {\"book\": book, \"form\": form}\n if not form.is_valid():\n return TemplateResponse(request, \"book/edit/edit_book.html\", data)\n\n with transaction.atomic():\n # save book\n book = form.save(request)\n\n # add known authors\n authors = None\n if request.POST.get(\"authors\"):\n author_ids = findall(r\"\\d+\", request.POST[\"authors\"])\n authors = models.Author.objects.filter(id__in=author_ids)\n book.authors.add(*authors)\n\n # get or create author as needed\n for i in range(int(request.POST.get(\"author-match-count\", 0))):\n match = request.POST.get(f\"author_match-{i}\")\n if not match:\n return HttpResponseBadRequest()\n try:\n # if it's an int, it's an ID\n match = int(match)\n author = get_object_or_404(\n models.Author, id=request.POST[f\"author_match-{i}\"]\n )\n # update author metadata if the ISNI record is more complete\n isni = request.POST.get(f\"isni-for-{match}\", None)\n if isni is not None:\n augment_author_metadata(author, isni)\n except ValueError:\n # otherwise it's a new author\n isni_match = request.POST.get(f\"author_match-{i}\")\n author_object = build_author_from_isni(isni_match)\n # with author data class from isni id\n if \"author\" in author_object:\n skeleton = models.Author.objects.create(\n name=author_object[\"author\"].name\n )\n author = author_object[\"author\"].to_model(\n model=models.Author, overwrite=True, instance=skeleton\n )\n else:\n # or it's just a name\n author = models.Author.objects.create(name=match)\n book.authors.add(author)\n\n # create work, if needed\n if not book.parent_work:\n work_match = request.POST.get(\"parent_work\")\n if work_match and work_match != \"0\":\n work = get_object_or_404(models.Work, id=work_match)\n else:\n work = models.Work.objects.create(title=form.cleaned_data[\"title\"])\n work.authors.set(book.authors.all())\n book.parent_work = work\n\n for author_id in request.POST.getlist(\"remove_authors\"):\n book.authors.remove(author_id)\n\n # import cover, if requested\n url = request.POST.get(\"cover-url\")\n if url:\n image = set_cover_from_url(url)\n if image:\n book.cover.save(*image, save=False)\n\n # we don't tell the world when creating a book\n book.save(broadcast=False)\n\n return redirect(f\"/book/{book.id}\")\n", "path": "bookwyrm/views/books/edit_book.py"}]}
3,638
446
gh_patches_debug_30737
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-1696
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exceptions in E3002 and E3003 when evaluating AWS::StepFunctions::StateMachine.TagsEntry for eu-west-2 Given the following template: ```yaml AWSTemplateFormatVersion: '2010-09-09' Transform: AWS::Serverless-2016-10-31 Resources: StateMachineRole: Type: AWS::IAM::Role Properties: AssumeRolePolicyDocument: Version: '2012-10-17' Statement: - Effect: Allow Principal: Service: states.amazonaws.com Action: sts:AssumeRole StateMachine: Type: AWS::Serverless::StateMachine Properties: Definition: StartAt: Example Role: !GetAtt StateMachineRole.Arn ``` The following linting errors are output when using cfn-lint 0.35.1 with the eu-west-2 region specified: ``` E0002 Unknown exception while processing rule E3002: 'AWS::StepFunctions::StateMachine.TagsEntry' example.yml:1:1 E0002 Unknown exception while processing rule E3003: 'AWS::StepFunctions::StateMachine.TagsEntry' example.yml:1:1 ``` The errors are not shown when specifying the region as eu-west-1 or us-east-1. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/update_specs_services_from_ssm.py` Content: ``` 1 #!/usr/bin/env python 2 """ 3 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 SPDX-License-Identifier: MIT-0 5 """ 6 import logging 7 import json 8 import boto3 9 from cfnlint.helpers import get_url_content 10 from cfnlint.helpers import REGIONS 11 from cfnlint.maintenance import SPEC_REGIONS 12 13 """ 14 Updates our dynamic patches from SSM data 15 This script requires Boto3 and Credentials to call the SSM API 16 """ 17 18 LOGGER = logging.getLogger('cfnlint') 19 20 exclude_regions = { 21 'Asia Pacific (Osaka-Local)': 'ap-northeast-3', 22 } 23 24 service_map = { 25 'acm': ['AWS::CertificateManager::'], 26 'apigateway': ['AWS::ApiGateway::', 'AWS::ApiGatewayV2::'], 27 'application-autoscaling': ['AWS::ApplicationAutoScaling::'], 28 'appstream': ['AWS::AppStream::'], 29 'appsync': ['AWS::AppSync::'], 30 'athena': ['AWS::Athena::'], 31 'autoscaling': ['AWS::AutoScaling::'], 32 'batch': ['AWS::Batch::'], 33 'budgets': ['AWS::Budgets::'], 34 'cloud9': ['AWS::Cloud9::'], 35 'cloudfront': ['AWS::CloudFront::'], 36 'cloudtrail': ['AWS::CloudTrail::'], 37 'cloudwatch': ['AWS::CloudWatch::'], 38 'codebuild': ['AWS::CodeBuild::'], 39 'codecommit': ['AWS::CodeCommit::'], 40 'codedeploy': ['AWS::CodeDeploy::'], 41 'codepipeline': ['AWS::CodePipeline::'], 42 'cognito-identity': ['AWS::Cognito::'], 43 'config': ['AWS::Config::'], 44 'datapipeline': ['AWS::DataPipeline::'], 45 'dax': ['AWS::DAX::'], 46 'dms': ['AWS::DMS::'], 47 'docdb': ['AWS::DocDB::'], 48 'ds': ['AWS::DirectoryService::'], 49 'dynamodb': ['AWS::DynamoDB::'], 50 'ec2': ['AWS::EC2::'], 51 'ecr': ['AWS::ECR::'], 52 'ecs': ['AWS::ECS::'], 53 'efs': ['AWS::EFS::'], 54 'eks': ['AWS::EKS::'], 55 'elasticache': ['AWS::ElastiCache::'], 56 'elasticbeanstalk': ['AWS::ElasticBeanstalk::'], 57 'elb': ['AWS::ElasticLoadBalancing::', 'AWS::ElasticLoadBalancingV2::'], 58 'emr': ['AWS::EMR::'], 59 'es': ['AWS::Elasticsearch::'], 60 'events': ['AWS::Events::'], 61 'firehose': ['AWS::KinesisFirehose::'], 62 'fsx': ['AWS::FSx::'], 63 'gamelift': ['AWS::GameLift::'], 64 'glue': ['AWS::Glue::'], 65 'greengrass': ['AWS::Greengrass::'], 66 'guardduty': ['AWS::GuardDuty::'], 67 'inspector': ['AWS::Inspector::'], 68 'iot': ['AWS::IoT::'], 69 'iot1click-projects': 
['AWS::IoT1Click::'], 70 'iotanalytics': ['AWS::IoTAnalytics::'], 71 'kinesis': ['AWS::Kinesis::'], 72 'kinesisanalytics': ['AWS::KinesisAnalytics::', 'AWS::KinesisAnalyticsV2::'], 73 'kms': ['AWS::KMS::'], 74 'lambda': ['AWS::Lambda::'], 75 'logs': ['AWS::Logs::'], 76 'mq': ['AWS::AmazonMQ::'], 77 'neptune': ['AWS::Neptune::'], 78 'opsworks': ['AWS::OpsWorks::'], 79 'opsworkscm': ['AWS::OpsWorksCM::'], 80 'ram': ['AWS::RAM::'], 81 'rds': ['AWS::RDS::'], 82 'redshift': ['AWS::Redshift::'], 83 'robomaker': ['AWS::RoboMaker::'], 84 'route53': ['AWS::Route53::'], 85 'route53resolver': ['AWS::Route53Resolver::ResolverRule', 'AWS::Route53Resolver::ResolverEndpoint'], 86 's3': ['AWS::S3::'], 87 'sagemaker': ['AWS::SageMaker::'], 88 'sdb': ['AWS::SDB::'], 89 'secretsmanager': ['AWS::SecretsManager::'], 90 'servicecatalog': ['AWS::ServiceCatalog::'], 91 'servicediscovery': ['AWS::ServiceDiscovery::'], 92 'ses': ['AWS::SES::'], 93 'sns': ['AWS::SNS::'], 94 'sqs': ['AWS::SQS::'], 95 'ssm': ['AWS::SSM::'], 96 'stepfunctions': ['AWS::StepFunctions::'], 97 'waf-regional': ['AWS::WAFRegional::'], 98 'workspaces': ['AWS::WorkSpaces::'], 99 } 100 101 session = boto3.session.Session() 102 client = session.client('ssm', region_name='us-east-1') 103 104 105 def configure_logging(): 106 """Setup Logging""" 107 ch = logging.StreamHandler() 108 ch.setLevel(logging.INFO) 109 110 LOGGER.setLevel(logging.INFO) 111 log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 112 ch.setFormatter(log_formatter) 113 114 # make sure all other log handlers are removed before adding it back 115 for handler in LOGGER.handlers: 116 LOGGER.removeHandler(handler) 117 LOGGER.addHandler(ch) 118 119 120 def update_outputs(region, resource_type, name, outputs): 121 """ update outputs with appropriate results """ 122 element = { 123 "op": "remove", 124 "path": "/%s/%s" % (resource_type, name) 125 } 126 outputs[region].append(element) 127 128 return outputs 129 130 131 def get_regions_for_service(service): 132 """ get regions for a service """ 133 LOGGER.info('Get the regions for service %s', service) 134 results = [] 135 paginator = client.get_paginator('get_parameters_by_path') 136 page_iterator = paginator.paginate( 137 Path='/aws/service/global-infrastructure/services/{}/regions'.format(service), 138 ) 139 140 for page in page_iterator: 141 for region in page.get('Parameters'): 142 results.append(region.get('Value')) 143 144 return results 145 146 147 def add_spec_patch(region, services): 148 """ Go through spec and determine patching """ 149 LOGGER.info('Create 06_ssm_service_removal patch for region %s', region) 150 spec = json.loads(get_url_content(SPEC_REGIONS.get(region))) 151 152 patches = [] 153 154 for spec_type in ['ResourceTypes', 'PropertyTypes']: 155 for resource in sorted(spec.get(spec_type).keys()): 156 for service in services: 157 for spec_name in service_map.get(service): 158 if resource.startswith(spec_name): 159 element = { 160 'op': 'remove', 161 'path': '/%s/%s' % (spec_type, resource) 162 } 163 patches.append(element) 164 165 filename = 'src/cfnlint/data/ExtendedSpecs/%s/06_ssm_service_removal.json' % region 166 with open(filename, 'w+') as f: 167 json.dump(patches, f, indent=2, sort_keys=True, separators=(',', ': ')) 168 169 170 def add_spec_missing_services_patch(region, services): 171 """ Go through spec and determine patching """ 172 LOGGER.info('Create 07_ssm_service_addition patch for region %s', region) 173 spec_string = get_url_content(SPEC_REGIONS.get(region)) 174 
spec_string_standard = get_url_content(SPEC_REGIONS.get('us-east-1')) 175 176 spec = json.loads(spec_string) 177 spec_standard = json.loads(spec_string_standard) 178 179 patches = [] 180 181 for spec_type in ['ResourceTypes']: 182 for service in services: 183 found = False 184 for resource in sorted(spec.get(spec_type).keys()): 185 for spec_name in service_map.get(service): 186 if resource.startswith(spec_name): 187 found = True 188 if found is False: 189 for standard_spec_type in ['ResourceTypes', 'PropertyTypes']: 190 for resource in sorted(spec_standard.get(standard_spec_type).keys()): 191 for spec_name in service_map.get(service): 192 if resource.startswith(spec_name): 193 if spec_standard.get(spec_type).get(resource): 194 element = { 195 'op': 'add', 196 'path': '/%s/%s' % (spec_type, resource), 197 'value': spec_standard.get(spec_type).get(resource) 198 } 199 patches.append(element) 200 elif standard_spec_type == 'ResourceTypes': 201 print('patch for %s not found' % service) 202 203 if patches: 204 filename = 'src/cfnlint/data/ExtendedSpecs/%s/07_ssm_service_addition.json' % region 205 with open(filename, 'w+') as f: 206 json.dump(patches, f, indent=2, sort_keys=True, separators=(',', ': ')) 207 208 209 def main(): 210 """ main function """ 211 configure_logging() 212 213 all_regions = list(set(REGIONS) - set(exclude_regions.values())) 214 region_service_removal_map = {} 215 region_service_add_map = {} 216 for region in all_regions: 217 region_service_removal_map[region] = [] 218 region_service_add_map[region] = [] 219 for service in service_map: 220 regions = get_regions_for_service(service) 221 if regions: 222 for region in list(set(regions) - set(exclude_regions.values())): 223 region_service_add_map[region].append(service) 224 for region in list(set(all_regions) - set(regions)): 225 region_service_removal_map[region].append(service) 226 227 for region, services in region_service_removal_map.items(): 228 if services: 229 add_spec_patch(region, services) 230 for region, services in region_service_add_map.items(): 231 if services: 232 add_spec_missing_services_patch(region, services) 233 234 235 if __name__ == '__main__': 236 try: 237 main() 238 except (ValueError, TypeError): 239 LOGGER.error(ValueError) 240 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/update_specs_services_from_ssm.py b/scripts/update_specs_services_from_ssm.py --- a/scripts/update_specs_services_from_ssm.py +++ b/scripts/update_specs_services_from_ssm.py @@ -24,7 +24,7 @@ service_map = { 'acm': ['AWS::CertificateManager::'], 'apigateway': ['AWS::ApiGateway::', 'AWS::ApiGatewayV2::'], - 'application-autoscaling': ['AWS::ApplicationAutoScaling::'], + # 'application-autoscaling': ['AWS::ApplicationAutoScaling::'], ## remove because SSM endpoints aren't correct 'appstream': ['AWS::AppStream::'], 'appsync': ['AWS::AppSync::'], 'athena': ['AWS::Athena::'], @@ -190,11 +190,11 @@ for resource in sorted(spec_standard.get(standard_spec_type).keys()): for spec_name in service_map.get(service): if resource.startswith(spec_name): - if spec_standard.get(spec_type).get(resource): + if spec_standard.get(standard_spec_type).get(resource): element = { 'op': 'add', - 'path': '/%s/%s' % (spec_type, resource), - 'value': spec_standard.get(spec_type).get(resource) + 'path': '/%s/%s' % (standard_spec_type, resource), + 'value': spec_standard.get(standard_spec_type).get(resource) } patches.append(element) elif standard_spec_type == 'ResourceTypes':
{"golden_diff": "diff --git a/scripts/update_specs_services_from_ssm.py b/scripts/update_specs_services_from_ssm.py\n--- a/scripts/update_specs_services_from_ssm.py\n+++ b/scripts/update_specs_services_from_ssm.py\n@@ -24,7 +24,7 @@\n service_map = {\n 'acm': ['AWS::CertificateManager::'],\n 'apigateway': ['AWS::ApiGateway::', 'AWS::ApiGatewayV2::'],\n- 'application-autoscaling': ['AWS::ApplicationAutoScaling::'],\n+ # 'application-autoscaling': ['AWS::ApplicationAutoScaling::'], ## remove because SSM endpoints aren't correct\n 'appstream': ['AWS::AppStream::'],\n 'appsync': ['AWS::AppSync::'],\n 'athena': ['AWS::Athena::'],\n@@ -190,11 +190,11 @@\n for resource in sorted(spec_standard.get(standard_spec_type).keys()):\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n- if spec_standard.get(spec_type).get(resource):\n+ if spec_standard.get(standard_spec_type).get(resource):\n element = {\n 'op': 'add',\n- 'path': '/%s/%s' % (spec_type, resource),\n- 'value': spec_standard.get(spec_type).get(resource)\n+ 'path': '/%s/%s' % (standard_spec_type, resource),\n+ 'value': spec_standard.get(standard_spec_type).get(resource)\n }\n patches.append(element)\n elif standard_spec_type == 'ResourceTypes':\n", "issue": "Exceptions in E3002 and E3003 when evaluating AWS::StepFunctions::StateMachine.TagsEntry for eu-west-2\nGiven the following template:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: '2010-09-09'\r\nTransform: AWS::Serverless-2016-10-31\r\n\r\nResources:\r\n StateMachineRole:\r\n Type: AWS::IAM::Role\r\n Properties:\r\n AssumeRolePolicyDocument:\r\n Version: '2012-10-17'\r\n Statement:\r\n - Effect: Allow\r\n Principal:\r\n Service: states.amazonaws.com\r\n Action: sts:AssumeRole\r\n \r\n StateMachine:\r\n Type: AWS::Serverless::StateMachine\r\n Properties:\r\n Definition:\r\n StartAt: Example\r\n Role: !GetAtt StateMachineRole.Arn\r\n```\r\n\r\nThe following linting errors are output when using cfn-lint 0.35.1 with the eu-west-2 region specified:\r\n\r\n```\r\nE0002 Unknown exception while processing rule E3002: 'AWS::StepFunctions::StateMachine.TagsEntry'\r\nexample.yml:1:1\r\n\r\nE0002 Unknown exception while processing rule E3003: 'AWS::StepFunctions::StateMachine.TagsEntry'\r\nexample.yml:1:1\r\n```\r\n\r\nThe errors are not shown when specifying the region as eu-west-1 or us-east-1.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport logging\nimport json\nimport boto3\nfrom cfnlint.helpers import get_url_content\nfrom cfnlint.helpers import REGIONS\nfrom cfnlint.maintenance import SPEC_REGIONS\n\n\"\"\"\n Updates our dynamic patches from SSM data\n This script requires Boto3 and Credentials to call the SSM API\n\"\"\"\n\nLOGGER = logging.getLogger('cfnlint')\n\nexclude_regions = {\n 'Asia Pacific (Osaka-Local)': 'ap-northeast-3',\n}\n\nservice_map = {\n 'acm': ['AWS::CertificateManager::'],\n 'apigateway': ['AWS::ApiGateway::', 'AWS::ApiGatewayV2::'],\n 'application-autoscaling': ['AWS::ApplicationAutoScaling::'],\n 'appstream': ['AWS::AppStream::'],\n 'appsync': ['AWS::AppSync::'],\n 'athena': ['AWS::Athena::'],\n 'autoscaling': ['AWS::AutoScaling::'],\n 'batch': ['AWS::Batch::'],\n 'budgets': ['AWS::Budgets::'],\n 'cloud9': ['AWS::Cloud9::'],\n 'cloudfront': ['AWS::CloudFront::'],\n 'cloudtrail': ['AWS::CloudTrail::'],\n 'cloudwatch': ['AWS::CloudWatch::'],\n 'codebuild': ['AWS::CodeBuild::'],\n 'codecommit': ['AWS::CodeCommit::'],\n 'codedeploy': ['AWS::CodeDeploy::'],\n 'codepipeline': ['AWS::CodePipeline::'],\n 'cognito-identity': ['AWS::Cognito::'],\n 'config': ['AWS::Config::'],\n 'datapipeline': ['AWS::DataPipeline::'],\n 'dax': ['AWS::DAX::'],\n 'dms': ['AWS::DMS::'],\n 'docdb': ['AWS::DocDB::'],\n 'ds': ['AWS::DirectoryService::'],\n 'dynamodb': ['AWS::DynamoDB::'],\n 'ec2': ['AWS::EC2::'],\n 'ecr': ['AWS::ECR::'],\n 'ecs': ['AWS::ECS::'],\n 'efs': ['AWS::EFS::'],\n 'eks': ['AWS::EKS::'],\n 'elasticache': ['AWS::ElastiCache::'],\n 'elasticbeanstalk': ['AWS::ElasticBeanstalk::'],\n 'elb': ['AWS::ElasticLoadBalancing::', 'AWS::ElasticLoadBalancingV2::'],\n 'emr': ['AWS::EMR::'],\n 'es': ['AWS::Elasticsearch::'],\n 'events': ['AWS::Events::'],\n 'firehose': ['AWS::KinesisFirehose::'],\n 'fsx': ['AWS::FSx::'],\n 'gamelift': ['AWS::GameLift::'],\n 'glue': ['AWS::Glue::'],\n 'greengrass': ['AWS::Greengrass::'],\n 'guardduty': ['AWS::GuardDuty::'],\n 'inspector': ['AWS::Inspector::'],\n 'iot': ['AWS::IoT::'],\n 'iot1click-projects': ['AWS::IoT1Click::'],\n 'iotanalytics': ['AWS::IoTAnalytics::'],\n 'kinesis': ['AWS::Kinesis::'],\n 'kinesisanalytics': ['AWS::KinesisAnalytics::', 'AWS::KinesisAnalyticsV2::'],\n 'kms': ['AWS::KMS::'],\n 'lambda': ['AWS::Lambda::'],\n 'logs': ['AWS::Logs::'],\n 'mq': ['AWS::AmazonMQ::'],\n 'neptune': ['AWS::Neptune::'],\n 'opsworks': ['AWS::OpsWorks::'],\n 'opsworkscm': ['AWS::OpsWorksCM::'],\n 'ram': ['AWS::RAM::'],\n 'rds': ['AWS::RDS::'],\n 'redshift': ['AWS::Redshift::'],\n 'robomaker': ['AWS::RoboMaker::'],\n 'route53': ['AWS::Route53::'],\n 'route53resolver': ['AWS::Route53Resolver::ResolverRule', 'AWS::Route53Resolver::ResolverEndpoint'],\n 's3': ['AWS::S3::'],\n 'sagemaker': ['AWS::SageMaker::'],\n 'sdb': ['AWS::SDB::'],\n 'secretsmanager': ['AWS::SecretsManager::'],\n 'servicecatalog': ['AWS::ServiceCatalog::'],\n 'servicediscovery': ['AWS::ServiceDiscovery::'],\n 'ses': ['AWS::SES::'],\n 'sns': ['AWS::SNS::'],\n 'sqs': ['AWS::SQS::'],\n 'ssm': ['AWS::SSM::'],\n 'stepfunctions': ['AWS::StepFunctions::'],\n 'waf-regional': ['AWS::WAFRegional::'],\n 'workspaces': ['AWS::WorkSpaces::'],\n}\n\nsession = boto3.session.Session()\nclient = session.client('ssm', region_name='us-east-1')\n\n\ndef configure_logging():\n \"\"\"Setup Logging\"\"\"\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n LOGGER.setLevel(logging.INFO)\n log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - 
%(message)s')\n ch.setFormatter(log_formatter)\n\n # make sure all other log handlers are removed before adding it back\n for handler in LOGGER.handlers:\n LOGGER.removeHandler(handler)\n LOGGER.addHandler(ch)\n\n\ndef update_outputs(region, resource_type, name, outputs):\n \"\"\" update outputs with appropriate results \"\"\"\n element = {\n \"op\": \"remove\",\n \"path\": \"/%s/%s\" % (resource_type, name)\n }\n outputs[region].append(element)\n\n return outputs\n\n\ndef get_regions_for_service(service):\n \"\"\" get regions for a service \"\"\"\n LOGGER.info('Get the regions for service %s', service)\n results = []\n paginator = client.get_paginator('get_parameters_by_path')\n page_iterator = paginator.paginate(\n Path='/aws/service/global-infrastructure/services/{}/regions'.format(service),\n )\n\n for page in page_iterator:\n for region in page.get('Parameters'):\n results.append(region.get('Value'))\n\n return results\n\n\ndef add_spec_patch(region, services):\n \"\"\" Go through spec and determine patching \"\"\"\n LOGGER.info('Create 06_ssm_service_removal patch for region %s', region)\n spec = json.loads(get_url_content(SPEC_REGIONS.get(region)))\n\n patches = []\n\n for spec_type in ['ResourceTypes', 'PropertyTypes']:\n for resource in sorted(spec.get(spec_type).keys()):\n for service in services:\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n element = {\n 'op': 'remove',\n 'path': '/%s/%s' % (spec_type, resource)\n }\n patches.append(element)\n\n filename = 'src/cfnlint/data/ExtendedSpecs/%s/06_ssm_service_removal.json' % region\n with open(filename, 'w+') as f:\n json.dump(patches, f, indent=2, sort_keys=True, separators=(',', ': '))\n\n\ndef add_spec_missing_services_patch(region, services):\n \"\"\" Go through spec and determine patching \"\"\"\n LOGGER.info('Create 07_ssm_service_addition patch for region %s', region)\n spec_string = get_url_content(SPEC_REGIONS.get(region))\n spec_string_standard = get_url_content(SPEC_REGIONS.get('us-east-1'))\n\n spec = json.loads(spec_string)\n spec_standard = json.loads(spec_string_standard)\n\n patches = []\n\n for spec_type in ['ResourceTypes']:\n for service in services:\n found = False\n for resource in sorted(spec.get(spec_type).keys()):\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n found = True\n if found is False:\n for standard_spec_type in ['ResourceTypes', 'PropertyTypes']:\n for resource in sorted(spec_standard.get(standard_spec_type).keys()):\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n if spec_standard.get(spec_type).get(resource):\n element = {\n 'op': 'add',\n 'path': '/%s/%s' % (spec_type, resource),\n 'value': spec_standard.get(spec_type).get(resource)\n }\n patches.append(element)\n elif standard_spec_type == 'ResourceTypes':\n print('patch for %s not found' % service)\n\n if patches:\n filename = 'src/cfnlint/data/ExtendedSpecs/%s/07_ssm_service_addition.json' % region\n with open(filename, 'w+') as f:\n json.dump(patches, f, indent=2, sort_keys=True, separators=(',', ': '))\n\n\ndef main():\n \"\"\" main function \"\"\"\n configure_logging()\n\n all_regions = list(set(REGIONS) - set(exclude_regions.values()))\n region_service_removal_map = {}\n region_service_add_map = {}\n for region in all_regions:\n region_service_removal_map[region] = []\n region_service_add_map[region] = []\n for service in service_map:\n regions = get_regions_for_service(service)\n if regions:\n for region in list(set(regions) - 
set(exclude_regions.values())):\n region_service_add_map[region].append(service)\n for region in list(set(all_regions) - set(regions)):\n region_service_removal_map[region].append(service)\n\n for region, services in region_service_removal_map.items():\n if services:\n add_spec_patch(region, services)\n for region, services in region_service_add_map.items():\n if services:\n add_spec_missing_services_patch(region, services)\n\n\nif __name__ == '__main__':\n try:\n main()\n except (ValueError, TypeError):\n LOGGER.error(ValueError)\n", "path": "scripts/update_specs_services_from_ssm.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport logging\nimport json\nimport boto3\nfrom cfnlint.helpers import get_url_content\nfrom cfnlint.helpers import REGIONS\nfrom cfnlint.maintenance import SPEC_REGIONS\n\n\"\"\"\n Updates our dynamic patches from SSM data\n This script requires Boto3 and Credentials to call the SSM API\n\"\"\"\n\nLOGGER = logging.getLogger('cfnlint')\n\nexclude_regions = {\n 'Asia Pacific (Osaka-Local)': 'ap-northeast-3',\n}\n\nservice_map = {\n 'acm': ['AWS::CertificateManager::'],\n 'apigateway': ['AWS::ApiGateway::', 'AWS::ApiGatewayV2::'],\n # 'application-autoscaling': ['AWS::ApplicationAutoScaling::'], ## remove because SSM endpoints aren't correct\n 'appstream': ['AWS::AppStream::'],\n 'appsync': ['AWS::AppSync::'],\n 'athena': ['AWS::Athena::'],\n 'autoscaling': ['AWS::AutoScaling::'],\n 'batch': ['AWS::Batch::'],\n 'budgets': ['AWS::Budgets::'],\n 'cloud9': ['AWS::Cloud9::'],\n 'cloudfront': ['AWS::CloudFront::'],\n 'cloudtrail': ['AWS::CloudTrail::'],\n 'cloudwatch': ['AWS::CloudWatch::'],\n 'codebuild': ['AWS::CodeBuild::'],\n 'codecommit': ['AWS::CodeCommit::'],\n 'codedeploy': ['AWS::CodeDeploy::'],\n 'codepipeline': ['AWS::CodePipeline::'],\n 'cognito-identity': ['AWS::Cognito::'],\n 'config': ['AWS::Config::'],\n 'datapipeline': ['AWS::DataPipeline::'],\n 'dax': ['AWS::DAX::'],\n 'dms': ['AWS::DMS::'],\n 'docdb': ['AWS::DocDB::'],\n 'ds': ['AWS::DirectoryService::'],\n 'dynamodb': ['AWS::DynamoDB::'],\n 'ec2': ['AWS::EC2::'],\n 'ecr': ['AWS::ECR::'],\n 'ecs': ['AWS::ECS::'],\n 'efs': ['AWS::EFS::'],\n 'eks': ['AWS::EKS::'],\n 'elasticache': ['AWS::ElastiCache::'],\n 'elasticbeanstalk': ['AWS::ElasticBeanstalk::'],\n 'elb': ['AWS::ElasticLoadBalancing::', 'AWS::ElasticLoadBalancingV2::'],\n 'emr': ['AWS::EMR::'],\n 'es': ['AWS::Elasticsearch::'],\n 'events': ['AWS::Events::'],\n 'firehose': ['AWS::KinesisFirehose::'],\n 'fsx': ['AWS::FSx::'],\n 'gamelift': ['AWS::GameLift::'],\n 'glue': ['AWS::Glue::'],\n 'greengrass': ['AWS::Greengrass::'],\n 'guardduty': ['AWS::GuardDuty::'],\n 'inspector': ['AWS::Inspector::'],\n 'iot': ['AWS::IoT::'],\n 'iot1click-projects': ['AWS::IoT1Click::'],\n 'iotanalytics': ['AWS::IoTAnalytics::'],\n 'kinesis': ['AWS::Kinesis::'],\n 'kinesisanalytics': ['AWS::KinesisAnalytics::', 'AWS::KinesisAnalyticsV2::'],\n 'kms': ['AWS::KMS::'],\n 'lambda': ['AWS::Lambda::'],\n 'logs': ['AWS::Logs::'],\n 'mq': ['AWS::AmazonMQ::'],\n 'neptune': ['AWS::Neptune::'],\n 'opsworks': ['AWS::OpsWorks::'],\n 'opsworkscm': ['AWS::OpsWorksCM::'],\n 'ram': ['AWS::RAM::'],\n 'rds': ['AWS::RDS::'],\n 'redshift': ['AWS::Redshift::'],\n 'robomaker': ['AWS::RoboMaker::'],\n 'route53': ['AWS::Route53::'],\n 'route53resolver': ['AWS::Route53Resolver::ResolverRule', 'AWS::Route53Resolver::ResolverEndpoint'],\n 's3': ['AWS::S3::'],\n 'sagemaker': 
['AWS::SageMaker::'],\n 'sdb': ['AWS::SDB::'],\n 'secretsmanager': ['AWS::SecretsManager::'],\n 'servicecatalog': ['AWS::ServiceCatalog::'],\n 'servicediscovery': ['AWS::ServiceDiscovery::'],\n 'ses': ['AWS::SES::'],\n 'sns': ['AWS::SNS::'],\n 'sqs': ['AWS::SQS::'],\n 'ssm': ['AWS::SSM::'],\n 'stepfunctions': ['AWS::StepFunctions::'],\n 'waf-regional': ['AWS::WAFRegional::'],\n 'workspaces': ['AWS::WorkSpaces::'],\n}\n\nsession = boto3.session.Session()\nclient = session.client('ssm', region_name='us-east-1')\n\n\ndef configure_logging():\n \"\"\"Setup Logging\"\"\"\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n LOGGER.setLevel(logging.INFO)\n log_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(log_formatter)\n\n # make sure all other log handlers are removed before adding it back\n for handler in LOGGER.handlers:\n LOGGER.removeHandler(handler)\n LOGGER.addHandler(ch)\n\n\ndef update_outputs(region, resource_type, name, outputs):\n \"\"\" update outputs with appropriate results \"\"\"\n element = {\n \"op\": \"remove\",\n \"path\": \"/%s/%s\" % (resource_type, name)\n }\n outputs[region].append(element)\n\n return outputs\n\n\ndef get_regions_for_service(service):\n \"\"\" get regions for a service \"\"\"\n LOGGER.info('Get the regions for service %s', service)\n results = []\n paginator = client.get_paginator('get_parameters_by_path')\n page_iterator = paginator.paginate(\n Path='/aws/service/global-infrastructure/services/{}/regions'.format(service),\n )\n\n for page in page_iterator:\n for region in page.get('Parameters'):\n results.append(region.get('Value'))\n\n return results\n\n\ndef add_spec_patch(region, services):\n \"\"\" Go through spec and determine patching \"\"\"\n LOGGER.info('Create 06_ssm_service_removal patch for region %s', region)\n spec = json.loads(get_url_content(SPEC_REGIONS.get(region)))\n\n patches = []\n\n for spec_type in ['ResourceTypes', 'PropertyTypes']:\n for resource in sorted(spec.get(spec_type).keys()):\n for service in services:\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n element = {\n 'op': 'remove',\n 'path': '/%s/%s' % (spec_type, resource)\n }\n patches.append(element)\n\n filename = 'src/cfnlint/data/ExtendedSpecs/%s/06_ssm_service_removal.json' % region\n with open(filename, 'w+') as f:\n json.dump(patches, f, indent=2, sort_keys=True, separators=(',', ': '))\n\n\ndef add_spec_missing_services_patch(region, services):\n \"\"\" Go through spec and determine patching \"\"\"\n LOGGER.info('Create 07_ssm_service_addition patch for region %s', region)\n spec_string = get_url_content(SPEC_REGIONS.get(region))\n spec_string_standard = get_url_content(SPEC_REGIONS.get('us-east-1'))\n\n spec = json.loads(spec_string)\n spec_standard = json.loads(spec_string_standard)\n\n patches = []\n\n for spec_type in ['ResourceTypes']:\n for service in services:\n found = False\n for resource in sorted(spec.get(spec_type).keys()):\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n found = True\n if found is False:\n for standard_spec_type in ['ResourceTypes', 'PropertyTypes']:\n for resource in sorted(spec_standard.get(standard_spec_type).keys()):\n for spec_name in service_map.get(service):\n if resource.startswith(spec_name):\n if spec_standard.get(standard_spec_type).get(resource):\n element = {\n 'op': 'add',\n 'path': '/%s/%s' % (standard_spec_type, resource),\n 'value': 
spec_standard.get(standard_spec_type).get(resource)\n }\n patches.append(element)\n elif standard_spec_type == 'ResourceTypes':\n print('patch for %s not found' % service)\n\n if patches:\n filename = 'src/cfnlint/data/ExtendedSpecs/%s/07_ssm_service_addition.json' % region\n with open(filename, 'w+') as f:\n json.dump(patches, f, indent=2, sort_keys=True, separators=(',', ': '))\n\n\ndef main():\n \"\"\" main function \"\"\"\n configure_logging()\n\n all_regions = list(set(REGIONS) - set(exclude_regions.values()))\n region_service_removal_map = {}\n region_service_add_map = {}\n for region in all_regions:\n region_service_removal_map[region] = []\n region_service_add_map[region] = []\n for service in service_map:\n regions = get_regions_for_service(service)\n if regions:\n for region in list(set(regions) - set(exclude_regions.values())):\n region_service_add_map[region].append(service)\n for region in list(set(all_regions) - set(regions)):\n region_service_removal_map[region].append(service)\n\n for region, services in region_service_removal_map.items():\n if services:\n add_spec_patch(region, services)\n for region, services in region_service_add_map.items():\n if services:\n add_spec_missing_services_patch(region, services)\n\n\nif __name__ == '__main__':\n try:\n main()\n except (ValueError, TypeError):\n LOGGER.error(ValueError)\n", "path": "scripts/update_specs_services_from_ssm.py"}]}
3,365
343
gh_patches_debug_33135
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-3137
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Prometheus exporter: consider removing the ability to set a global metric prefix The [current implementation](https://github.com/open-telemetry/opentelemetry-python/blob/main/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py#L120) allows specifying a prefix which is globally applied to all metrics for the exporter. From the [OpenMetrics spec on target metadata](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems): > Exposers MUST NOT prefix MetricFamily names or otherwise vary MetricFamily names based on target metadata. > ... > The metric names MUST NOT be varied based on target metadata. For example it would be incorrect for all metrics to end up being prefixed with staging_ even if they all originated from targets in a staging environment). The gist is that name prefixes shouldn't be used to differentiate instances of an application being scraped, which appears to be what we are doing. It isn't entirely clear to me what the original purpose of the prefix was, but if it is to prevent collisions between instrumentation libraries, we should solve that with https://github.com/open-telemetry/opentelemetry-python/issues/3072 instead. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This library allows export of metrics data to `Prometheus <https://prometheus.io/>`_. 17 18 Usage 19 ----- 20 21 The **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_ 22 metrics to `Prometheus`_. 23 24 25 .. _Prometheus: https://prometheus.io/ 26 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ 27 28 .. code:: python 29 30 from prometheus_client import start_http_server 31 32 from opentelemetry.exporter.prometheus import PrometheusMetricReader 33 from opentelemetry.metrics import get_meter_provider, set_meter_provider 34 from opentelemetry.sdk.metrics import MeterProvider 35 36 # Start Prometheus client 37 start_http_server(port=8000, addr="localhost") 38 39 # Exporter to export metrics to Prometheus 40 prefix = "MyAppPrefix" 41 reader = PrometheusMetricReader(prefix) 42 43 # Meter is responsible for creating and recording metrics 44 set_meter_provider(MeterProvider(metric_readers=[reader])) 45 meter = get_meter_provider().get_meter("myapp", "0.1.2") 46 47 counter = meter.create_counter( 48 "requests", 49 "requests", 50 "number of requests", 51 ) 52 53 # Labels are used to identify key-values that are associated with a specific 54 # metric that you want to record. 
These are useful for pre-aggregation and can 55 # be used to store custom dimensions pertaining to a metric 56 labels = {"environment": "staging"} 57 58 counter.add(25, labels) 59 input("Press any key to exit...") 60 61 API 62 --- 63 """ 64 65 from collections import deque 66 from itertools import chain 67 from json import dumps 68 from logging import getLogger 69 from re import IGNORECASE, UNICODE, compile 70 from typing import Dict, Sequence, Tuple, Union 71 72 from prometheus_client.core import ( 73 REGISTRY, 74 CounterMetricFamily, 75 GaugeMetricFamily, 76 HistogramMetricFamily, 77 ) 78 from prometheus_client.core import Metric as PrometheusMetric 79 80 from opentelemetry.sdk.metrics import Counter 81 from opentelemetry.sdk.metrics import Histogram as HistogramInstrument 82 from opentelemetry.sdk.metrics import ( 83 ObservableCounter, 84 ObservableGauge, 85 ObservableUpDownCounter, 86 UpDownCounter, 87 ) 88 from opentelemetry.sdk.metrics.export import ( 89 AggregationTemporality, 90 Gauge, 91 Histogram, 92 HistogramDataPoint, 93 MetricReader, 94 MetricsData, 95 Sum, 96 ) 97 98 _logger = getLogger(__name__) 99 100 101 def _convert_buckets( 102 bucket_counts: Sequence[int], explicit_bounds: Sequence[float] 103 ) -> Sequence[Tuple[str, int]]: 104 buckets = [] 105 total_count = 0 106 for upper_bound, count in zip( 107 chain(explicit_bounds, ["+Inf"]), 108 bucket_counts, 109 ): 110 total_count += count 111 buckets.append((f"{upper_bound}", total_count)) 112 113 return buckets 114 115 116 class PrometheusMetricReader(MetricReader): 117 """Prometheus metric exporter for OpenTelemetry. 118 119 Args: 120 prefix: single-word application prefix relevant to the domain 121 the metric belongs to. 122 """ 123 124 def __init__(self, prefix: str = "") -> None: 125 126 super().__init__( 127 preferred_temporality={ 128 Counter: AggregationTemporality.CUMULATIVE, 129 UpDownCounter: AggregationTemporality.CUMULATIVE, 130 HistogramInstrument: AggregationTemporality.CUMULATIVE, 131 ObservableCounter: AggregationTemporality.CUMULATIVE, 132 ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, 133 ObservableGauge: AggregationTemporality.CUMULATIVE, 134 } 135 ) 136 self._collector = _CustomCollector(prefix) 137 REGISTRY.register(self._collector) 138 self._collector._callback = self.collect 139 140 def _receive_metrics( 141 self, 142 metrics_data: MetricsData, 143 timeout_millis: float = 10_000, 144 **kwargs, 145 ) -> None: 146 if metrics_data is None: 147 return 148 self._collector.add_metrics_data(metrics_data) 149 150 def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: 151 REGISTRY.unregister(self._collector) 152 153 154 class _CustomCollector: 155 """_CustomCollector represents the Prometheus Collector object 156 157 See more: 158 https://github.com/prometheus/client_python#custom-collectors 159 """ 160 161 def __init__(self, prefix: str = ""): 162 self._prefix = prefix 163 self._callback = None 164 self._metrics_datas = deque() 165 self._non_letters_digits_underscore_re = compile( 166 r"[^\w]", UNICODE | IGNORECASE 167 ) 168 169 def add_metrics_data(self, metrics_data: MetricsData) -> None: 170 """Add metrics to Prometheus data""" 171 self._metrics_datas.append(metrics_data) 172 173 def collect(self) -> None: 174 """Collect fetches the metrics from OpenTelemetry 175 and delivers them as Prometheus Metrics. 176 Collect is invoked every time a ``prometheus.Gatherer`` is run 177 for example when the HTTP endpoint is invoked by Prometheus. 
178 """ 179 if self._callback is not None: 180 self._callback() 181 182 metric_family_id_metric_family = {} 183 184 while self._metrics_datas: 185 self._translate_to_prometheus( 186 self._metrics_datas.popleft(), metric_family_id_metric_family 187 ) 188 189 if metric_family_id_metric_family: 190 for metric_family in metric_family_id_metric_family.values(): 191 yield metric_family 192 193 # pylint: disable=too-many-locals,too-many-branches 194 def _translate_to_prometheus( 195 self, 196 metrics_data: MetricsData, 197 metric_family_id_metric_family: Dict[str, PrometheusMetric], 198 ): 199 metrics = [] 200 201 for resource_metrics in metrics_data.resource_metrics: 202 for scope_metrics in resource_metrics.scope_metrics: 203 for metric in scope_metrics.metrics: 204 metrics.append(metric) 205 206 for metric in metrics: 207 label_valuess = [] 208 values = [] 209 210 pre_metric_family_ids = [] 211 212 metric_name = "" 213 if self._prefix != "": 214 metric_name = self._prefix + "_" 215 metric_name += self._sanitize(metric.name) 216 217 metric_description = metric.description or "" 218 219 for number_data_point in metric.data.data_points: 220 label_keys = [] 221 label_values = [] 222 223 for key, value in number_data_point.attributes.items(): 224 label_keys.append(self._sanitize(key)) 225 label_values.append(self._check_value(value)) 226 227 pre_metric_family_ids.append( 228 "|".join( 229 [ 230 metric_name, 231 metric_description, 232 "%".join(label_keys), 233 metric.unit, 234 ] 235 ) 236 ) 237 238 label_valuess.append(label_values) 239 if isinstance(number_data_point, HistogramDataPoint): 240 values.append( 241 { 242 "bucket_counts": number_data_point.bucket_counts, 243 "explicit_bounds": ( 244 number_data_point.explicit_bounds 245 ), 246 "sum": number_data_point.sum, 247 } 248 ) 249 else: 250 values.append(number_data_point.value) 251 252 for pre_metric_family_id, label_values, value in zip( 253 pre_metric_family_ids, label_valuess, values 254 ): 255 if isinstance(metric.data, Sum): 256 257 metric_family_id = "|".join( 258 [pre_metric_family_id, CounterMetricFamily.__name__] 259 ) 260 261 if metric_family_id not in metric_family_id_metric_family: 262 metric_family_id_metric_family[ 263 metric_family_id 264 ] = CounterMetricFamily( 265 name=metric_name, 266 documentation=metric_description, 267 labels=label_keys, 268 unit=metric.unit, 269 ) 270 metric_family_id_metric_family[ 271 metric_family_id 272 ].add_metric(labels=label_values, value=value) 273 elif isinstance(metric.data, Gauge): 274 275 metric_family_id = "|".join( 276 [pre_metric_family_id, GaugeMetricFamily.__name__] 277 ) 278 279 if ( 280 metric_family_id 281 not in metric_family_id_metric_family.keys() 282 ): 283 metric_family_id_metric_family[ 284 metric_family_id 285 ] = GaugeMetricFamily( 286 name=metric_name, 287 documentation=metric_description, 288 labels=label_keys, 289 unit=metric.unit, 290 ) 291 metric_family_id_metric_family[ 292 metric_family_id 293 ].add_metric(labels=label_values, value=value) 294 elif isinstance(metric.data, Histogram): 295 296 metric_family_id = "|".join( 297 [pre_metric_family_id, HistogramMetricFamily.__name__] 298 ) 299 300 if ( 301 metric_family_id 302 not in metric_family_id_metric_family.keys() 303 ): 304 metric_family_id_metric_family[ 305 metric_family_id 306 ] = HistogramMetricFamily( 307 name=metric_name, 308 documentation=metric_description, 309 labels=label_keys, 310 unit=metric.unit, 311 ) 312 metric_family_id_metric_family[ 313 metric_family_id 314 ].add_metric( 315 labels=label_values, 316 
buckets=_convert_buckets( 317 value["bucket_counts"], value["explicit_bounds"] 318 ), 319 sum_value=value["sum"], 320 ) 321 else: 322 _logger.warning( 323 "Unsupported metric data. %s", type(metric.data) 324 ) 325 326 def _sanitize(self, key: str) -> str: 327 """sanitize the given metric name or label according to Prometheus rule. 328 Replace all characters other than [A-Za-z0-9_] with '_'. 329 """ 330 return self._non_letters_digits_underscore_re.sub("_", key) 331 332 # pylint: disable=no-self-use 333 def _check_value(self, value: Union[int, float, str, Sequence]) -> str: 334 """Check the label value and return is appropriate representation""" 335 if not isinstance(value, str): 336 return dumps(value, default=str) 337 return str(value) 338 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py +++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -114,14 +114,9 @@ class PrometheusMetricReader(MetricReader): - """Prometheus metric exporter for OpenTelemetry. + """Prometheus metric exporter for OpenTelemetry.""" - Args: - prefix: single-word application prefix relevant to the domain - the metric belongs to. - """ - - def __init__(self, prefix: str = "") -> None: + def __init__(self) -> None: super().__init__( preferred_temporality={ @@ -133,7 +128,7 @@ ObservableGauge: AggregationTemporality.CUMULATIVE, } ) - self._collector = _CustomCollector(prefix) + self._collector = _CustomCollector() REGISTRY.register(self._collector) self._collector._callback = self.collect @@ -158,8 +153,7 @@ https://github.com/prometheus/client_python#custom-collectors """ - def __init__(self, prefix: str = ""): - self._prefix = prefix + def __init__(self): self._callback = None self._metrics_datas = deque() self._non_letters_digits_underscore_re = compile( @@ -210,8 +204,6 @@ pre_metric_family_ids = [] metric_name = "" - if self._prefix != "": - metric_name = self._prefix + "_" metric_name += self._sanitize(metric.name) metric_description = metric.description or ""
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n--- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n+++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n@@ -114,14 +114,9 @@\n \n \n class PrometheusMetricReader(MetricReader):\n- \"\"\"Prometheus metric exporter for OpenTelemetry.\n+ \"\"\"Prometheus metric exporter for OpenTelemetry.\"\"\"\n \n- Args:\n- prefix: single-word application prefix relevant to the domain\n- the metric belongs to.\n- \"\"\"\n-\n- def __init__(self, prefix: str = \"\") -> None:\n+ def __init__(self) -> None:\n \n super().__init__(\n preferred_temporality={\n@@ -133,7 +128,7 @@\n ObservableGauge: AggregationTemporality.CUMULATIVE,\n }\n )\n- self._collector = _CustomCollector(prefix)\n+ self._collector = _CustomCollector()\n REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n \n@@ -158,8 +153,7 @@\n https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n \n- def __init__(self, prefix: str = \"\"):\n- self._prefix = prefix\n+ def __init__(self):\n self._callback = None\n self._metrics_datas = deque()\n self._non_letters_digits_underscore_re = compile(\n@@ -210,8 +204,6 @@\n pre_metric_family_ids = []\n \n metric_name = \"\"\n- if self._prefix != \"\":\n- metric_name = self._prefix + \"_\"\n metric_name += self._sanitize(metric.name)\n \n metric_description = metric.description or \"\"\n", "issue": "Prometheus exporter: consider removing the ability to set a global metric prefix\nThe [current implementation](https://github.com/open-telemetry/opentelemetry-python/blob/main/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py#L120) allows specifying a prefix which is globally applied to all metrics for the exporter.\r\n\r\nFrom the [OpenMetrics spec on target metadata](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#supporting-target-metadata-in-both-push-based-and-pull-based-systems):\r\n\r\n> Exposers MUST NOT prefix MetricFamily names or otherwise vary MetricFamily names based on target metadata.\r\n> ...\r\n> The metric names MUST NOT be varied based on target metadata. For example it would be incorrect for all metrics to end up being prefixed with staging_ even if they all originated from targets in a staging environment).\r\n\r\nThe gist is that name prefixes shouldn't be used to differentiate instances of an application being scraped, which appears to be what we are doing. 
It isn't entirely clear to me what the original purpose of the prefix was, but if it is to prevent collisions between instrumentation libraries, we should solve that with https://github.com/open-telemetry/opentelemetry-python/issues/3072 instead.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows export of metrics data to `Prometheus <https://prometheus.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_\nmetrics to `Prometheus`_.\n\n\n.. _Prometheus: https://prometheus.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from prometheus_client import start_http_server\n\n from opentelemetry.exporter.prometheus import PrometheusMetricReader\n from opentelemetry.metrics import get_meter_provider, set_meter_provider\n from opentelemetry.sdk.metrics import MeterProvider\n\n # Start Prometheus client\n start_http_server(port=8000, addr=\"localhost\")\n\n # Exporter to export metrics to Prometheus\n prefix = \"MyAppPrefix\"\n reader = PrometheusMetricReader(prefix)\n\n # Meter is responsible for creating and recording metrics\n set_meter_provider(MeterProvider(metric_readers=[reader]))\n meter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\n\n counter = meter.create_counter(\n \"requests\",\n \"requests\",\n \"number of requests\",\n )\n\n # Labels are used to identify key-values that are associated with a specific\n # metric that you want to record. 
These are useful for pre-aggregation and can\n # be used to store custom dimensions pertaining to a metric\n labels = {\"environment\": \"staging\"}\n\n counter.add(25, labels)\n input(\"Press any key to exit...\")\n\nAPI\n---\n\"\"\"\n\nfrom collections import deque\nfrom itertools import chain\nfrom json import dumps\nfrom logging import getLogger\nfrom re import IGNORECASE, UNICODE, compile\nfrom typing import Dict, Sequence, Tuple, Union\n\nfrom prometheus_client.core import (\n REGISTRY,\n CounterMetricFamily,\n GaugeMetricFamily,\n HistogramMetricFamily,\n)\nfrom prometheus_client.core import Metric as PrometheusMetric\n\nfrom opentelemetry.sdk.metrics import Counter\nfrom opentelemetry.sdk.metrics import Histogram as HistogramInstrument\nfrom opentelemetry.sdk.metrics import (\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk.metrics.export import (\n AggregationTemporality,\n Gauge,\n Histogram,\n HistogramDataPoint,\n MetricReader,\n MetricsData,\n Sum,\n)\n\n_logger = getLogger(__name__)\n\n\ndef _convert_buckets(\n bucket_counts: Sequence[int], explicit_bounds: Sequence[float]\n) -> Sequence[Tuple[str, int]]:\n buckets = []\n total_count = 0\n for upper_bound, count in zip(\n chain(explicit_bounds, [\"+Inf\"]),\n bucket_counts,\n ):\n total_count += count\n buckets.append((f\"{upper_bound}\", total_count))\n\n return buckets\n\n\nclass PrometheusMetricReader(MetricReader):\n \"\"\"Prometheus metric exporter for OpenTelemetry.\n\n Args:\n prefix: single-word application prefix relevant to the domain\n the metric belongs to.\n \"\"\"\n\n def __init__(self, prefix: str = \"\") -> None:\n\n super().__init__(\n preferred_temporality={\n Counter: AggregationTemporality.CUMULATIVE,\n UpDownCounter: AggregationTemporality.CUMULATIVE,\n HistogramInstrument: AggregationTemporality.CUMULATIVE,\n ObservableCounter: AggregationTemporality.CUMULATIVE,\n ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,\n ObservableGauge: AggregationTemporality.CUMULATIVE,\n }\n )\n self._collector = _CustomCollector(prefix)\n REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n\n def _receive_metrics(\n self,\n metrics_data: MetricsData,\n timeout_millis: float = 10_000,\n **kwargs,\n ) -> None:\n if metrics_data is None:\n return\n self._collector.add_metrics_data(metrics_data)\n\n def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:\n REGISTRY.unregister(self._collector)\n\n\nclass _CustomCollector:\n \"\"\"_CustomCollector represents the Prometheus Collector object\n\n See more:\n https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n\n def __init__(self, prefix: str = \"\"):\n self._prefix = prefix\n self._callback = None\n self._metrics_datas = deque()\n self._non_letters_digits_underscore_re = compile(\n r\"[^\\w]\", UNICODE | IGNORECASE\n )\n\n def add_metrics_data(self, metrics_data: MetricsData) -> None:\n \"\"\"Add metrics to Prometheus data\"\"\"\n self._metrics_datas.append(metrics_data)\n\n def collect(self) -> None:\n \"\"\"Collect fetches the metrics from OpenTelemetry\n and delivers them as Prometheus Metrics.\n Collect is invoked every time a ``prometheus.Gatherer`` is run\n for example when the HTTP endpoint is invoked by Prometheus.\n \"\"\"\n if self._callback is not None:\n self._callback()\n\n metric_family_id_metric_family = {}\n\n while self._metrics_datas:\n self._translate_to_prometheus(\n self._metrics_datas.popleft(), metric_family_id_metric_family\n 
)\n\n if metric_family_id_metric_family:\n for metric_family in metric_family_id_metric_family.values():\n yield metric_family\n\n # pylint: disable=too-many-locals,too-many-branches\n def _translate_to_prometheus(\n self,\n metrics_data: MetricsData,\n metric_family_id_metric_family: Dict[str, PrometheusMetric],\n ):\n metrics = []\n\n for resource_metrics in metrics_data.resource_metrics:\n for scope_metrics in resource_metrics.scope_metrics:\n for metric in scope_metrics.metrics:\n metrics.append(metric)\n\n for metric in metrics:\n label_valuess = []\n values = []\n\n pre_metric_family_ids = []\n\n metric_name = \"\"\n if self._prefix != \"\":\n metric_name = self._prefix + \"_\"\n metric_name += self._sanitize(metric.name)\n\n metric_description = metric.description or \"\"\n\n for number_data_point in metric.data.data_points:\n label_keys = []\n label_values = []\n\n for key, value in number_data_point.attributes.items():\n label_keys.append(self._sanitize(key))\n label_values.append(self._check_value(value))\n\n pre_metric_family_ids.append(\n \"|\".join(\n [\n metric_name,\n metric_description,\n \"%\".join(label_keys),\n metric.unit,\n ]\n )\n )\n\n label_valuess.append(label_values)\n if isinstance(number_data_point, HistogramDataPoint):\n values.append(\n {\n \"bucket_counts\": number_data_point.bucket_counts,\n \"explicit_bounds\": (\n number_data_point.explicit_bounds\n ),\n \"sum\": number_data_point.sum,\n }\n )\n else:\n values.append(number_data_point.value)\n\n for pre_metric_family_id, label_values, value in zip(\n pre_metric_family_ids, label_valuess, values\n ):\n if isinstance(metric.data, Sum):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, CounterMetricFamily.__name__]\n )\n\n if metric_family_id not in metric_family_id_metric_family:\n metric_family_id_metric_family[\n metric_family_id\n ] = CounterMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n elif isinstance(metric.data, Gauge):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, GaugeMetricFamily.__name__]\n )\n\n if (\n metric_family_id\n not in metric_family_id_metric_family.keys()\n ):\n metric_family_id_metric_family[\n metric_family_id\n ] = GaugeMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n elif isinstance(metric.data, Histogram):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, HistogramMetricFamily.__name__]\n )\n\n if (\n metric_family_id\n not in metric_family_id_metric_family.keys()\n ):\n metric_family_id_metric_family[\n metric_family_id\n ] = HistogramMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(\n labels=label_values,\n buckets=_convert_buckets(\n value[\"bucket_counts\"], value[\"explicit_bounds\"]\n ),\n sum_value=value[\"sum\"],\n )\n else:\n _logger.warning(\n \"Unsupported metric data. 
%s\", type(metric.data)\n )\n\n def _sanitize(self, key: str) -> str:\n \"\"\"sanitize the given metric name or label according to Prometheus rule.\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n\n # pylint: disable=no-self-use\n def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n \"\"\"Check the label value and return is appropriate representation\"\"\"\n if not isinstance(value, str):\n return dumps(value, default=str)\n return str(value)\n", "path": "exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows export of metrics data to `Prometheus <https://prometheus.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_\nmetrics to `Prometheus`_.\n\n\n.. _Prometheus: https://prometheus.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from prometheus_client import start_http_server\n\n from opentelemetry.exporter.prometheus import PrometheusMetricReader\n from opentelemetry.metrics import get_meter_provider, set_meter_provider\n from opentelemetry.sdk.metrics import MeterProvider\n\n # Start Prometheus client\n start_http_server(port=8000, addr=\"localhost\")\n\n # Exporter to export metrics to Prometheus\n prefix = \"MyAppPrefix\"\n reader = PrometheusMetricReader(prefix)\n\n # Meter is responsible for creating and recording metrics\n set_meter_provider(MeterProvider(metric_readers=[reader]))\n meter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\n\n counter = meter.create_counter(\n \"requests\",\n \"requests\",\n \"number of requests\",\n )\n\n # Labels are used to identify key-values that are associated with a specific\n # metric that you want to record. 
These are useful for pre-aggregation and can\n # be used to store custom dimensions pertaining to a metric\n labels = {\"environment\": \"staging\"}\n\n counter.add(25, labels)\n input(\"Press any key to exit...\")\n\nAPI\n---\n\"\"\"\n\nfrom collections import deque\nfrom itertools import chain\nfrom json import dumps\nfrom logging import getLogger\nfrom re import IGNORECASE, UNICODE, compile\nfrom typing import Dict, Sequence, Tuple, Union\n\nfrom prometheus_client.core import (\n REGISTRY,\n CounterMetricFamily,\n GaugeMetricFamily,\n HistogramMetricFamily,\n)\nfrom prometheus_client.core import Metric as PrometheusMetric\n\nfrom opentelemetry.sdk.metrics import Counter\nfrom opentelemetry.sdk.metrics import Histogram as HistogramInstrument\nfrom opentelemetry.sdk.metrics import (\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk.metrics.export import (\n AggregationTemporality,\n Gauge,\n Histogram,\n HistogramDataPoint,\n MetricReader,\n MetricsData,\n Sum,\n)\n\n_logger = getLogger(__name__)\n\n\ndef _convert_buckets(\n bucket_counts: Sequence[int], explicit_bounds: Sequence[float]\n) -> Sequence[Tuple[str, int]]:\n buckets = []\n total_count = 0\n for upper_bound, count in zip(\n chain(explicit_bounds, [\"+Inf\"]),\n bucket_counts,\n ):\n total_count += count\n buckets.append((f\"{upper_bound}\", total_count))\n\n return buckets\n\n\nclass PrometheusMetricReader(MetricReader):\n \"\"\"Prometheus metric exporter for OpenTelemetry.\"\"\"\n\n def __init__(self) -> None:\n\n super().__init__(\n preferred_temporality={\n Counter: AggregationTemporality.CUMULATIVE,\n UpDownCounter: AggregationTemporality.CUMULATIVE,\n HistogramInstrument: AggregationTemporality.CUMULATIVE,\n ObservableCounter: AggregationTemporality.CUMULATIVE,\n ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,\n ObservableGauge: AggregationTemporality.CUMULATIVE,\n }\n )\n self._collector = _CustomCollector()\n REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n\n def _receive_metrics(\n self,\n metrics_data: MetricsData,\n timeout_millis: float = 10_000,\n **kwargs,\n ) -> None:\n if metrics_data is None:\n return\n self._collector.add_metrics_data(metrics_data)\n\n def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:\n REGISTRY.unregister(self._collector)\n\n\nclass _CustomCollector:\n \"\"\"_CustomCollector represents the Prometheus Collector object\n\n See more:\n https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n\n def __init__(self):\n self._callback = None\n self._metrics_datas = deque()\n self._non_letters_digits_underscore_re = compile(\n r\"[^\\w]\", UNICODE | IGNORECASE\n )\n\n def add_metrics_data(self, metrics_data: MetricsData) -> None:\n \"\"\"Add metrics to Prometheus data\"\"\"\n self._metrics_datas.append(metrics_data)\n\n def collect(self) -> None:\n \"\"\"Collect fetches the metrics from OpenTelemetry\n and delivers them as Prometheus Metrics.\n Collect is invoked every time a ``prometheus.Gatherer`` is run\n for example when the HTTP endpoint is invoked by Prometheus.\n \"\"\"\n if self._callback is not None:\n self._callback()\n\n metric_family_id_metric_family = {}\n\n while self._metrics_datas:\n self._translate_to_prometheus(\n self._metrics_datas.popleft(), metric_family_id_metric_family\n )\n\n if metric_family_id_metric_family:\n for metric_family in metric_family_id_metric_family.values():\n yield metric_family\n\n # pylint: 
disable=too-many-locals,too-many-branches\n def _translate_to_prometheus(\n self,\n metrics_data: MetricsData,\n metric_family_id_metric_family: Dict[str, PrometheusMetric],\n ):\n metrics = []\n\n for resource_metrics in metrics_data.resource_metrics:\n for scope_metrics in resource_metrics.scope_metrics:\n for metric in scope_metrics.metrics:\n metrics.append(metric)\n\n for metric in metrics:\n label_valuess = []\n values = []\n\n pre_metric_family_ids = []\n\n metric_name = \"\"\n metric_name += self._sanitize(metric.name)\n\n metric_description = metric.description or \"\"\n\n for number_data_point in metric.data.data_points:\n label_keys = []\n label_values = []\n\n for key, value in number_data_point.attributes.items():\n label_keys.append(self._sanitize(key))\n label_values.append(self._check_value(value))\n\n pre_metric_family_ids.append(\n \"|\".join(\n [\n metric_name,\n metric_description,\n \"%\".join(label_keys),\n metric.unit,\n ]\n )\n )\n\n label_valuess.append(label_values)\n if isinstance(number_data_point, HistogramDataPoint):\n values.append(\n {\n \"bucket_counts\": number_data_point.bucket_counts,\n \"explicit_bounds\": (\n number_data_point.explicit_bounds\n ),\n \"sum\": number_data_point.sum,\n }\n )\n else:\n values.append(number_data_point.value)\n\n for pre_metric_family_id, label_values, value in zip(\n pre_metric_family_ids, label_valuess, values\n ):\n if isinstance(metric.data, Sum):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, CounterMetricFamily.__name__]\n )\n\n if metric_family_id not in metric_family_id_metric_family:\n metric_family_id_metric_family[\n metric_family_id\n ] = CounterMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n elif isinstance(metric.data, Gauge):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, GaugeMetricFamily.__name__]\n )\n\n if (\n metric_family_id\n not in metric_family_id_metric_family.keys()\n ):\n metric_family_id_metric_family[\n metric_family_id\n ] = GaugeMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n elif isinstance(metric.data, Histogram):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, HistogramMetricFamily.__name__]\n )\n\n if (\n metric_family_id\n not in metric_family_id_metric_family.keys()\n ):\n metric_family_id_metric_family[\n metric_family_id\n ] = HistogramMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(\n labels=label_values,\n buckets=_convert_buckets(\n value[\"bucket_counts\"], value[\"explicit_bounds\"]\n ),\n sum_value=value[\"sum\"],\n )\n else:\n _logger.warning(\n \"Unsupported metric data. 
%s\", type(metric.data)\n )\n\n def _sanitize(self, key: str) -> str:\n \"\"\"sanitize the given metric name or label according to Prometheus rule.\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n\n # pylint: disable=no-self-use\n def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n \"\"\"Check the label value and return is appropriate representation\"\"\"\n if not isinstance(value, str):\n return dumps(value, default=str)\n return str(value)\n", "path": "exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py"}]}
3,635
438
gh_patches_debug_37743
rasdani/github-patches
git_diff
ydataai__ydata-profiling-157
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ZeroDivisionError when trying to profile a dataframe with only columns of object dtype An example: ``` >>> df = pd.DataFrame({'a': ['a', 'b']}) >>> pandas_profiling.ProfileReport(df) /home/me/.local/lib/python3.6/site-packages/matplotlib/axes/_base.py:2782: UserWarning: Attempting to set identical left==right results in singular transformations; automatically expanding. left=-0.5, right=-0.5 'left=%s, right=%s') % (left, right)) /home/me/.local/lib/python3.6/site-packages/matplotlib/axes/_base.py:3040: UserWarning: Attempting to set identical bottom==top results in singular transformations; automatically expanding. bottom=-0.5, top=-0.5 'bottom=%s, top=%s') % (bottom, top)) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/me/.local/lib/python3.6/site-packages/pandas_profiling-1.4.1-py3.6.egg/pandas_profiling/__init__.py", line 69, in __init__ moved={'CParserError': 'pandas.errors.ParserError'}) File "/home/me/.local/lib/python3.6/site-packages/pandas_profiling-1.4.1-py3.6.egg/pandas_profiling/report.py", line 199, in to_html File "/home/me/.local/lib/python3.6/site-packages/pandas_profiling-1.4.1-py3.6.egg/pandas_profiling/plot.py", line 136, in correlation_matrix ZeroDivisionError: float division by zero ``` (EDIT: removed some personal information from the error message) The error is due to pandas-profiling trying to divide by the length of an empty dataframe in the function that plots the correlations matrices. Should be easy to fix by omitting the correlation matrix plots if there is no numerical data present in the dataframe. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pandas_profiling/report.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """Generate reports""" 3 import sys 4 import six 5 import pandas as pd 6 import pandas_profiling.formatters as formatters 7 import pandas_profiling.templates as templates 8 import pandas_profiling.plot as plot 9 10 11 def to_html(sample, stats_object): 12 """Generate a HTML report from summary statistics and a given sample. 13 14 Parameters 15 ---------- 16 sample : DataFrame 17 the sample you want to print 18 stats_object : dict 19 Summary statistics. Should be generated with an appropriate describe() function 20 21 Returns 22 ------- 23 str 24 containing profile report in HTML format 25 26 Notes 27 ----- 28 * This function as to be refactored since it's huge and it contains inner functions 29 """ 30 31 n_obs = stats_object['table']['n'] 32 33 value_formatters = formatters.value_formatters 34 row_formatters = formatters.row_formatters 35 36 if not isinstance(sample, pd.DataFrame): 37 raise TypeError("sample must be of type pandas.DataFrame") 38 39 if not isinstance(stats_object, dict): 40 raise TypeError("stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?") 41 42 if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())): 43 raise TypeError( 44 "stats_object badly formatted. 
Did you generate this using the pandas_profiling.describe() function?") 45 46 def fmt(value, name): 47 if pd.isnull(value): 48 return "" 49 if name in value_formatters: 50 return value_formatters[name](value) 51 elif isinstance(value, float): 52 return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value) 53 else: 54 try: 55 return unicode(value) # Python 2 56 except NameError: 57 return str(value) # Python 3 58 59 60 def _format_row(freq, label, max_freq, row_template, n, extra_class=''): 61 if max_freq != 0: 62 width = int(freq / max_freq * 99) + 1 63 else: 64 width = 1 65 66 if width > 20: 67 label_in_bar = freq 68 label_after_bar = "" 69 else: 70 label_in_bar = "&nbsp;" 71 label_after_bar = freq 72 73 return row_template.render(label=label, 74 width=width, 75 count=freq, 76 percentage='{:2.1f}'.format(freq / n * 100), 77 extra_class=extra_class, 78 label_in_bar=label_in_bar, 79 label_after_bar=label_after_bar) 80 81 def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6): 82 83 freq_rows_html = u'' 84 85 if max_number_to_print > n: 86 max_number_to_print=n 87 88 if max_number_to_print < len(freqtable): 89 freq_other = sum(freqtable.iloc[max_number_to_print:]) 90 min_freq = freqtable.values[max_number_to_print] 91 else: 92 freq_other = 0 93 min_freq = 0 94 95 freq_missing = n - sum(freqtable) 96 max_freq = max(freqtable.values[0], freq_other, freq_missing) 97 98 # TODO: Correctly sort missing and other 99 100 for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]): 101 freq_rows_html += _format_row(freq, label, max_freq, row_template, n) 102 103 if freq_other > min_freq: 104 freq_rows_html += _format_row(freq_other, 105 "Other values (%s)" % (freqtable.count() - max_number_to_print), max_freq, row_template, n, 106 extra_class='other') 107 108 if freq_missing > min_freq: 109 freq_rows_html += _format_row(freq_missing, "(Missing)", max_freq, row_template, n, extra_class='missing') 110 111 return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col) 112 113 def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True): 114 115 # If it's mixed between base types (str, int) convert to str. 
Pure "mixed" types are filtered during type discovery 116 if "mixed" in freqtable.index.inferred_type: 117 freqtable.index = freqtable.index.astype(str) 118 119 sorted_freqTable = freqtable.sort_index() 120 121 if ascending: 122 obs_to_print = sorted_freqTable.iloc[:number_to_print] 123 else: 124 obs_to_print = sorted_freqTable.iloc[-number_to_print:] 125 126 freq_rows_html = '' 127 max_freq = max(obs_to_print.values) 128 129 for label, freq in six.iteritems(obs_to_print): 130 freq_rows_html += _format_row(freq, label, max_freq, row_template, n) 131 132 return table_template.render(rows=freq_rows_html) 133 134 # Variables 135 rows_html = u"" 136 messages = [] 137 138 for idx, row in stats_object['variables'].iterrows(): 139 140 formatted_values = {'varname': idx, 'varid': hash(idx)} 141 row_classes = {} 142 143 for col, value in six.iteritems(row): 144 formatted_values[col] = fmt(value, col) 145 146 for col in set(row.index) & six.viewkeys(row_formatters): 147 row_classes[col] = row_formatters[col](row[col]) 148 if row_classes[col] == "alert" and col in templates.messages: 149 messages.append(templates.messages[col].format(formatted_values, varname = idx)) 150 151 if row['type'] in {'CAT', 'BOOL'}: 152 formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs, 153 templates.template('mini_freq_table'), 154 templates.template('mini_freq_table_row'), 155 3, 156 templates.mini_freq_table_nb_col[row['type']]) 157 158 if row['distinct_count'] > 50: 159 messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = idx)) 160 row_classes['distinct_count'] = "alert" 161 else: 162 row_classes['distinct_count'] = "" 163 164 if row['type'] == 'UNIQUE': 165 obs = stats_object['freq'][idx].index 166 167 formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=["First 3 values"]).to_html(classes="example_values", index=False) 168 formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=["Last 3 values"]).to_html(classes="example_values", index=False) 169 if row['type'] == 'UNSUPPORTED': 170 formatted_values['varname'] = idx 171 messages.append(templates.messages[row['type']].format(formatted_values)) 172 elif row['type'] in {'CORR', 'CONST', 'RECODED'}: 173 formatted_values['varname'] = idx 174 messages.append(templates.messages[row['type']].format(formatted_values)) 175 else: 176 formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs, 177 templates.template('freq_table'), templates.template('freq_table_row'), 10) 178 formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True) 179 formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False) 180 181 rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes) 182 183 # Overview 184 formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])} 185 186 row_classes={} 187 for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters): 188 row_classes[col] = row_formatters[col](stats_object['table'][col]) 189 if row_classes[col] == "alert" and col in templates.messages: 190 messages.append(templates.messages[col].format(formatted_values, varname = idx)) 191 192 messages_html = u'' 193 for msg in messages: 194 messages_html += 
templates.message_row.format(message=msg) 195 196 overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html) 197 198 # Add plot of matrix correlation 199 pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson') 200 spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman') 201 correlations_html = templates.template('correlations').render( 202 values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix}) 203 204 # Add sample 205 sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes="sample")) 206 # TODO: should be done in the template 207 return templates.template('base').render({ 208 'overview_html': overview_html, 209 'rows_html': rows_html, 210 'sample_html': sample_html, 211 'correlation_html': correlations_html 212 }) 213 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pandas_profiling/report.py b/pandas_profiling/report.py --- a/pandas_profiling/report.py +++ b/pandas_profiling/report.py @@ -134,6 +134,7 @@ # Variables rows_html = u"" messages = [] + render_htmls = {} for idx, row in stats_object['variables'].iterrows(): @@ -179,6 +180,7 @@ formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False) rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes) + render_htmls['rows_html'] = rows_html # Overview formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])} @@ -194,19 +196,19 @@ messages_html += templates.message_row.format(message=msg) overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html) + render_htmls['overview_html'] = overview_html - # Add plot of matrix correlation - pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson') - spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman') - correlations_html = templates.template('correlations').render( - values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix}) + # Add plot of matrix correlation if the dataframe is not empty + if len(stats_object['correlations']['pearson']) > 0: + pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson') + spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman') + correlations_html = templates.template('correlations').render( + values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix}) + render_htmls['correlations_html'] = correlations_html # Add sample sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes="sample")) + render_htmls['sample_html'] = sample_html + # TODO: should be done in the template - return templates.template('base').render({ - 'overview_html': overview_html, - 'rows_html': rows_html, - 'sample_html': sample_html, - 'correlation_html': correlations_html - }) + return templates.template('base').render(render_htmls)
{"golden_diff": "diff --git a/pandas_profiling/report.py b/pandas_profiling/report.py\n--- a/pandas_profiling/report.py\n+++ b/pandas_profiling/report.py\n@@ -134,6 +134,7 @@\n # Variables\n rows_html = u\"\"\n messages = []\n+ render_htmls = {}\n \n for idx, row in stats_object['variables'].iterrows():\n \n@@ -179,6 +180,7 @@\n formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)\n \n rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)\n+ render_htmls['rows_html'] = rows_html\n \n # Overview\n formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}\n@@ -194,19 +196,19 @@\n messages_html += templates.message_row.format(message=msg)\n \n overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)\n+ render_htmls['overview_html'] = overview_html\n \n- # Add plot of matrix correlation\n- pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')\n- spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')\n- correlations_html = templates.template('correlations').render(\n- values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})\n+ # Add plot of matrix correlation if the dataframe is not empty\n+ if len(stats_object['correlations']['pearson']) > 0:\n+ pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')\n+ spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')\n+ correlations_html = templates.template('correlations').render(\n+ values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})\n+ render_htmls['correlations_html'] = correlations_html\n \n # Add sample\n sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes=\"sample\"))\n+ render_htmls['sample_html'] = sample_html\n+\n # TODO: should be done in the template\n- return templates.template('base').render({\n- 'overview_html': overview_html,\n- 'rows_html': rows_html,\n- 'sample_html': sample_html,\n- 'correlation_html': correlations_html\n- })\n+ return templates.template('base').render(render_htmls)\n", "issue": "ZeroDivisionError when trying to profile a dataframe with only columns of object dtype\nAn example:\r\n\r\n```\r\n>>> df = pd.DataFrame({'a': ['a', 'b']})\r\n>>> pandas_profiling.ProfileReport(df)\r\n/home/me/.local/lib/python3.6/site-packages/matplotlib/axes/_base.py:2782: UserWarning: Attempting to set identical left==right results\r\nin singular transformations; automatically expanding.\r\nleft=-0.5, right=-0.5\r\n 'left=%s, right=%s') % (left, right))\r\n/home/me/.local/lib/python3.6/site-packages/matplotlib/axes/_base.py:3040: UserWarning: Attempting to set identical bottom==top results\r\nin singular transformations; automatically expanding.\r\nbottom=-0.5, top=-0.5\r\n 'bottom=%s, top=%s') % (bottom, top))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/me/.local/lib/python3.6/site-packages/pandas_profiling-1.4.1-py3.6.egg/pandas_profiling/__init__.py\", line 69, in __init__\r\n moved={'CParserError': 'pandas.errors.ParserError'})\r\n File \"/home/me/.local/lib/python3.6/site-packages/pandas_profiling-1.4.1-py3.6.egg/pandas_profiling/report.py\", line 199, in 
to_html\r\n File \"/home/me/.local/lib/python3.6/site-packages/pandas_profiling-1.4.1-py3.6.egg/pandas_profiling/plot.py\", line 136, in correlation_matrix\r\n\r\nZeroDivisionError: float division by zero\r\n```\r\n(EDIT: removed some personal information from the error message)\r\n\r\nThe error is due to pandas-profiling trying to divide by the length of an empty dataframe in the function that plots the correlations matrices. Should be easy to fix by omitting the correlation matrix plots if there is no numerical data present in the dataframe.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Generate reports\"\"\"\nimport sys\nimport six\nimport pandas as pd\nimport pandas_profiling.formatters as formatters\nimport pandas_profiling.templates as templates\nimport pandas_profiling.plot as plot\n\n\ndef to_html(sample, stats_object):\n \"\"\"Generate a HTML report from summary statistics and a given sample.\n\n Parameters\n ----------\n sample : DataFrame\n the sample you want to print\n stats_object : dict\n Summary statistics. Should be generated with an appropriate describe() function\n\n Returns\n -------\n str\n containing profile report in HTML format\n\n Notes\n -----\n * This function as to be refactored since it's huge and it contains inner functions\n \"\"\"\n\n n_obs = stats_object['table']['n']\n\n value_formatters = formatters.value_formatters\n row_formatters = formatters.row_formatters\n\n if not isinstance(sample, pd.DataFrame):\n raise TypeError(\"sample must be of type pandas.DataFrame\")\n\n if not isinstance(stats_object, dict):\n raise TypeError(\"stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?\")\n\n if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):\n raise TypeError(\n \"stats_object badly formatted. 
Did you generate this using the pandas_profiling.describe() function?\")\n\n def fmt(value, name):\n if pd.isnull(value):\n return \"\"\n if name in value_formatters:\n return value_formatters[name](value)\n elif isinstance(value, float):\n return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)\n else:\n try:\n return unicode(value) # Python 2\n except NameError:\n return str(value) # Python 3\n \n\n def _format_row(freq, label, max_freq, row_template, n, extra_class=''):\n if max_freq != 0:\n width = int(freq / max_freq * 99) + 1\n else:\n width = 1\n\n if width > 20:\n label_in_bar = freq\n label_after_bar = \"\"\n else:\n label_in_bar = \"&nbsp;\"\n label_after_bar = freq\n\n return row_template.render(label=label,\n width=width,\n count=freq,\n percentage='{:2.1f}'.format(freq / n * 100),\n extra_class=extra_class,\n label_in_bar=label_in_bar,\n label_after_bar=label_after_bar)\n\n def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):\n\n freq_rows_html = u''\n\n if max_number_to_print > n:\n max_number_to_print=n\n\n if max_number_to_print < len(freqtable):\n freq_other = sum(freqtable.iloc[max_number_to_print:])\n min_freq = freqtable.values[max_number_to_print]\n else:\n freq_other = 0\n min_freq = 0\n\n freq_missing = n - sum(freqtable)\n max_freq = max(freqtable.values[0], freq_other, freq_missing)\n\n # TODO: Correctly sort missing and other\n\n for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n if freq_other > min_freq:\n freq_rows_html += _format_row(freq_other,\n \"Other values (%s)\" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,\n extra_class='other')\n\n if freq_missing > min_freq:\n freq_rows_html += _format_row(freq_missing, \"(Missing)\", max_freq, row_template, n, extra_class='missing')\n\n return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)\n\n def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):\n\n # If it's mixed between base types (str, int) convert to str. 
Pure \"mixed\" types are filtered during type discovery\n if \"mixed\" in freqtable.index.inferred_type:\n freqtable.index = freqtable.index.astype(str)\n\n sorted_freqTable = freqtable.sort_index()\n\n if ascending:\n obs_to_print = sorted_freqTable.iloc[:number_to_print]\n else:\n obs_to_print = sorted_freqTable.iloc[-number_to_print:]\n\n freq_rows_html = ''\n max_freq = max(obs_to_print.values)\n\n for label, freq in six.iteritems(obs_to_print):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n return table_template.render(rows=freq_rows_html)\n\n # Variables\n rows_html = u\"\"\n messages = []\n\n for idx, row in stats_object['variables'].iterrows():\n\n formatted_values = {'varname': idx, 'varid': hash(idx)}\n row_classes = {}\n\n for col, value in six.iteritems(row):\n formatted_values[col] = fmt(value, col)\n\n for col in set(row.index) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](row[col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = idx))\n\n if row['type'] in {'CAT', 'BOOL'}:\n formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('mini_freq_table'), \n templates.template('mini_freq_table_row'), \n 3, \n templates.mini_freq_table_nb_col[row['type']])\n\n if row['distinct_count'] > 50:\n messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = idx))\n row_classes['distinct_count'] = \"alert\"\n else:\n row_classes['distinct_count'] = \"\"\n\n if row['type'] == 'UNIQUE':\n obs = stats_object['freq'][idx].index\n\n formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=[\"First 3 values\"]).to_html(classes=\"example_values\", index=False)\n formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=[\"Last 3 values\"]).to_html(classes=\"example_values\", index=False)\n if row['type'] == 'UNSUPPORTED':\n formatted_values['varname'] = idx\n messages.append(templates.messages[row['type']].format(formatted_values))\n elif row['type'] in {'CORR', 'CONST', 'RECODED'}:\n formatted_values['varname'] = idx\n messages.append(templates.messages[row['type']].format(formatted_values))\n else:\n formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('freq_table'), templates.template('freq_table_row'), 10)\n formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)\n formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)\n\n rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)\n\n # Overview\n formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}\n\n row_classes={}\n for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](stats_object['table'][col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = idx))\n\n messages_html = u''\n for msg in messages:\n messages_html += templates.message_row.format(message=msg)\n\n overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, 
messages=messages_html)\n\n # Add plot of matrix correlation\n pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')\n spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')\n correlations_html = templates.template('correlations').render(\n values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})\n\n # Add sample\n sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes=\"sample\"))\n # TODO: should be done in the template\n return templates.template('base').render({\n 'overview_html': overview_html,\n 'rows_html': rows_html,\n 'sample_html': sample_html,\n 'correlation_html': correlations_html\n })\n", "path": "pandas_profiling/report.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Generate reports\"\"\"\nimport sys\nimport six\nimport pandas as pd\nimport pandas_profiling.formatters as formatters\nimport pandas_profiling.templates as templates\nimport pandas_profiling.plot as plot\n\n\ndef to_html(sample, stats_object):\n \"\"\"Generate a HTML report from summary statistics and a given sample.\n\n Parameters\n ----------\n sample : DataFrame\n the sample you want to print\n stats_object : dict\n Summary statistics. Should be generated with an appropriate describe() function\n\n Returns\n -------\n str\n containing profile report in HTML format\n\n Notes\n -----\n * This function as to be refactored since it's huge and it contains inner functions\n \"\"\"\n\n n_obs = stats_object['table']['n']\n\n value_formatters = formatters.value_formatters\n row_formatters = formatters.row_formatters\n\n if not isinstance(sample, pd.DataFrame):\n raise TypeError(\"sample must be of type pandas.DataFrame\")\n\n if not isinstance(stats_object, dict):\n raise TypeError(\"stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?\")\n\n if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):\n raise TypeError(\n \"stats_object badly formatted. 
Did you generate this using the pandas_profiling.describe() function?\")\n\n def fmt(value, name):\n if pd.isnull(value):\n return \"\"\n if name in value_formatters:\n return value_formatters[name](value)\n elif isinstance(value, float):\n return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)\n else:\n try:\n return unicode(value) # Python 2\n except NameError:\n return str(value) # Python 3\n \n\n def _format_row(freq, label, max_freq, row_template, n, extra_class=''):\n if max_freq != 0:\n width = int(freq / max_freq * 99) + 1\n else:\n width = 1\n\n if width > 20:\n label_in_bar = freq\n label_after_bar = \"\"\n else:\n label_in_bar = \"&nbsp;\"\n label_after_bar = freq\n\n return row_template.render(label=label,\n width=width,\n count=freq,\n percentage='{:2.1f}'.format(freq / n * 100),\n extra_class=extra_class,\n label_in_bar=label_in_bar,\n label_after_bar=label_after_bar)\n\n def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):\n\n freq_rows_html = u''\n\n if max_number_to_print > n:\n max_number_to_print=n\n\n if max_number_to_print < len(freqtable):\n freq_other = sum(freqtable.iloc[max_number_to_print:])\n min_freq = freqtable.values[max_number_to_print]\n else:\n freq_other = 0\n min_freq = 0\n\n freq_missing = n - sum(freqtable)\n max_freq = max(freqtable.values[0], freq_other, freq_missing)\n\n # TODO: Correctly sort missing and other\n\n for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n if freq_other > min_freq:\n freq_rows_html += _format_row(freq_other,\n \"Other values (%s)\" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,\n extra_class='other')\n\n if freq_missing > min_freq:\n freq_rows_html += _format_row(freq_missing, \"(Missing)\", max_freq, row_template, n, extra_class='missing')\n\n return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)\n\n def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):\n\n # If it's mixed between base types (str, int) convert to str. 
Pure \"mixed\" types are filtered during type discovery\n if \"mixed\" in freqtable.index.inferred_type:\n freqtable.index = freqtable.index.astype(str)\n\n sorted_freqTable = freqtable.sort_index()\n\n if ascending:\n obs_to_print = sorted_freqTable.iloc[:number_to_print]\n else:\n obs_to_print = sorted_freqTable.iloc[-number_to_print:]\n\n freq_rows_html = ''\n max_freq = max(obs_to_print.values)\n\n for label, freq in six.iteritems(obs_to_print):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n return table_template.render(rows=freq_rows_html)\n\n # Variables\n rows_html = u\"\"\n messages = []\n render_htmls = {}\n\n for idx, row in stats_object['variables'].iterrows():\n\n formatted_values = {'varname': idx, 'varid': hash(idx)}\n row_classes = {}\n\n for col, value in six.iteritems(row):\n formatted_values[col] = fmt(value, col)\n\n for col in set(row.index) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](row[col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = idx))\n\n if row['type'] in {'CAT', 'BOOL'}:\n formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('mini_freq_table'), \n templates.template('mini_freq_table_row'), \n 3, \n templates.mini_freq_table_nb_col[row['type']])\n\n if row['distinct_count'] > 50:\n messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = idx))\n row_classes['distinct_count'] = \"alert\"\n else:\n row_classes['distinct_count'] = \"\"\n\n if row['type'] == 'UNIQUE':\n obs = stats_object['freq'][idx].index\n\n formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=[\"First 3 values\"]).to_html(classes=\"example_values\", index=False)\n formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=[\"Last 3 values\"]).to_html(classes=\"example_values\", index=False)\n if row['type'] == 'UNSUPPORTED':\n formatted_values['varname'] = idx\n messages.append(templates.messages[row['type']].format(formatted_values))\n elif row['type'] in {'CORR', 'CONST', 'RECODED'}:\n formatted_values['varname'] = idx\n messages.append(templates.messages[row['type']].format(formatted_values))\n else:\n formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,\n templates.template('freq_table'), templates.template('freq_table_row'), 10)\n formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)\n formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)\n\n rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)\n render_htmls['rows_html'] = rows_html\n\n # Overview\n formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}\n\n row_classes={}\n for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):\n row_classes[col] = row_formatters[col](stats_object['table'][col])\n if row_classes[col] == \"alert\" and col in templates.messages:\n messages.append(templates.messages[col].format(formatted_values, varname = idx))\n\n messages_html = u''\n for msg in messages:\n messages_html += templates.message_row.format(message=msg)\n\n overview_html = 
templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)\n render_htmls['overview_html'] = overview_html\n\n # Add plot of matrix correlation if the dataframe is not empty\n if len(stats_object['correlations']['pearson']) > 0:\n pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')\n spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')\n correlations_html = templates.template('correlations').render(\n values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})\n render_htmls['correlations_html'] = correlations_html\n\n # Add sample\n sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes=\"sample\"))\n render_htmls['sample_html'] = sample_html\n\n # TODO: should be done in the template\n return templates.template('base').render(render_htmls)\n", "path": "pandas_profiling/report.py"}]}
3186
612
gh_patches_debug_40616
rasdani/github-patches
git_diff
optuna__optuna-1248
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `experimental` decorator adds extra indents to docs An extra indent appears in the docs when the [`experimental`](https://github.com/optuna/optuna/blob/2d5f24b06eed56ece72b8dfa878135bb4bb63779/optuna/_experimental.py#L60) decorator is added to a method of a class. This does not happen with a top level (i.e., not nested) class or function. ## Steps to reproduce 1. add an `experimental` decorator to a method of a class 2. make the document ## Reproducible examples The following figure shows the docs of [`Study.enqueue_trial`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.enqueue_trial). An extra indent appears after the second paragraph. ![image](https://user-images.githubusercontent.com/1061922/81144389-e6f32f80-8fae-11ea-8dd2-33368293dafa.png) ## Expected behavior no extra indent is shown after the second paragraph For example, the docs of [`Study.get_trials`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.get_trials), which does not have the `experimental` decorator, appears as expected. ![image](https://user-images.githubusercontent.com/1061922/81143489-f1142e80-8fac-11ea-9896-e56086228168.png) The following figure shows another example. The docs of the [`train`](https://optuna.readthedocs.io/en/latest/reference/integration.html#optuna.integration.lightgbm.train) function of LightGBMTuner has no extra indent. Although the function has an `experimental` decorator, it belongs to the top level classes. ![image](https://user-images.githubusercontent.com/1061922/81146606-98945f80-8fb3-11ea-8d67-5cf1cfbf8768.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optuna/_experimental.py` Content: ``` 1 import functools 2 import inspect 3 from typing import Any 4 from typing import Callable 5 import warnings 6 7 from optuna.exceptions import ExperimentalWarning 8 9 10 # White spaces of each line are necessary to beautifully rendered documentation. 11 # NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require 12 # another four spaces. 13 _EXPERIMENTAL_DOCSTRING_TEMPLATE = """ 14 15 .. note:: 16 Added in v{ver} as an experimental feature. The interface may change in newer versions 17 without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. 18 """ 19 20 21 def _validate_version(version: str) -> None: 22 23 if not isinstance(version, str) or len(version.split(".")) != 3: 24 raise ValueError( 25 "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format( 26 version 27 ) 28 ) 29 30 31 def experimental(version: str, name: str = None) -> Any: 32 """Decorate class or function as experimental. 33 34 Args: 35 version: The first version that supports the target feature. 36 name: The name of the feature. Defaults to the function or class name. Optional. 37 """ 38 39 _validate_version(version) 40 41 def _experimental_wrapper(f: Any) -> Any: 42 # f is either func or class. 43 44 def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]: 45 46 docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 47 if func.__doc__ is None: 48 func.__doc__ = "" 49 func.__doc__ += docstring 50 51 # TODO(crcrpar): Annotate this correctly. 
52 @functools.wraps(func) 53 def new_func(*args: Any, **kwargs: Any) -> Any: 54 """Wrapped function.""" 55 56 warnings.warn( 57 "{} is experimental (supported from v{}). " 58 "The interface can change in the future.".format( 59 name if name is not None else func.__name__, version 60 ), 61 ExperimentalWarning, 62 ) 63 64 return func(*args, **kwargs) # type: ignore 65 66 return new_func 67 68 def _experimental_class(cls: Any) -> Any: 69 """Decorates a class as experimental. 70 71 This decorator is supposed to be applied to the experimental class. 72 """ 73 74 _original_init = cls.__init__ 75 76 @functools.wraps(_original_init) 77 def wrapped_init(self, *args, **kwargs) -> None: # type: ignore 78 warnings.warn( 79 "{} is experimental (supported from v{}). " 80 "The interface can change in the future.".format( 81 name if name is not None else cls.__name__, version 82 ), 83 ExperimentalWarning, 84 ) 85 86 _original_init(self, *args, **kwargs) 87 88 cls.__init__ = wrapped_init 89 90 if cls.__doc__ is None: 91 cls.__doc__ = "" 92 cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 93 return cls 94 95 return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f) 96 97 return _experimental_wrapper 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optuna/_experimental.py b/optuna/_experimental.py --- a/optuna/_experimental.py +++ b/optuna/_experimental.py @@ -1,5 +1,6 @@ import functools import inspect +import textwrap from typing import Any from typing import Callable import warnings @@ -7,14 +8,11 @@ from optuna.exceptions import ExperimentalWarning -# White spaces of each line are necessary to beautifully rendered documentation. -# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require -# another four spaces. -_EXPERIMENTAL_DOCSTRING_TEMPLATE = """ +_EXPERIMENTAL_NOTE_TEMPLATE = """ - .. note:: - Added in v{ver} as an experimental feature. The interface may change in newer versions - without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. +.. note:: + Added in v{ver} as an experimental feature. The interface may change in newer versions + without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. """ @@ -28,6 +26,10 @@ ) +def _get_docstring_indent(docstring: str) -> str: + return docstring.split("\n")[-1] if "\n" in docstring else "" + + def experimental(version: str, name: str = None) -> Any: """Decorate class or function as experimental. @@ -43,16 +45,16 @@ def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]: - docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) if func.__doc__ is None: func.__doc__ = "" - func.__doc__ += docstring + + note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version) + indent = _get_docstring_indent(func.__doc__) + func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent # TODO(crcrpar): Annotate this correctly. @functools.wraps(func) def new_func(*args: Any, **kwargs: Any) -> Any: - """Wrapped function.""" - warnings.warn( "{} is experimental (supported from v{}). " "The interface can change in the future.".format( @@ -70,7 +72,6 @@ This decorator is supposed to be applied to the experimental class. """ - _original_init = cls.__init__ @functools.wraps(_original_init) @@ -89,7 +90,11 @@ if cls.__doc__ is None: cls.__doc__ = "" - cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) + + note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version) + indent = _get_docstring_indent(cls.__doc__) + cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent + return cls return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
{"golden_diff": "diff --git a/optuna/_experimental.py b/optuna/_experimental.py\n--- a/optuna/_experimental.py\n+++ b/optuna/_experimental.py\n@@ -1,5 +1,6 @@\n import functools\n import inspect\n+import textwrap\n from typing import Any\n from typing import Callable\n import warnings\n@@ -7,14 +8,11 @@\n from optuna.exceptions import ExperimentalWarning\n \n \n-# White spaces of each line are necessary to beautifully rendered documentation.\n-# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n-# another four spaces.\n-_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n+_EXPERIMENTAL_NOTE_TEMPLATE = \"\"\"\n \n- .. note::\n- Added in v{ver} as an experimental feature. The interface may change in newer versions\n- without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n+.. note::\n+ Added in v{ver} as an experimental feature. The interface may change in newer versions\n+ without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n \"\"\"\n \n \n@@ -28,6 +26,10 @@\n )\n \n \n+def _get_docstring_indent(docstring: str) -> str:\n+ return docstring.split(\"\\n\")[-1] if \"\\n\" in docstring else \"\"\n+\n+\n def experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n \n@@ -43,16 +45,16 @@\n \n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n \n- docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n- func.__doc__ += docstring\n+\n+ note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n+ indent = _get_docstring_indent(func.__doc__)\n+ func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent\n \n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n- \"\"\"Wrapped function.\"\"\"\n-\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n@@ -70,7 +72,6 @@\n \n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n-\n _original_init = cls.__init__\n \n @functools.wraps(_original_init)\n@@ -89,7 +90,11 @@\n \n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n- cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n+\n+ note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n+ indent = _get_docstring_indent(cls.__doc__)\n+ cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent\n+\n return cls\n \n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n", "issue": "`experimental` decorator adds extra indents to docs\nAn extra indent appears in the docs when the [`experimental`](https://github.com/optuna/optuna/blob/2d5f24b06eed56ece72b8dfa878135bb4bb63779/optuna/_experimental.py#L60) decorator is added to a method of a class. This does not happen with a top level (i.e., not nested) class or function.\r\n\r\n## Steps to reproduce\r\n\r\n1. add an `experimental` decorator to a method of a class\r\n2. make the document\r\n\r\n## Reproducible examples\r\nThe following figure shows the docs of [`Study.enqueue_trial`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.enqueue_trial). 
An extra indent appears after the second paragraph.\r\n\r\n![image](https://user-images.githubusercontent.com/1061922/81144389-e6f32f80-8fae-11ea-8dd2-33368293dafa.png)\r\n\r\n## Expected behavior\r\nno extra indent is shown after the second paragraph\r\n\r\nFor example, the docs of [`Study.get_trials`](https://optuna.readthedocs.io/en/latest/reference/study.html#optuna.study.Study.get_trials), which does not have the `experimental` decorator, appears as expected.\r\n\r\n![image](https://user-images.githubusercontent.com/1061922/81143489-f1142e80-8fac-11ea-9896-e56086228168.png)\r\n\r\nThe following figure shows another example. The docs of the [`train`](https://optuna.readthedocs.io/en/latest/reference/integration.html#optuna.integration.lightgbm.train) function of LightGBMTuner has no extra indent. Although the function has an `experimental` decorator, it belongs to the top level classes.\r\n\r\n![image](https://user-images.githubusercontent.com/1061922/81146606-98945f80-8fb3-11ea-8d67-5cf1cfbf8768.png)\r\n\n", "before_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). 
\"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}], "after_files": [{"content": "import functools\nimport inspect\nimport textwrap\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n_EXPERIMENTAL_NOTE_TEMPLATE = \"\"\"\n\n.. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef _get_docstring_indent(docstring: str) -> str:\n return docstring.split(\"\\n\")[-1] if \"\\n\" in docstring else \"\"\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n if func.__doc__ is None:\n func.__doc__ = \"\"\n\n note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n indent = _get_docstring_indent(func.__doc__)\n func.__doc__ = func.__doc__.strip() + textwrap.indent(note, indent) + indent\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n _original_init = cls.__init__\n\n @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n\n note = _EXPERIMENTAL_NOTE_TEMPLATE.format(ver=version)\n indent = _get_docstring_indent(cls.__doc__)\n cls.__doc__ = cls.__doc__.strip() + textwrap.indent(note, indent) + indent\n\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}]}
1622
711
gh_patches_debug_39775
rasdani/github-patches
git_diff
meltano__meltano-6582
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `default_environment` can only be set in `meltano.yml`, not using `meltano config meltano set` @tayloramurphy pointed out that `meltano config meltano set default_environment prod` or `meltano config meltano unset default_environment` doesn't do anything. This is because `default_environment` is not actually a setting, but a core property of `meltano.yml`: https://github.com/meltano/meltano/blob/dfd9650170ff97fd69fdff7ac8dc7dc0841bf663/src/meltano/core/meltano_file.py#L21 https://docs.meltano.com/concepts/environments#default-environments doesn’t suggest that `meltano config meltano` would work here, only setting it directly in `meltano.yml`. But https://docs.meltano.com/getting-started#view-and-activate-your-environments does call it a setting, and someone seeing it in `meltano.yml` could reasonably expect it to work just like the other keys in there like `send_anonymous_usage_stats` and `project_id`. If we want to make it a proper setting, we should be able to define it in `settings.yml` and replace https://github.com/meltano/meltano/blob/main/src/meltano/cli/cli.py#L70 with `ProjectSettingsService(project).get("default_environment")`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/meltano/cli/cli.py` Content: ``` 1 from __future__ import annotations 2 3 import logging # noqa: D100 4 import sys 5 from typing import NoReturn 6 7 import click 8 9 import meltano 10 from meltano.cli.utils import InstrumentedGroup 11 from meltano.core.behavior.versioned import IncompatibleVersionError 12 from meltano.core.legacy_tracking import LegacyTracker 13 from meltano.core.logging import LEVELS, setup_logging 14 from meltano.core.project import Project, ProjectNotFound 15 from meltano.core.project_settings_service import ProjectSettingsService 16 from meltano.core.tracking import CliContext, Tracker 17 18 logger = logging.getLogger(__name__) 19 20 21 class NoWindowsGlobbingGroup(InstrumentedGroup): 22 """A instrumented Click group that does not perform glob expansion on Windows. 23 24 This restores the behaviour of Click's globbing to how it was before v8. 25 Click (as of version 8.1.3) ignores quotes around an asterisk, which makes 26 it behave differently than most shells that support globbing, and make some 27 typical Meltano commands fail, e.g. `meltano select tap-gitlab tags "*"`. 28 """ 29 30 def main(self, *args, **kwargs) -> NoReturn: 31 """Invoke the Click CLI with Windows globbing disabled. 32 33 Args: 34 args: Positional arguments for the Click group. 35 kwargs: Keyword arguments for the Click group. 36 """ 37 return super().main(*args, windows_expand_args=False, **kwargs) 38 39 40 @click.group( 41 cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True 42 ) 43 @click.option("--log-level", type=click.Choice(LEVELS.keys())) 44 @click.option( 45 "--log-config", type=str, help="Path to a python logging yaml config file." 46 ) 47 @click.option("-v", "--verbose", count=True, help="Not used.") 48 @click.option( 49 "--environment", 50 envvar="MELTANO_ENVIRONMENT", 51 help="Meltano environment name.", 52 ) 53 @click.option( 54 "--no-environment", is_flag=True, default=False, help="Don't use any environment." 
55 ) 56 @click.version_option(version=meltano.__version__, prog_name="meltano") 57 @click.pass_context 58 def cli( # noqa: WPS231 59 ctx, 60 log_level: str, 61 log_config: str, 62 verbose: int, 63 environment: str, 64 no_environment: bool, 65 ): # noqa: WPS231 66 """ 67 ELT for the DataOps era. 68 69 \b\nRead more at https://www.meltano.com/docs/command-line-interface.html 70 """ 71 ctx.ensure_object(dict) 72 73 if log_level: 74 ProjectSettingsService.config_override["cli.log_level"] = log_level 75 76 if log_config: 77 ProjectSettingsService.config_override["cli.log_config"] = log_config 78 79 ctx.obj["verbosity"] = verbose 80 try: # noqa: WPS229 81 project = Project.find() 82 setup_logging(project) 83 84 readonly = ProjectSettingsService(project).get("project_readonly") 85 if readonly: 86 project.readonly = True 87 if project.readonly: 88 logger.debug("Project is read-only.") 89 90 # detect active environment 91 selected_environment = None 92 is_default_environment = False 93 if no_environment or (environment and environment.lower() == "null"): 94 logger.info("No environment is active") 95 elif environment: 96 selected_environment = environment 97 elif project.meltano.default_environment: 98 selected_environment = project.meltano.default_environment 99 is_default_environment = True 100 # activate environment 101 if selected_environment: 102 project.activate_environment(selected_environment) 103 logger.info( 104 "Environment '%s' is active", selected_environment # noqa: WPS323 105 ) 106 ctx.obj["is_default_environment"] = is_default_environment 107 ctx.obj["project"] = project 108 ctx.obj["tracker"] = Tracker(project) 109 ctx.obj["tracker"].add_contexts( 110 CliContext.from_click_context(ctx) 111 ) # backfill the `cli` CliContext 112 ctx.obj["legacy_tracker"] = LegacyTracker( 113 project, context_overrides=ctx.obj["tracker"].contexts 114 ) 115 except ProjectNotFound: 116 ctx.obj["project"] = None 117 except IncompatibleVersionError: 118 click.secho( 119 "This Meltano project is incompatible with this version of `meltano`.", 120 fg="yellow", 121 ) 122 click.echo( 123 "For more details, visit http://meltano.com/docs/installation.html#upgrading-meltano-version" 124 ) 125 sys.exit(3) 126 ``` Path: `src/meltano/core/meltano_file.py` Content: ``` 1 """Module for working with meltano.yml files.""" 2 from __future__ import annotations 3 4 import copy 5 from typing import Iterable 6 7 from meltano.core.behavior.canonical import Canonical 8 from meltano.core.environment import Environment 9 from meltano.core.plugin import PluginType 10 from meltano.core.plugin.project_plugin import ProjectPlugin 11 from meltano.core.schedule import Schedule 12 from meltano.core.task_sets import TaskSets 13 14 VERSION = 1 15 16 17 class MeltanoFile(Canonical): 18 """Data and loading methods for meltano.yml files.""" 19 20 def __init__( 21 self, 22 version: int = VERSION, 23 default_environment: str | None = None, 24 plugins: dict[str, dict] = None, 25 schedules: list[dict] = None, 26 environments: list[dict] = None, 27 jobs: list[dict] = None, 28 env: dict[str, str] = None, 29 **extras, 30 ): 31 """Construct a new MeltanoFile object from meltano.yml file. 32 33 Args: 34 version: The meltano.yml version, currently always 1. 35 default_environment: The default environment to use for commands in this project. 36 plugins: Plugin configuration for this project. 37 schedules: Schedule configuration for this project. 38 environments: Environment configuration for this project. 39 jobs: Job configuration for this project. 
40 env: Environment variables for this project. 41 extras: Additional configuration for this project. 42 """ 43 super().__init__( 44 # Attributes will be listed in meltano.yml in this order: 45 version=version, 46 default_environment=default_environment, 47 extras=extras, 48 plugins=self.load_plugins(plugins or {}), 49 schedules=self.load_schedules(schedules or []), 50 environments=self.load_environments(environments or []), 51 jobs=self.load_job_tasks(jobs or []), 52 env=env or {}, 53 ) 54 55 def load_plugins(self, plugins: dict[str, dict]) -> Canonical: 56 """Parse the `meltano.yml` file and return it as `ProjectPlugin` instances. 57 58 Args: 59 plugins: Dictionary of plugin configurations. 60 61 Returns: 62 New ProjectPlugin instances. 63 """ 64 plugin_type_plugins = Canonical() 65 66 for ptype in PluginType: 67 plugin_type_plugins[ptype] = [] 68 69 # this will parse the meltano.yml file and create an instance of the 70 # corresponding `plugin_class` for all the plugins. 71 for plugin_type, raw_plugins in plugins.items(): 72 if plugin_type == PluginType.MAPPERS: 73 for mapper in raw_plugins: 74 plugin_type_plugins[PluginType.MAPPERS].append( 75 ProjectPlugin(PluginType.MAPPERS, **mapper) 76 ) 77 plugin_type_plugins[PluginType.MAPPERS].extend( 78 self.get_plugins_for_mappings(mapper) 79 ) 80 else: 81 for raw_plugin in raw_plugins: 82 plugin = ProjectPlugin(PluginType(plugin_type), **raw_plugin) 83 plugin_type_plugins[plugin.type].append(plugin) 84 85 return plugin_type_plugins 86 87 def load_schedules(self, schedules: list[dict]) -> list[Schedule]: 88 """Parse the meltano.yml file and return it as Schedule instances. 89 90 Args: 91 schedules: List of schedule configurations. 92 93 Returns: 94 List of new Schedule instances. 95 """ 96 return list(map(Schedule.parse, schedules)) 97 98 @staticmethod 99 def load_environments(environments: Iterable[dict]) -> list[Environment]: 100 """Parse `Environment` objects from python objects. 101 102 Args: 103 environments: Sequence of environment dictionaries. 104 105 Returns: 106 A list of `Environment` objects. 107 """ 108 return [Environment.parse(obj) for obj in environments] 109 110 @staticmethod 111 def load_job_tasks(jobs: Iterable[dict]) -> list[TaskSets]: 112 """Parse `TaskSets` objects from python objects. 113 114 Args: 115 jobs: Sequence of job dictionaries. 116 117 Returns: 118 A list of `Job` objects. 119 """ 120 return [TaskSets.parse(obj) for obj in jobs] 121 122 @staticmethod 123 def get_plugins_for_mappings(mapper_config: dict) -> list[ProjectPlugin]: 124 """Mapper plugins are a special case. They are not a single plugin, but actually a list of plugins generated from the mapping config defined within the mapper config. 125 126 Args: 127 mapper_config: The dict representation of a mapper config found in in meltano.yml. 128 129 Returns: 130 A list of `ProjectPlugin` instances. 131 """ 132 mapping_plugins: list[ProjectPlugin] = [] 133 for mapping in mapper_config.get("mappings", []): 134 raw_mapping_plugin = copy.deepcopy(mapper_config) 135 raw_mapping_plugin["mapping"] = True 136 raw_mapping_plugin["mapping_name"] = mapping.get("name") 137 raw_mapping_plugin["config"] = mapping.get("config") 138 mapping_plugins.append( 139 ProjectPlugin(PluginType.MAPPERS, **raw_mapping_plugin) 140 ) 141 return mapping_plugins 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/meltano/cli/cli.py b/src/meltano/cli/cli.py --- a/src/meltano/cli/cli.py +++ b/src/meltano/cli/cli.py @@ -1,6 +1,8 @@ +"""Definition of the top-level Click group for the Meltano CLI.""" + from __future__ import annotations -import logging # noqa: D100 +import logging import sys from typing import NoReturn @@ -77,11 +79,13 @@ ProjectSettingsService.config_override["cli.log_config"] = log_config ctx.obj["verbosity"] = verbose + try: # noqa: WPS229 project = Project.find() setup_logging(project) + project_setting_service = ProjectSettingsService(project) - readonly = ProjectSettingsService(project).get("project_readonly") + readonly = project_setting_service.get("project_readonly") if readonly: project.readonly = True if project.readonly: @@ -94,8 +98,8 @@ logger.info("No environment is active") elif environment: selected_environment = environment - elif project.meltano.default_environment: - selected_environment = project.meltano.default_environment + elif project_setting_service.get("default_environment"): + selected_environment = project_setting_service.get("default_environment") is_default_environment = True # activate environment if selected_environment: diff --git a/src/meltano/core/meltano_file.py b/src/meltano/core/meltano_file.py --- a/src/meltano/core/meltano_file.py +++ b/src/meltano/core/meltano_file.py @@ -20,7 +20,6 @@ def __init__( self, version: int = VERSION, - default_environment: str | None = None, plugins: dict[str, dict] = None, schedules: list[dict] = None, environments: list[dict] = None, @@ -32,7 +31,6 @@ Args: version: The meltano.yml version, currently always 1. - default_environment: The default environment to use for commands in this project. plugins: Plugin configuration for this project. schedules: Schedule configuration for this project. environments: Environment configuration for this project. @@ -43,7 +41,6 @@ super().__init__( # Attributes will be listed in meltano.yml in this order: version=version, - default_environment=default_environment, extras=extras, plugins=self.load_plugins(plugins or {}), schedules=self.load_schedules(schedules or []),
{"golden_diff": "diff --git a/src/meltano/cli/cli.py b/src/meltano/cli/cli.py\n--- a/src/meltano/cli/cli.py\n+++ b/src/meltano/cli/cli.py\n@@ -1,6 +1,8 @@\n+\"\"\"Definition of the top-level Click group for the Meltano CLI.\"\"\"\n+\n from __future__ import annotations\n \n-import logging # noqa: D100\n+import logging\n import sys\n from typing import NoReturn\n \n@@ -77,11 +79,13 @@\n ProjectSettingsService.config_override[\"cli.log_config\"] = log_config\n \n ctx.obj[\"verbosity\"] = verbose\n+\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n+ project_setting_service = ProjectSettingsService(project)\n \n- readonly = ProjectSettingsService(project).get(\"project_readonly\")\n+ readonly = project_setting_service.get(\"project_readonly\")\n if readonly:\n project.readonly = True\n if project.readonly:\n@@ -94,8 +98,8 @@\n logger.info(\"No environment is active\")\n elif environment:\n selected_environment = environment\n- elif project.meltano.default_environment:\n- selected_environment = project.meltano.default_environment\n+ elif project_setting_service.get(\"default_environment\"):\n+ selected_environment = project_setting_service.get(\"default_environment\")\n is_default_environment = True\n # activate environment\n if selected_environment:\ndiff --git a/src/meltano/core/meltano_file.py b/src/meltano/core/meltano_file.py\n--- a/src/meltano/core/meltano_file.py\n+++ b/src/meltano/core/meltano_file.py\n@@ -20,7 +20,6 @@\n def __init__(\n self,\n version: int = VERSION,\n- default_environment: str | None = None,\n plugins: dict[str, dict] = None,\n schedules: list[dict] = None,\n environments: list[dict] = None,\n@@ -32,7 +31,6 @@\n \n Args:\n version: The meltano.yml version, currently always 1.\n- default_environment: The default environment to use for commands in this project.\n plugins: Plugin configuration for this project.\n schedules: Schedule configuration for this project.\n environments: Environment configuration for this project.\n@@ -43,7 +41,6 @@\n super().__init__(\n # Attributes will be listed in meltano.yml in this order:\n version=version,\n- default_environment=default_environment,\n extras=extras,\n plugins=self.load_plugins(plugins or {}),\n schedules=self.load_schedules(schedules or []),\n", "issue": "`default_environment` can only be set in `meltano.yml`, not using `meltano config meltano set`\n@tayloramurphy pointed out that `meltano config meltano set default_environment prod` or `meltano config meltano unset default_environment` doesn't do anything.\r\n\r\nThis is because `default_environment` is not actually a setting, but a core property of `meltano.yml`: https://github.com/meltano/meltano/blob/dfd9650170ff97fd69fdff7ac8dc7dc0841bf663/src/meltano/core/meltano_file.py#L21\r\n\r\nhttps://docs.meltano.com/concepts/environments#default-environments doesn\u2019t suggest that `meltano config meltano` would work here, only setting it directly in `meltano.yml`. 
But https://docs.meltano.com/getting-started#view-and-activate-your-environments does call it a setting, and someone seeing it in `meltano.yml` could reasonably expect it to work just like the other keys in there like `send_anonymous_usage_stats` and `project_id`.\r\n\r\nIf we want to make it a proper setting, we should be able to define it in `settings.yml` and replace https://github.com/meltano/meltano/blob/main/src/meltano/cli/cli.py#L70 with `ProjectSettingsService(project).get(\"default_environment\")`.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging # noqa: D100\nimport sys\nfrom typing import NoReturn\n\nimport click\n\nimport meltano\nfrom meltano.cli.utils import InstrumentedGroup\nfrom meltano.core.behavior.versioned import IncompatibleVersionError\nfrom meltano.core.legacy_tracking import LegacyTracker\nfrom meltano.core.logging import LEVELS, setup_logging\nfrom meltano.core.project import Project, ProjectNotFound\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.tracking import CliContext, Tracker\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoWindowsGlobbingGroup(InstrumentedGroup):\n \"\"\"A instrumented Click group that does not perform glob expansion on Windows.\n\n This restores the behaviour of Click's globbing to how it was before v8.\n Click (as of version 8.1.3) ignores quotes around an asterisk, which makes\n it behave differently than most shells that support globbing, and make some\n typical Meltano commands fail, e.g. `meltano select tap-gitlab tags \"*\"`.\n \"\"\"\n\n def main(self, *args, **kwargs) -> NoReturn:\n \"\"\"Invoke the Click CLI with Windows globbing disabled.\n\n Args:\n args: Positional arguments for the Click group.\n kwargs: Keyword arguments for the Click group.\n \"\"\"\n return super().main(*args, windows_expand_args=False, **kwargs)\n\n\[email protected](\n cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True\n)\[email protected](\"--log-level\", type=click.Choice(LEVELS.keys()))\[email protected](\n \"--log-config\", type=str, help=\"Path to a python logging yaml config file.\"\n)\[email protected](\"-v\", \"--verbose\", count=True, help=\"Not used.\")\[email protected](\n \"--environment\",\n envvar=\"MELTANO_ENVIRONMENT\",\n help=\"Meltano environment name.\",\n)\[email protected](\n \"--no-environment\", is_flag=True, default=False, help=\"Don't use any environment.\"\n)\[email protected]_option(version=meltano.__version__, prog_name=\"meltano\")\[email protected]_context\ndef cli( # noqa: WPS231\n ctx,\n log_level: str,\n log_config: str,\n verbose: int,\n environment: str,\n no_environment: bool,\n): # noqa: WPS231\n \"\"\"\n ELT for the DataOps era.\n\n \\b\\nRead more at https://www.meltano.com/docs/command-line-interface.html\n \"\"\"\n ctx.ensure_object(dict)\n\n if log_level:\n ProjectSettingsService.config_override[\"cli.log_level\"] = log_level\n\n if log_config:\n ProjectSettingsService.config_override[\"cli.log_config\"] = log_config\n\n ctx.obj[\"verbosity\"] = verbose\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n\n readonly = ProjectSettingsService(project).get(\"project_readonly\")\n if readonly:\n project.readonly = True\n if project.readonly:\n logger.debug(\"Project is read-only.\")\n\n # detect active environment\n selected_environment = None\n is_default_environment = False\n if no_environment or (environment and environment.lower() == \"null\"):\n logger.info(\"No environment is 
active\")\n elif environment:\n selected_environment = environment\n elif project.meltano.default_environment:\n selected_environment = project.meltano.default_environment\n is_default_environment = True\n # activate environment\n if selected_environment:\n project.activate_environment(selected_environment)\n logger.info(\n \"Environment '%s' is active\", selected_environment # noqa: WPS323\n )\n ctx.obj[\"is_default_environment\"] = is_default_environment\n ctx.obj[\"project\"] = project\n ctx.obj[\"tracker\"] = Tracker(project)\n ctx.obj[\"tracker\"].add_contexts(\n CliContext.from_click_context(ctx)\n ) # backfill the `cli` CliContext\n ctx.obj[\"legacy_tracker\"] = LegacyTracker(\n project, context_overrides=ctx.obj[\"tracker\"].contexts\n )\n except ProjectNotFound:\n ctx.obj[\"project\"] = None\n except IncompatibleVersionError:\n click.secho(\n \"This Meltano project is incompatible with this version of `meltano`.\",\n fg=\"yellow\",\n )\n click.echo(\n \"For more details, visit http://meltano.com/docs/installation.html#upgrading-meltano-version\"\n )\n sys.exit(3)\n", "path": "src/meltano/cli/cli.py"}, {"content": "\"\"\"Module for working with meltano.yml files.\"\"\"\nfrom __future__ import annotations\n\nimport copy\nfrom typing import Iterable\n\nfrom meltano.core.behavior.canonical import Canonical\nfrom meltano.core.environment import Environment\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.schedule import Schedule\nfrom meltano.core.task_sets import TaskSets\n\nVERSION = 1\n\n\nclass MeltanoFile(Canonical):\n \"\"\"Data and loading methods for meltano.yml files.\"\"\"\n\n def __init__(\n self,\n version: int = VERSION,\n default_environment: str | None = None,\n plugins: dict[str, dict] = None,\n schedules: list[dict] = None,\n environments: list[dict] = None,\n jobs: list[dict] = None,\n env: dict[str, str] = None,\n **extras,\n ):\n \"\"\"Construct a new MeltanoFile object from meltano.yml file.\n\n Args:\n version: The meltano.yml version, currently always 1.\n default_environment: The default environment to use for commands in this project.\n plugins: Plugin configuration for this project.\n schedules: Schedule configuration for this project.\n environments: Environment configuration for this project.\n jobs: Job configuration for this project.\n env: Environment variables for this project.\n extras: Additional configuration for this project.\n \"\"\"\n super().__init__(\n # Attributes will be listed in meltano.yml in this order:\n version=version,\n default_environment=default_environment,\n extras=extras,\n plugins=self.load_plugins(plugins or {}),\n schedules=self.load_schedules(schedules or []),\n environments=self.load_environments(environments or []),\n jobs=self.load_job_tasks(jobs or []),\n env=env or {},\n )\n\n def load_plugins(self, plugins: dict[str, dict]) -> Canonical:\n \"\"\"Parse the `meltano.yml` file and return it as `ProjectPlugin` instances.\n\n Args:\n plugins: Dictionary of plugin configurations.\n\n Returns:\n New ProjectPlugin instances.\n \"\"\"\n plugin_type_plugins = Canonical()\n\n for ptype in PluginType:\n plugin_type_plugins[ptype] = []\n\n # this will parse the meltano.yml file and create an instance of the\n # corresponding `plugin_class` for all the plugins.\n for plugin_type, raw_plugins in plugins.items():\n if plugin_type == PluginType.MAPPERS:\n for mapper in raw_plugins:\n plugin_type_plugins[PluginType.MAPPERS].append(\n 
ProjectPlugin(PluginType.MAPPERS, **mapper)\n )\n plugin_type_plugins[PluginType.MAPPERS].extend(\n self.get_plugins_for_mappings(mapper)\n )\n else:\n for raw_plugin in raw_plugins:\n plugin = ProjectPlugin(PluginType(plugin_type), **raw_plugin)\n plugin_type_plugins[plugin.type].append(plugin)\n\n return plugin_type_plugins\n\n def load_schedules(self, schedules: list[dict]) -> list[Schedule]:\n \"\"\"Parse the meltano.yml file and return it as Schedule instances.\n\n Args:\n schedules: List of schedule configurations.\n\n Returns:\n List of new Schedule instances.\n \"\"\"\n return list(map(Schedule.parse, schedules))\n\n @staticmethod\n def load_environments(environments: Iterable[dict]) -> list[Environment]:\n \"\"\"Parse `Environment` objects from python objects.\n\n Args:\n environments: Sequence of environment dictionaries.\n\n Returns:\n A list of `Environment` objects.\n \"\"\"\n return [Environment.parse(obj) for obj in environments]\n\n @staticmethod\n def load_job_tasks(jobs: Iterable[dict]) -> list[TaskSets]:\n \"\"\"Parse `TaskSets` objects from python objects.\n\n Args:\n jobs: Sequence of job dictionaries.\n\n Returns:\n A list of `Job` objects.\n \"\"\"\n return [TaskSets.parse(obj) for obj in jobs]\n\n @staticmethod\n def get_plugins_for_mappings(mapper_config: dict) -> list[ProjectPlugin]:\n \"\"\"Mapper plugins are a special case. They are not a single plugin, but actually a list of plugins generated from the mapping config defined within the mapper config.\n\n Args:\n mapper_config: The dict representation of a mapper config found in in meltano.yml.\n\n Returns:\n A list of `ProjectPlugin` instances.\n \"\"\"\n mapping_plugins: list[ProjectPlugin] = []\n for mapping in mapper_config.get(\"mappings\", []):\n raw_mapping_plugin = copy.deepcopy(mapper_config)\n raw_mapping_plugin[\"mapping\"] = True\n raw_mapping_plugin[\"mapping_name\"] = mapping.get(\"name\")\n raw_mapping_plugin[\"config\"] = mapping.get(\"config\")\n mapping_plugins.append(\n ProjectPlugin(PluginType.MAPPERS, **raw_mapping_plugin)\n )\n return mapping_plugins\n", "path": "src/meltano/core/meltano_file.py"}], "after_files": [{"content": "\"\"\"Definition of the top-level Click group for the Meltano CLI.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport sys\nfrom typing import NoReturn\n\nimport click\n\nimport meltano\nfrom meltano.cli.utils import InstrumentedGroup\nfrom meltano.core.behavior.versioned import IncompatibleVersionError\nfrom meltano.core.legacy_tracking import LegacyTracker\nfrom meltano.core.logging import LEVELS, setup_logging\nfrom meltano.core.project import Project, ProjectNotFound\nfrom meltano.core.project_settings_service import ProjectSettingsService\nfrom meltano.core.tracking import CliContext, Tracker\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoWindowsGlobbingGroup(InstrumentedGroup):\n \"\"\"A instrumented Click group that does not perform glob expansion on Windows.\n\n This restores the behaviour of Click's globbing to how it was before v8.\n Click (as of version 8.1.3) ignores quotes around an asterisk, which makes\n it behave differently than most shells that support globbing, and make some\n typical Meltano commands fail, e.g. 
`meltano select tap-gitlab tags \"*\"`.\n \"\"\"\n\n def main(self, *args, **kwargs) -> NoReturn:\n \"\"\"Invoke the Click CLI with Windows globbing disabled.\n\n Args:\n args: Positional arguments for the Click group.\n kwargs: Keyword arguments for the Click group.\n \"\"\"\n return super().main(*args, windows_expand_args=False, **kwargs)\n\n\[email protected](\n cls=NoWindowsGlobbingGroup, invoke_without_command=True, no_args_is_help=True\n)\[email protected](\"--log-level\", type=click.Choice(LEVELS.keys()))\[email protected](\n \"--log-config\", type=str, help=\"Path to a python logging yaml config file.\"\n)\[email protected](\"-v\", \"--verbose\", count=True, help=\"Not used.\")\[email protected](\n \"--environment\",\n envvar=\"MELTANO_ENVIRONMENT\",\n help=\"Meltano environment name.\",\n)\[email protected](\n \"--no-environment\", is_flag=True, default=False, help=\"Don't use any environment.\"\n)\[email protected]_option(version=meltano.__version__, prog_name=\"meltano\")\[email protected]_context\ndef cli( # noqa: WPS231\n ctx,\n log_level: str,\n log_config: str,\n verbose: int,\n environment: str,\n no_environment: bool,\n): # noqa: WPS231\n \"\"\"\n ELT for the DataOps era.\n\n \\b\\nRead more at https://www.meltano.com/docs/command-line-interface.html\n \"\"\"\n ctx.ensure_object(dict)\n\n if log_level:\n ProjectSettingsService.config_override[\"cli.log_level\"] = log_level\n\n if log_config:\n ProjectSettingsService.config_override[\"cli.log_config\"] = log_config\n\n ctx.obj[\"verbosity\"] = verbose\n\n try: # noqa: WPS229\n project = Project.find()\n setup_logging(project)\n project_setting_service = ProjectSettingsService(project)\n\n readonly = project_setting_service.get(\"project_readonly\")\n if readonly:\n project.readonly = True\n if project.readonly:\n logger.debug(\"Project is read-only.\")\n\n # detect active environment\n selected_environment = None\n is_default_environment = False\n if no_environment or (environment and environment.lower() == \"null\"):\n logger.info(\"No environment is active\")\n elif environment:\n selected_environment = environment\n elif project_setting_service.get(\"default_environment\"):\n selected_environment = project_setting_service.get(\"default_environment\")\n is_default_environment = True\n # activate environment\n if selected_environment:\n project.activate_environment(selected_environment)\n logger.info(\n \"Environment '%s' is active\", selected_environment # noqa: WPS323\n )\n ctx.obj[\"is_default_environment\"] = is_default_environment\n ctx.obj[\"project\"] = project\n ctx.obj[\"tracker\"] = Tracker(project)\n ctx.obj[\"tracker\"].add_contexts(\n CliContext.from_click_context(ctx)\n ) # backfill the `cli` CliContext\n ctx.obj[\"legacy_tracker\"] = LegacyTracker(\n project, context_overrides=ctx.obj[\"tracker\"].contexts\n )\n except ProjectNotFound:\n ctx.obj[\"project\"] = None\n except IncompatibleVersionError:\n click.secho(\n \"This Meltano project is incompatible with this version of `meltano`.\",\n fg=\"yellow\",\n )\n click.echo(\n \"For more details, visit http://meltano.com/docs/installation.html#upgrading-meltano-version\"\n )\n sys.exit(3)\n", "path": "src/meltano/cli/cli.py"}, {"content": "\"\"\"Module for working with meltano.yml files.\"\"\"\nfrom __future__ import annotations\n\nimport copy\nfrom typing import Iterable\n\nfrom meltano.core.behavior.canonical import Canonical\nfrom meltano.core.environment import Environment\nfrom meltano.core.plugin import PluginType\nfrom meltano.core.plugin.project_plugin 
import ProjectPlugin\nfrom meltano.core.schedule import Schedule\nfrom meltano.core.task_sets import TaskSets\n\nVERSION = 1\n\n\nclass MeltanoFile(Canonical):\n \"\"\"Data and loading methods for meltano.yml files.\"\"\"\n\n def __init__(\n self,\n version: int = VERSION,\n plugins: dict[str, dict] = None,\n schedules: list[dict] = None,\n environments: list[dict] = None,\n jobs: list[dict] = None,\n env: dict[str, str] = None,\n **extras,\n ):\n \"\"\"Construct a new MeltanoFile object from meltano.yml file.\n\n Args:\n version: The meltano.yml version, currently always 1.\n plugins: Plugin configuration for this project.\n schedules: Schedule configuration for this project.\n environments: Environment configuration for this project.\n jobs: Job configuration for this project.\n env: Environment variables for this project.\n extras: Additional configuration for this project.\n \"\"\"\n super().__init__(\n # Attributes will be listed in meltano.yml in this order:\n version=version,\n extras=extras,\n plugins=self.load_plugins(plugins or {}),\n schedules=self.load_schedules(schedules or []),\n environments=self.load_environments(environments or []),\n jobs=self.load_job_tasks(jobs or []),\n env=env or {},\n )\n\n def load_plugins(self, plugins: dict[str, dict]) -> Canonical:\n \"\"\"Parse the `meltano.yml` file and return it as `ProjectPlugin` instances.\n\n Args:\n plugins: Dictionary of plugin configurations.\n\n Returns:\n New ProjectPlugin instances.\n \"\"\"\n plugin_type_plugins = Canonical()\n\n for ptype in PluginType:\n plugin_type_plugins[ptype] = []\n\n # this will parse the meltano.yml file and create an instance of the\n # corresponding `plugin_class` for all the plugins.\n for plugin_type, raw_plugins in plugins.items():\n if plugin_type == PluginType.MAPPERS:\n for mapper in raw_plugins:\n plugin_type_plugins[PluginType.MAPPERS].append(\n ProjectPlugin(PluginType.MAPPERS, **mapper)\n )\n plugin_type_plugins[PluginType.MAPPERS].extend(\n self.get_plugins_for_mappings(mapper)\n )\n else:\n for raw_plugin in raw_plugins:\n plugin = ProjectPlugin(PluginType(plugin_type), **raw_plugin)\n plugin_type_plugins[plugin.type].append(plugin)\n\n return plugin_type_plugins\n\n def load_schedules(self, schedules: list[dict]) -> list[Schedule]:\n \"\"\"Parse the meltano.yml file and return it as Schedule instances.\n\n Args:\n schedules: List of schedule configurations.\n\n Returns:\n List of new Schedule instances.\n \"\"\"\n return list(map(Schedule.parse, schedules))\n\n @staticmethod\n def load_environments(environments: Iterable[dict]) -> list[Environment]:\n \"\"\"Parse `Environment` objects from python objects.\n\n Args:\n environments: Sequence of environment dictionaries.\n\n Returns:\n A list of `Environment` objects.\n \"\"\"\n return [Environment.parse(obj) for obj in environments]\n\n @staticmethod\n def load_job_tasks(jobs: Iterable[dict]) -> list[TaskSets]:\n \"\"\"Parse `TaskSets` objects from python objects.\n\n Args:\n jobs: Sequence of job dictionaries.\n\n Returns:\n A list of `Job` objects.\n \"\"\"\n return [TaskSets.parse(obj) for obj in jobs]\n\n @staticmethod\n def get_plugins_for_mappings(mapper_config: dict) -> list[ProjectPlugin]:\n \"\"\"Mapper plugins are a special case. 
They are not a single plugin, but actually a list of plugins generated from the mapping config defined within the mapper config.\n\n Args:\n mapper_config: The dict representation of a mapper config found in in meltano.yml.\n\n Returns:\n A list of `ProjectPlugin` instances.\n \"\"\"\n mapping_plugins: list[ProjectPlugin] = []\n for mapping in mapper_config.get(\"mappings\", []):\n raw_mapping_plugin = copy.deepcopy(mapper_config)\n raw_mapping_plugin[\"mapping\"] = True\n raw_mapping_plugin[\"mapping_name\"] = mapping.get(\"name\")\n raw_mapping_plugin[\"config\"] = mapping.get(\"config\")\n mapping_plugins.append(\n ProjectPlugin(PluginType.MAPPERS, **raw_mapping_plugin)\n )\n return mapping_plugins\n", "path": "src/meltano/core/meltano_file.py"}]}
3,206
573
gh_patches_debug_2071
rasdani/github-patches
git_diff
ibis-project__ibis-7364
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bug: Command drop view IF EXISTS does not exist in Oracle ### What happened? Oracle queries fail while dropping the view. IF EXISTS is not supported in Oracle: https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/DROP-VIEW.html ### What version of ibis are you using? 7.0.0 ### What backend(s) are you using, if any? Oracle ### Relevant log output ```sh sqlalchemy.exc.DatabaseError: (oracledb.exceptions.DatabaseError) ORA-00933: SQL command not properly ended [SQL: DROP VIEW IF EXISTS "_ibis_oracle_metadata_d4gbmh4h2fa2jnq5qo3o3rg6sa"] ``` ### Code of Conduct - [X] I agree to follow this project's Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ibis/backends/oracle/__init__.py` Content: ``` 1 """The Oracle backend.""" 2 3 from __future__ import annotations 4 5 import atexit 6 import contextlib 7 import sys 8 import warnings 9 from typing import TYPE_CHECKING, Any 10 11 import oracledb 12 13 from ibis import util 14 15 # Wow, this is truly horrible 16 # Get out your clippers, it's time to shave a yak. 17 # 18 # 1. snowflake-sqlalchemy doesn't support sqlalchemy 2.0 19 # 2. oracledb is only supported in sqlalchemy 2.0 20 # 3. Ergo, module hacking is required to avoid doing a silly amount of work 21 # to create multiple lockfiles or port snowflake away from sqlalchemy 22 # 4. Also the version needs to be spoofed to be >= 7 or else the cx_Oracle 23 # dialect barfs 24 oracledb.__version__ = oracledb.version = "7" 25 26 sys.modules["cx_Oracle"] = oracledb 27 28 import sqlalchemy as sa # noqa: E402 29 30 import ibis.common.exceptions as exc # noqa: E402 31 import ibis.expr.datatypes as dt # noqa: E402 32 import ibis.expr.operations as ops # noqa: E402 33 import ibis.expr.schema as sch # noqa: E402 34 from ibis.backends.base.sql.alchemy import ( # noqa: E402 35 AlchemyCompiler, 36 AlchemyExprTranslator, 37 BaseAlchemyBackend, 38 ) 39 from ibis.backends.oracle.datatypes import OracleType # noqa: E402 40 from ibis.backends.oracle.registry import operation_registry # noqa: E402 41 42 if TYPE_CHECKING: 43 from collections.abc import Iterable 44 45 46 class OracleExprTranslator(AlchemyExprTranslator): 47 _registry = operation_registry.copy() 48 _rewrites = AlchemyExprTranslator._rewrites.copy() 49 _dialect_name = "oracle" 50 _has_reduction_filter_syntax = False 51 _require_order_by = ( 52 *AlchemyExprTranslator._require_order_by, 53 ops.Reduction, 54 ops.Lag, 55 ops.Lead, 56 ) 57 58 _forbids_frame_clause = ( 59 *AlchemyExprTranslator._forbids_frame_clause, 60 ops.Lag, 61 ops.Lead, 62 ) 63 64 _quote_column_names = True 65 _quote_table_names = True 66 67 type_mapper = OracleType 68 69 70 class OracleCompiler(AlchemyCompiler): 71 translator_class = OracleExprTranslator 72 support_values_syntax_in_select = False 73 supports_indexed_grouping_keys = False 74 null_limit = None 75 76 77 class Backend(BaseAlchemyBackend): 78 name = "oracle" 79 compiler = OracleCompiler 80 supports_create_or_replace = False 81 supports_temporary_tables = True 82 _temporary_prefix = "GLOBAL TEMPORARY" 83 84 def do_connect( 85 self, 86 *, 87 user: str, 88 password: str, 89 host: str = "localhost", 90 port: int = 1521, 91 database: str | None = None, 92 sid: str | None = None, 93 service_name: str | None = None, 94 dsn: str | None = None, 95 **_: Any, 96 ) -> None: 97 """Create an Ibis client 
using the passed connection parameters. 98 99 Parameters 100 ---------- 101 user 102 Username 103 password 104 Password 105 host 106 Hostname 107 port 108 Port 109 database 110 Used as an Oracle service name if provided. 111 sid 112 Unique name of an Oracle Instance, used to construct a DSN if 113 provided. 114 service_name 115 Oracle service name, used to construct a DSN if provided. Only one 116 of database and service_name should be provided. 117 dsn 118 An Oracle Data Source Name. If provided, overrides all other 119 connection arguments except username and password. 120 """ 121 # SID: unique name of an INSTANCE running an oracle process (a single, identifiable machine) 122 # service name: an ALIAS to one (or many) individual instances that can 123 # be hotswapped without the client knowing / caring 124 if dsn is not None and ( 125 database is not None or sid is not None or service_name is not None 126 ): 127 warnings.warn( 128 "Oracle DSN provided, overriding additional provided connection arguments" 129 ) 130 131 if service_name is not None and database is not None: 132 raise exc.IbisInputError( 133 "Values provided for both service_name and database. " 134 "Both of these values map to an Oracle service_name, " 135 "please provide only one of them." 136 ) 137 138 if service_name is None and database is not None: 139 service_name = database 140 141 if dsn is None: 142 dsn = oracledb.makedsn(host, port, service_name=service_name, sid=sid) 143 url = sa.engine.url.make_url(f"oracle://{user}:{password}@{dsn}") 144 145 engine = sa.create_engine( 146 url, 147 poolclass=sa.pool.StaticPool, 148 # We set the statement cache size to 0 because Oracle will otherwise 149 # attempt to reuse prepared statements even if the type of the bound variable 150 # has changed. 151 # This is apparently accepted behavior. 
152 # https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_b.html#statement-caching-in-thin-and-thick-modes 153 connect_args={"stmtcachesize": 0}, 154 ) 155 156 super().do_connect(engine) 157 158 def normalize_name(name): 159 if name is None: 160 return None 161 elif not name: 162 return "" 163 elif name.lower() == name: 164 return sa.sql.quoted_name(name, quote=True) 165 else: 166 return name 167 168 self.con.dialect.normalize_name = normalize_name 169 170 def _from_url(self, url: str, **kwargs): 171 return self.do_connect(user=url.username, password=url.password, dsn=url.host) 172 173 @property 174 def current_database(self) -> str: 175 return self._scalar_query("SELECT * FROM global_name") 176 177 def _metadata(self, query: str) -> Iterable[tuple[str, dt.DataType]]: 178 from sqlalchemy_views import CreateView, DropView 179 180 name = util.gen_name("oracle_metadata") 181 182 view = sa.table(name) 183 create_view = CreateView(view, sa.text(query)) 184 drop_view = DropView(view, if_exists=True) 185 186 t = sa.table( 187 "all_tab_columns", 188 sa.column("table_name"), 189 sa.column("column_name"), 190 sa.column("data_type"), 191 sa.column("data_precision"), 192 sa.column("data_scale"), 193 sa.column("nullable"), 194 ) 195 metadata_query = sa.select( 196 t.c.column_name, 197 t.c.data_type, 198 t.c.data_precision, 199 t.c.data_scale, 200 (t.c.nullable == "Y").label("nullable"), 201 ).where(t.c.table_name == name) 202 203 with self.begin() as con: 204 con.execute(create_view) 205 try: 206 results = con.execute(metadata_query).fetchall() 207 finally: 208 # drop the view no matter what 209 con.execute(drop_view) 210 211 for name, type_string, precision, scale, nullable in results: 212 if precision is not None and scale is not None and precision != 0: 213 typ = dt.Decimal(precision=precision, scale=scale, nullable=nullable) 214 elif precision == 0: 215 # TODO: how to disambiguate between int and float here without inspecting the value? 216 typ = dt.float 217 else: 218 typ = OracleType.from_string(type_string, nullable=nullable) 219 yield name, typ 220 221 def _table_from_schema( 222 self, 223 name: str, 224 schema: sch.Schema, 225 temp: bool = False, 226 database: str | None = None, 227 **kwargs: Any, 228 ) -> sa.Table: 229 if temp: 230 kwargs["oracle_on_commit"] = "PRESERVE ROWS" 231 t = super()._table_from_schema(name, schema, temp, database, **kwargs) 232 if temp: 233 atexit.register(self._clean_up_tmp_table, t) 234 return t 235 236 def _clean_up_tmp_table(self, name: str) -> None: 237 tmptable = self._get_sqla_table(name, autoload=False) 238 with self.begin() as bind: 239 # global temporary tables cannot be dropped without first truncating them 240 # 241 # https://stackoverflow.com/questions/32423397/force-oracle-drop-global-temp-table 242 # 243 # ignore DatabaseError exceptions because the table may not exist 244 # because it's already been deleted 245 with contextlib.suppress(sa.exc.DatabaseError): 246 bind.exec_driver_sql(f'TRUNCATE TABLE "{tmptable.name}"') 247 with contextlib.suppress(sa.exc.DatabaseError): 248 tmptable.drop(bind=bind) 249 250 def _clean_up_cached_table(self, op): 251 self._clean_up_tmp_table(op.name) 252 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ibis/backends/oracle/__init__.py b/ibis/backends/oracle/__init__.py --- a/ibis/backends/oracle/__init__.py +++ b/ibis/backends/oracle/__init__.py @@ -181,7 +181,7 @@ view = sa.table(name) create_view = CreateView(view, sa.text(query)) - drop_view = DropView(view, if_exists=True) + drop_view = DropView(view, if_exists=False) t = sa.table( "all_tab_columns",
{"golden_diff": "diff --git a/ibis/backends/oracle/__init__.py b/ibis/backends/oracle/__init__.py\n--- a/ibis/backends/oracle/__init__.py\n+++ b/ibis/backends/oracle/__init__.py\n@@ -181,7 +181,7 @@\n \n view = sa.table(name)\n create_view = CreateView(view, sa.text(query))\n- drop_view = DropView(view, if_exists=True)\n+ drop_view = DropView(view, if_exists=False)\n \n t = sa.table(\n \"all_tab_columns\",\n", "issue": "bug: Command drop view IF EXISTS does not exist in Oracle\n### What happened?\n\nOracle queries fail while dropping the view.\r\n\r\nIF EXISTS is not supported in Oracle: https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/DROP-VIEW.html\n\n### What version of ibis are you using?\n\n7.0.0\n\n### What backend(s) are you using, if any?\n\nOracle\n\n### Relevant log output\n\n```sh\nsqlalchemy.exc.DatabaseError: (oracledb.exceptions.DatabaseError) ORA-00933: SQL command not properly ended\r\n[SQL: DROP VIEW IF EXISTS \"_ibis_oracle_metadata_d4gbmh4h2fa2jnq5qo3o3rg6sa\"]\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"The Oracle backend.\"\"\"\n\nfrom __future__ import annotations\n\nimport atexit\nimport contextlib\nimport sys\nimport warnings\nfrom typing import TYPE_CHECKING, Any\n\nimport oracledb\n\nfrom ibis import util\n\n# Wow, this is truly horrible\n# Get out your clippers, it's time to shave a yak.\n#\n# 1. snowflake-sqlalchemy doesn't support sqlalchemy 2.0\n# 2. oracledb is only supported in sqlalchemy 2.0\n# 3. Ergo, module hacking is required to avoid doing a silly amount of work\n# to create multiple lockfiles or port snowflake away from sqlalchemy\n# 4. Also the version needs to be spoofed to be >= 7 or else the cx_Oracle\n# dialect barfs\noracledb.__version__ = oracledb.version = \"7\"\n\nsys.modules[\"cx_Oracle\"] = oracledb\n\nimport sqlalchemy as sa # noqa: E402\n\nimport ibis.common.exceptions as exc # noqa: E402\nimport ibis.expr.datatypes as dt # noqa: E402\nimport ibis.expr.operations as ops # noqa: E402\nimport ibis.expr.schema as sch # noqa: E402\nfrom ibis.backends.base.sql.alchemy import ( # noqa: E402\n AlchemyCompiler,\n AlchemyExprTranslator,\n BaseAlchemyBackend,\n)\nfrom ibis.backends.oracle.datatypes import OracleType # noqa: E402\nfrom ibis.backends.oracle.registry import operation_registry # noqa: E402\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n\nclass OracleExprTranslator(AlchemyExprTranslator):\n _registry = operation_registry.copy()\n _rewrites = AlchemyExprTranslator._rewrites.copy()\n _dialect_name = \"oracle\"\n _has_reduction_filter_syntax = False\n _require_order_by = (\n *AlchemyExprTranslator._require_order_by,\n ops.Reduction,\n ops.Lag,\n ops.Lead,\n )\n\n _forbids_frame_clause = (\n *AlchemyExprTranslator._forbids_frame_clause,\n ops.Lag,\n ops.Lead,\n )\n\n _quote_column_names = True\n _quote_table_names = True\n\n type_mapper = OracleType\n\n\nclass OracleCompiler(AlchemyCompiler):\n translator_class = OracleExprTranslator\n support_values_syntax_in_select = False\n supports_indexed_grouping_keys = False\n null_limit = None\n\n\nclass Backend(BaseAlchemyBackend):\n name = \"oracle\"\n compiler = OracleCompiler\n supports_create_or_replace = False\n supports_temporary_tables = True\n _temporary_prefix = \"GLOBAL TEMPORARY\"\n\n def do_connect(\n self,\n *,\n user: str,\n password: str,\n host: str = \"localhost\",\n port: int = 1521,\n database: str | None = None,\n sid: str | None = None,\n service_name: str | 
None = None,\n dsn: str | None = None,\n **_: Any,\n ) -> None:\n \"\"\"Create an Ibis client using the passed connection parameters.\n\n Parameters\n ----------\n user\n Username\n password\n Password\n host\n Hostname\n port\n Port\n database\n Used as an Oracle service name if provided.\n sid\n Unique name of an Oracle Instance, used to construct a DSN if\n provided.\n service_name\n Oracle service name, used to construct a DSN if provided. Only one\n of database and service_name should be provided.\n dsn\n An Oracle Data Source Name. If provided, overrides all other\n connection arguments except username and password.\n \"\"\"\n # SID: unique name of an INSTANCE running an oracle process (a single, identifiable machine)\n # service name: an ALIAS to one (or many) individual instances that can\n # be hotswapped without the client knowing / caring\n if dsn is not None and (\n database is not None or sid is not None or service_name is not None\n ):\n warnings.warn(\n \"Oracle DSN provided, overriding additional provided connection arguments\"\n )\n\n if service_name is not None and database is not None:\n raise exc.IbisInputError(\n \"Values provided for both service_name and database. \"\n \"Both of these values map to an Oracle service_name, \"\n \"please provide only one of them.\"\n )\n\n if service_name is None and database is not None:\n service_name = database\n\n if dsn is None:\n dsn = oracledb.makedsn(host, port, service_name=service_name, sid=sid)\n url = sa.engine.url.make_url(f\"oracle://{user}:{password}@{dsn}\")\n\n engine = sa.create_engine(\n url,\n poolclass=sa.pool.StaticPool,\n # We set the statement cache size to 0 because Oracle will otherwise\n # attempt to reuse prepared statements even if the type of the bound variable\n # has changed.\n # This is apparently accepted behavior.\n # https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_b.html#statement-caching-in-thin-and-thick-modes\n connect_args={\"stmtcachesize\": 0},\n )\n\n super().do_connect(engine)\n\n def normalize_name(name):\n if name is None:\n return None\n elif not name:\n return \"\"\n elif name.lower() == name:\n return sa.sql.quoted_name(name, quote=True)\n else:\n return name\n\n self.con.dialect.normalize_name = normalize_name\n\n def _from_url(self, url: str, **kwargs):\n return self.do_connect(user=url.username, password=url.password, dsn=url.host)\n\n @property\n def current_database(self) -> str:\n return self._scalar_query(\"SELECT * FROM global_name\")\n\n def _metadata(self, query: str) -> Iterable[tuple[str, dt.DataType]]:\n from sqlalchemy_views import CreateView, DropView\n\n name = util.gen_name(\"oracle_metadata\")\n\n view = sa.table(name)\n create_view = CreateView(view, sa.text(query))\n drop_view = DropView(view, if_exists=True)\n\n t = sa.table(\n \"all_tab_columns\",\n sa.column(\"table_name\"),\n sa.column(\"column_name\"),\n sa.column(\"data_type\"),\n sa.column(\"data_precision\"),\n sa.column(\"data_scale\"),\n sa.column(\"nullable\"),\n )\n metadata_query = sa.select(\n t.c.column_name,\n t.c.data_type,\n t.c.data_precision,\n t.c.data_scale,\n (t.c.nullable == \"Y\").label(\"nullable\"),\n ).where(t.c.table_name == name)\n\n with self.begin() as con:\n con.execute(create_view)\n try:\n results = con.execute(metadata_query).fetchall()\n finally:\n # drop the view no matter what\n con.execute(drop_view)\n\n for name, type_string, precision, scale, nullable in results:\n if precision is not None and scale is not None and precision != 0:\n typ = 
dt.Decimal(precision=precision, scale=scale, nullable=nullable)\n elif precision == 0:\n # TODO: how to disambiguate between int and float here without inspecting the value?\n typ = dt.float\n else:\n typ = OracleType.from_string(type_string, nullable=nullable)\n yield name, typ\n\n def _table_from_schema(\n self,\n name: str,\n schema: sch.Schema,\n temp: bool = False,\n database: str | None = None,\n **kwargs: Any,\n ) -> sa.Table:\n if temp:\n kwargs[\"oracle_on_commit\"] = \"PRESERVE ROWS\"\n t = super()._table_from_schema(name, schema, temp, database, **kwargs)\n if temp:\n atexit.register(self._clean_up_tmp_table, t)\n return t\n\n def _clean_up_tmp_table(self, name: str) -> None:\n tmptable = self._get_sqla_table(name, autoload=False)\n with self.begin() as bind:\n # global temporary tables cannot be dropped without first truncating them\n #\n # https://stackoverflow.com/questions/32423397/force-oracle-drop-global-temp-table\n #\n # ignore DatabaseError exceptions because the table may not exist\n # because it's already been deleted\n with contextlib.suppress(sa.exc.DatabaseError):\n bind.exec_driver_sql(f'TRUNCATE TABLE \"{tmptable.name}\"')\n with contextlib.suppress(sa.exc.DatabaseError):\n tmptable.drop(bind=bind)\n\n def _clean_up_cached_table(self, op):\n self._clean_up_tmp_table(op.name)\n", "path": "ibis/backends/oracle/__init__.py"}], "after_files": [{"content": "\"\"\"The Oracle backend.\"\"\"\n\nfrom __future__ import annotations\n\nimport atexit\nimport contextlib\nimport sys\nimport warnings\nfrom typing import TYPE_CHECKING, Any\n\nimport oracledb\n\nfrom ibis import util\n\n# Wow, this is truly horrible\n# Get out your clippers, it's time to shave a yak.\n#\n# 1. snowflake-sqlalchemy doesn't support sqlalchemy 2.0\n# 2. oracledb is only supported in sqlalchemy 2.0\n# 3. Ergo, module hacking is required to avoid doing a silly amount of work\n# to create multiple lockfiles or port snowflake away from sqlalchemy\n# 4. 
Also the version needs to be spoofed to be >= 7 or else the cx_Oracle\n# dialect barfs\noracledb.__version__ = oracledb.version = \"7\"\n\nsys.modules[\"cx_Oracle\"] = oracledb\n\nimport sqlalchemy as sa # noqa: E402\n\nimport ibis.common.exceptions as exc # noqa: E402\nimport ibis.expr.datatypes as dt # noqa: E402\nimport ibis.expr.operations as ops # noqa: E402\nimport ibis.expr.schema as sch # noqa: E402\nfrom ibis.backends.base.sql.alchemy import ( # noqa: E402\n AlchemyCompiler,\n AlchemyExprTranslator,\n BaseAlchemyBackend,\n)\nfrom ibis.backends.oracle.datatypes import OracleType # noqa: E402\nfrom ibis.backends.oracle.registry import operation_registry # noqa: E402\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n\nclass OracleExprTranslator(AlchemyExprTranslator):\n _registry = operation_registry.copy()\n _rewrites = AlchemyExprTranslator._rewrites.copy()\n _dialect_name = \"oracle\"\n _has_reduction_filter_syntax = False\n _require_order_by = (\n *AlchemyExprTranslator._require_order_by,\n ops.Reduction,\n ops.Lag,\n ops.Lead,\n )\n\n _forbids_frame_clause = (\n *AlchemyExprTranslator._forbids_frame_clause,\n ops.Lag,\n ops.Lead,\n )\n\n _quote_column_names = True\n _quote_table_names = True\n\n type_mapper = OracleType\n\n\nclass OracleCompiler(AlchemyCompiler):\n translator_class = OracleExprTranslator\n support_values_syntax_in_select = False\n supports_indexed_grouping_keys = False\n null_limit = None\n\n\nclass Backend(BaseAlchemyBackend):\n name = \"oracle\"\n compiler = OracleCompiler\n supports_create_or_replace = False\n supports_temporary_tables = True\n _temporary_prefix = \"GLOBAL TEMPORARY\"\n\n def do_connect(\n self,\n *,\n user: str,\n password: str,\n host: str = \"localhost\",\n port: int = 1521,\n database: str | None = None,\n sid: str | None = None,\n service_name: str | None = None,\n dsn: str | None = None,\n **_: Any,\n ) -> None:\n \"\"\"Create an Ibis client using the passed connection parameters.\n\n Parameters\n ----------\n user\n Username\n password\n Password\n host\n Hostname\n port\n Port\n database\n Used as an Oracle service name if provided.\n sid\n Unique name of an Oracle Instance, used to construct a DSN if\n provided.\n service_name\n Oracle service name, used to construct a DSN if provided. Only one\n of database and service_name should be provided.\n dsn\n An Oracle Data Source Name. If provided, overrides all other\n connection arguments except username and password.\n \"\"\"\n # SID: unique name of an INSTANCE running an oracle process (a single, identifiable machine)\n # service name: an ALIAS to one (or many) individual instances that can\n # be hotswapped without the client knowing / caring\n if dsn is not None and (\n database is not None or sid is not None or service_name is not None\n ):\n warnings.warn(\n \"Oracle DSN provided, overriding additional provided connection arguments\"\n )\n\n if service_name is not None and database is not None:\n raise exc.IbisInputError(\n \"Values provided for both service_name and database. 
\"\n \"Both of these values map to an Oracle service_name, \"\n \"please provide only one of them.\"\n )\n\n if service_name is None and database is not None:\n service_name = database\n\n if dsn is None:\n dsn = oracledb.makedsn(host, port, service_name=service_name, sid=sid)\n url = sa.engine.url.make_url(f\"oracle://{user}:{password}@{dsn}\")\n\n engine = sa.create_engine(\n url,\n poolclass=sa.pool.StaticPool,\n # We set the statement cache size to 0 because Oracle will otherwise\n # attempt to reuse prepared statements even if the type of the bound variable\n # has changed.\n # This is apparently accepted behavior.\n # https://python-oracledb.readthedocs.io/en/latest/user_guide/appendix_b.html#statement-caching-in-thin-and-thick-modes\n connect_args={\"stmtcachesize\": 0},\n )\n\n super().do_connect(engine)\n\n def normalize_name(name):\n if name is None:\n return None\n elif not name:\n return \"\"\n elif name.lower() == name:\n return sa.sql.quoted_name(name, quote=True)\n else:\n return name\n\n self.con.dialect.normalize_name = normalize_name\n\n def _from_url(self, url: str, **kwargs):\n return self.do_connect(user=url.username, password=url.password, dsn=url.host)\n\n @property\n def current_database(self) -> str:\n return self._scalar_query(\"SELECT * FROM global_name\")\n\n def _metadata(self, query: str) -> Iterable[tuple[str, dt.DataType]]:\n from sqlalchemy_views import CreateView, DropView\n\n name = util.gen_name(\"oracle_metadata\")\n\n view = sa.table(name)\n create_view = CreateView(view, sa.text(query))\n drop_view = DropView(view, if_exists=False)\n\n t = sa.table(\n \"all_tab_columns\",\n sa.column(\"table_name\"),\n sa.column(\"column_name\"),\n sa.column(\"data_type\"),\n sa.column(\"data_precision\"),\n sa.column(\"data_scale\"),\n sa.column(\"nullable\"),\n )\n metadata_query = sa.select(\n t.c.column_name,\n t.c.data_type,\n t.c.data_precision,\n t.c.data_scale,\n (t.c.nullable == \"Y\").label(\"nullable\"),\n ).where(t.c.table_name == name)\n\n with self.begin() as con:\n con.execute(create_view)\n try:\n results = con.execute(metadata_query).fetchall()\n finally:\n # drop the view no matter what\n con.execute(drop_view)\n\n for name, type_string, precision, scale, nullable in results:\n if precision is not None and scale is not None and precision != 0:\n typ = dt.Decimal(precision=precision, scale=scale, nullable=nullable)\n elif precision == 0:\n # TODO: how to disambiguate between int and float here without inspecting the value?\n typ = dt.float\n else:\n typ = OracleType.from_string(type_string, nullable=nullable)\n yield name, typ\n\n def _table_from_schema(\n self,\n name: str,\n schema: sch.Schema,\n temp: bool = False,\n database: str | None = None,\n **kwargs: Any,\n ) -> sa.Table:\n if temp:\n kwargs[\"oracle_on_commit\"] = \"PRESERVE ROWS\"\n t = super()._table_from_schema(name, schema, temp, database, **kwargs)\n if temp:\n atexit.register(self._clean_up_tmp_table, t)\n return t\n\n def _clean_up_tmp_table(self, name: str) -> None:\n tmptable = self._get_sqla_table(name, autoload=False)\n with self.begin() as bind:\n # global temporary tables cannot be dropped without first truncating them\n #\n # https://stackoverflow.com/questions/32423397/force-oracle-drop-global-temp-table\n #\n # ignore DatabaseError exceptions because the table may not exist\n # because it's already been deleted\n with contextlib.suppress(sa.exc.DatabaseError):\n bind.exec_driver_sql(f'TRUNCATE TABLE \"{tmptable.name}\"')\n with contextlib.suppress(sa.exc.DatabaseError):\n 
tmptable.drop(bind=bind)\n\n def _clean_up_cached_table(self, op):\n self._clean_up_tmp_table(op.name)\n", "path": "ibis/backends/oracle/__init__.py"}]}
3,031
129
gh_patches_debug_14346
rasdani/github-patches
git_diff
svthalia__concrexit-2857
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Become a member menu item is missing! ### Describe the bug Become a member menu item is missing! ### How to reproduce 1. Check staging, the menu item is missing ### Expected behaviour It should be there ### Screenshots If applicable, add screenshots to help explain your problem. ### Additional context Probably this was missed in #2728 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/registrations/apps.py` Content: ``` 1 """Configuration for the newsletters package.""" 2 from django.apps import AppConfig 3 from django.utils.translation import gettext_lazy as _ 4 5 6 class RegistrationsConfig(AppConfig): 7 """AppConfig for the registrations package.""" 8 9 name = "registrations" 10 verbose_name = _("Registrations") 11 12 def ready(self): 13 """Import the signals when the app is ready.""" 14 # pylint: disable=unused-import,import-outside-toplevel 15 from . import signals 16 from .payables import register 17 18 register() 19 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/registrations/apps.py b/website/registrations/apps.py --- a/website/registrations/apps.py +++ b/website/registrations/apps.py @@ -1,5 +1,6 @@ """Configuration for the newsletters package.""" from django.apps import AppConfig +from django.urls import reverse from django.utils.translation import gettext_lazy as _ @@ -16,3 +17,16 @@ from .payables import register register() + + def menu_items(self): + return { + "categories": [{"name": "association", "title": "Association", "key": 1}], + "items": [ + { + "category": "association", + "title": "Become a member", + "url": reverse("registrations:index"), + "key": 6, + }, + ], + }
{"golden_diff": "diff --git a/website/registrations/apps.py b/website/registrations/apps.py\n--- a/website/registrations/apps.py\n+++ b/website/registrations/apps.py\n@@ -1,5 +1,6 @@\n \"\"\"Configuration for the newsletters package.\"\"\"\n from django.apps import AppConfig\n+from django.urls import reverse\n from django.utils.translation import gettext_lazy as _\n \n \n@@ -16,3 +17,16 @@\n from .payables import register\n \n register()\n+\n+ def menu_items(self):\n+ return {\n+ \"categories\": [{\"name\": \"association\", \"title\": \"Association\", \"key\": 1}],\n+ \"items\": [\n+ {\n+ \"category\": \"association\",\n+ \"title\": \"Become a member\",\n+ \"url\": reverse(\"registrations:index\"),\n+ \"key\": 6,\n+ },\n+ ],\n+ }\n", "issue": "Become a member menu item is missing!\n### Describe the bug\r\nBecome a member menu item is missing!\r\n\r\n### How to reproduce\r\n1. Check staging, the menu item is missing\r\n\r\n\r\n### Expected behaviour\r\nIt should be there\r\n\r\n### Screenshots\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n### Additional context\r\nProbably this was missed in #2728 \n", "before_files": [{"content": "\"\"\"Configuration for the newsletters package.\"\"\"\nfrom django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass RegistrationsConfig(AppConfig):\n \"\"\"AppConfig for the registrations package.\"\"\"\n\n name = \"registrations\"\n verbose_name = _(\"Registrations\")\n\n def ready(self):\n \"\"\"Import the signals when the app is ready.\"\"\"\n # pylint: disable=unused-import,import-outside-toplevel\n from . import signals\n from .payables import register\n\n register()\n", "path": "website/registrations/apps.py"}], "after_files": [{"content": "\"\"\"Configuration for the newsletters package.\"\"\"\nfrom django.apps import AppConfig\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass RegistrationsConfig(AppConfig):\n \"\"\"AppConfig for the registrations package.\"\"\"\n\n name = \"registrations\"\n verbose_name = _(\"Registrations\")\n\n def ready(self):\n \"\"\"Import the signals when the app is ready.\"\"\"\n # pylint: disable=unused-import,import-outside-toplevel\n from . import signals\n from .payables import register\n\n register()\n\n def menu_items(self):\n return {\n \"categories\": [{\"name\": \"association\", \"title\": \"Association\", \"key\": 1}],\n \"items\": [\n {\n \"category\": \"association\",\n \"title\": \"Become a member\",\n \"url\": reverse(\"registrations:index\"),\n \"key\": 6,\n },\n ],\n }\n", "path": "website/registrations/apps.py"}]}
472
194
gh_patches_debug_13199
rasdani/github-patches
git_diff
Nitrate__Nitrate-671
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Replace smart_text() with smart_str() To suppress warning: `RemovedInDjango40Warning: smart_text() is deprecated in favor of smart_str()` Run command `runserver`, find out the warning messages from output. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/tcms/logs/views.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from django.conf import settings 4 from django.contrib.contenttypes.models import ContentType 5 6 from .models import TCMSLogModel 7 8 from django.utils.encoding import smart_text as smart_unicode 9 10 11 class TCMSLog: 12 """TCMS Log""" 13 14 def __init__(self, model): 15 super().__init__() 16 self.model = model 17 18 def get_new_log_object(self): 19 elements = ['who', 'field', 'original_value', 'new_value'] 20 21 for element in elements: 22 if not hasattr(self, element): 23 raise NotImplementedError( 24 f'Log does not have attribute {element}') 25 26 model = self.get_log_model() 27 new = model(**self.get_log_create_data()) 28 29 return new 30 31 def get_log_model(self): 32 """ 33 Get the log model to create with this class. 34 """ 35 return TCMSLogModel 36 37 def get_log_create_data(self): 38 return { 39 'content_object': self.model, 40 'site_id': settings.SITE_ID, 41 'who': self.who, 42 'field': self.field, 43 'original_value': self.original_value, 44 'new_value': self.new_value, 45 } 46 47 def make(self, who, new_value, field=None, original_value=None): 48 """Create new log""" 49 self.who = who 50 self.field = field or '' 51 self.original_value = original_value or '' 52 self.new_value = new_value 53 54 model = self.get_new_log_object() 55 model.save() 56 57 def lookup_content_type(self): 58 return ContentType.objects.get_for_model(self.model) 59 60 def get_query_set(self): 61 ctype = self.lookup_content_type() 62 model = self.get_log_model() 63 64 qs = model.objects.filter(content_type=ctype, 65 object_pk=smart_unicode(self.model.pk), 66 site=settings.SITE_ID) 67 qs = qs.select_related('who') 68 return qs 69 70 def list(self): 71 """List the logs""" 72 return self.get_query_set().all() 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/tcms/logs/views.py b/src/tcms/logs/views.py --- a/src/tcms/logs/views.py +++ b/src/tcms/logs/views.py @@ -5,7 +5,7 @@ from .models import TCMSLogModel -from django.utils.encoding import smart_text as smart_unicode +from django.utils.encoding import smart_str class TCMSLog: @@ -62,7 +62,7 @@ model = self.get_log_model() qs = model.objects.filter(content_type=ctype, - object_pk=smart_unicode(self.model.pk), + object_pk=smart_str(self.model.pk), site=settings.SITE_ID) qs = qs.select_related('who') return qs
{"golden_diff": "diff --git a/src/tcms/logs/views.py b/src/tcms/logs/views.py\n--- a/src/tcms/logs/views.py\n+++ b/src/tcms/logs/views.py\n@@ -5,7 +5,7 @@\n \n from .models import TCMSLogModel\n \n-from django.utils.encoding import smart_text as smart_unicode\n+from django.utils.encoding import smart_str\n \n \n class TCMSLog:\n@@ -62,7 +62,7 @@\n model = self.get_log_model()\n \n qs = model.objects.filter(content_type=ctype,\n- object_pk=smart_unicode(self.model.pk),\n+ object_pk=smart_str(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n", "issue": "Replace smart_text() with smart_str()\nTo suppress warning: `RemovedInDjango40Warning: smart_text() is deprecated in favor of smart_str()`\r\n\r\nRun command `runserver`, find out the warning messages from output.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .models import TCMSLogModel\n\nfrom django.utils.encoding import smart_text as smart_unicode\n\n\nclass TCMSLog:\n \"\"\"TCMS Log\"\"\"\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def get_new_log_object(self):\n elements = ['who', 'field', 'original_value', 'new_value']\n\n for element in elements:\n if not hasattr(self, element):\n raise NotImplementedError(\n f'Log does not have attribute {element}')\n\n model = self.get_log_model()\n new = model(**self.get_log_create_data())\n\n return new\n\n def get_log_model(self):\n \"\"\"\n Get the log model to create with this class.\n \"\"\"\n return TCMSLogModel\n\n def get_log_create_data(self):\n return {\n 'content_object': self.model,\n 'site_id': settings.SITE_ID,\n 'who': self.who,\n 'field': self.field,\n 'original_value': self.original_value,\n 'new_value': self.new_value,\n }\n\n def make(self, who, new_value, field=None, original_value=None):\n \"\"\"Create new log\"\"\"\n self.who = who\n self.field = field or ''\n self.original_value = original_value or ''\n self.new_value = new_value\n\n model = self.get_new_log_object()\n model.save()\n\n def lookup_content_type(self):\n return ContentType.objects.get_for_model(self.model)\n\n def get_query_set(self):\n ctype = self.lookup_content_type()\n model = self.get_log_model()\n\n qs = model.objects.filter(content_type=ctype,\n object_pk=smart_unicode(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n\n def list(self):\n \"\"\"List the logs\"\"\"\n return self.get_query_set().all()\n", "path": "src/tcms/logs/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom .models import TCMSLogModel\n\nfrom django.utils.encoding import smart_str\n\n\nclass TCMSLog:\n \"\"\"TCMS Log\"\"\"\n\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def get_new_log_object(self):\n elements = ['who', 'field', 'original_value', 'new_value']\n\n for element in elements:\n if not hasattr(self, element):\n raise NotImplementedError(\n f'Log does not have attribute {element}')\n\n model = self.get_log_model()\n new = model(**self.get_log_create_data())\n\n return new\n\n def get_log_model(self):\n \"\"\"\n Get the log model to create with this class.\n \"\"\"\n return TCMSLogModel\n\n def get_log_create_data(self):\n return {\n 'content_object': self.model,\n 'site_id': settings.SITE_ID,\n 'who': self.who,\n 'field': self.field,\n 'original_value': 
self.original_value,\n 'new_value': self.new_value,\n }\n\n def make(self, who, new_value, field=None, original_value=None):\n \"\"\"Create new log\"\"\"\n self.who = who\n self.field = field or ''\n self.original_value = original_value or ''\n self.new_value = new_value\n\n model = self.get_new_log_object()\n model.save()\n\n def lookup_content_type(self):\n return ContentType.objects.get_for_model(self.model)\n\n def get_query_set(self):\n ctype = self.lookup_content_type()\n model = self.get_log_model()\n\n qs = model.objects.filter(content_type=ctype,\n object_pk=smart_str(self.model.pk),\n site=settings.SITE_ID)\n qs = qs.select_related('who')\n return qs\n\n def list(self):\n \"\"\"List the logs\"\"\"\n return self.get_query_set().all()\n", "path": "src/tcms/logs/views.py"}]}
874
156
gh_patches_debug_5175
rasdani/github-patches
git_diff
feast-dev__feast-3717
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Saved Datasets breaks CLI registry-dump ## Expected Behavior After creating a saved dataset, the CLI command `feast registry-dump` should dump the contents of the registry to the terminal. ## Current Behavior After creating a saved dataset, the CLI command `feast registry-dump` breaks, with the following error: ``` Traceback (most recent call last): File "/Library/Frameworks/Python.framework/Versions/3.10/bin/feast", line 8, in <module> sys.exit(cli()) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py", line 1157, in __call__ return self.main(*args, **kwargs) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py", line 1078, in main rv = self.invoke(ctx) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py", line 1688, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py", line 1434, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py", line 783, in invoke return __callback(*args, **kwargs) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/decorators.py", line 33, in new_func return f(get_current_context(), *args, **kwargs) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/cli.py", line 562, in registry_dump_command click.echo(registry_dump(repo_config, repo_path=repo)) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/usage.py", line 299, in wrapper raise exc.with_traceback(traceback) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/usage.py", line 288, in wrapper return func(*args, **kwargs) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/repo_operations.py", line 353, in registry_dump registry_dict = registry.to_dict(project=project) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/infra/registry/base_registry.py", line 648, in to_dict self.list_saved_datasets(project=project), key=lambda item: item.name File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/infra/registry/registry.py", line 696, in list_saved_datasets return proto_registry_utils.list_saved_datasets(registry_proto, project) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/infra/registry/proto_registry_utils.py", line 217, in list_saved_datasets if saved_dataset.project == project: AttributeError: project ``` This error is also replicated with a local clone of the master branch. ## Steps to reproduce 1. Create a saved dataset and save it with `store.create_saved_dataset()` 2. Run `feast registry-dump` This can be replicated with this repository: https://github.com/malcolmk181/feast-dqm-tutorial/tree/61a8d46c1452e48d51504b1b2c252426d557e87e Clone the repository and convert the `demo.py` into a notebook using `jupyter nbconvert demo.py --to notebook`. This notebook generally follows the [Validating Historical Features](https://docs.feast.dev/tutorials/validating-historical-features) tutorial. 
If you run the cells up to and not including the cell that creates the saved dataset, `feast registry-dump` will run successfully, and dump the registry contents to the terminal. If you run the cell that creates the saved dataset and repeat `feast registry-dump`, the error will occur. ### Specifications - Version: Feast 0.31.1 and Feast SDK 0.1.dev3032, Python 3.10.11 - Platform: Replicated in Debian 11.7 and macOS 12.6.8 - Subsystem: ## Possible Solution The error is thrown by line 217 of the `list_saved_dataset()` function in `sdk/python/feast/infra/registry/proto_registry_utils.py`: https://github.com/feast-dev/feast/blob/c75a01fce2d52cd18479ace748b8eb2e6c81c988/sdk/python/feast/infra/registry/proto_registry_utils.py#L217 I deserialized the `registry.db` file and after referencing the proto files found that the project attribute was successfully logged on the saved dataset object within the registry. It appears that within the `list_saved_datasets()` function that the `saved_dataset` proto needs to use `.spec` to access the `project` attribute. This would be consistent with the `get_saved_dataset()` function: https://github.com/feast-dev/feast/blob/c75a01fce2d52cd18479ace748b8eb2e6c81c988/sdk/python/feast/infra/registry/proto_registry_utils.py#L123 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/feast/infra/registry/proto_registry_utils.py` Content: ``` 1 import uuid 2 from typing import List, Optional 3 4 from feast import usage 5 from feast.data_source import DataSource 6 from feast.entity import Entity 7 from feast.errors import ( 8 DataSourceObjectNotFoundException, 9 EntityNotFoundException, 10 FeatureServiceNotFoundException, 11 FeatureViewNotFoundException, 12 SavedDatasetNotFound, 13 ValidationReferenceNotFound, 14 ) 15 from feast.feature_service import FeatureService 16 from feast.feature_view import FeatureView 17 from feast.on_demand_feature_view import OnDemandFeatureView 18 from feast.project_metadata import ProjectMetadata 19 from feast.protos.feast.core.Registry_pb2 import ProjectMetadata as ProjectMetadataProto 20 from feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto 21 from feast.request_feature_view import RequestFeatureView 22 from feast.saved_dataset import SavedDataset, ValidationReference 23 from feast.stream_feature_view import StreamFeatureView 24 25 26 def init_project_metadata(cached_registry_proto: RegistryProto, project: str): 27 new_project_uuid = f"{uuid.uuid4()}" 28 usage.set_current_project_uuid(new_project_uuid) 29 cached_registry_proto.project_metadata.append( 30 ProjectMetadata(project_name=project, project_uuid=new_project_uuid).to_proto() 31 ) 32 33 34 def get_project_metadata( 35 registry_proto: Optional[RegistryProto], project: str 36 ) -> Optional[ProjectMetadataProto]: 37 if not registry_proto: 38 return None 39 for pm in registry_proto.project_metadata: 40 if pm.project == project: 41 return pm 42 return None 43 44 45 def get_feature_service( 46 registry_proto: RegistryProto, name: str, project: str 47 ) -> FeatureService: 48 for feature_service_proto in registry_proto.feature_services: 49 if ( 50 feature_service_proto.spec.project == project 51 and feature_service_proto.spec.name == name 52 ): 53 return FeatureService.from_proto(feature_service_proto) 54 raise FeatureServiceNotFoundException(name, project=project) 55 56 57 def get_feature_view( 58 registry_proto: RegistryProto, name: str, project: str 59 ) -> 
FeatureView: 60 for feature_view_proto in registry_proto.feature_views: 61 if ( 62 feature_view_proto.spec.name == name 63 and feature_view_proto.spec.project == project 64 ): 65 return FeatureView.from_proto(feature_view_proto) 66 raise FeatureViewNotFoundException(name, project) 67 68 69 def get_stream_feature_view( 70 registry_proto: RegistryProto, name: str, project: str 71 ) -> StreamFeatureView: 72 for feature_view_proto in registry_proto.stream_feature_views: 73 if ( 74 feature_view_proto.spec.name == name 75 and feature_view_proto.spec.project == project 76 ): 77 return StreamFeatureView.from_proto(feature_view_proto) 78 raise FeatureViewNotFoundException(name, project) 79 80 81 def get_request_feature_view(registry_proto: RegistryProto, name: str, project: str): 82 for feature_view_proto in registry_proto.feature_views: 83 if ( 84 feature_view_proto.spec.name == name 85 and feature_view_proto.spec.project == project 86 ): 87 return RequestFeatureView.from_proto(feature_view_proto) 88 raise FeatureViewNotFoundException(name, project) 89 90 91 def get_on_demand_feature_view( 92 registry_proto: RegistryProto, name: str, project: str 93 ) -> OnDemandFeatureView: 94 for on_demand_feature_view in registry_proto.on_demand_feature_views: 95 if ( 96 on_demand_feature_view.spec.project == project 97 and on_demand_feature_view.spec.name == name 98 ): 99 return OnDemandFeatureView.from_proto(on_demand_feature_view) 100 raise FeatureViewNotFoundException(name, project=project) 101 102 103 def get_data_source( 104 registry_proto: RegistryProto, name: str, project: str 105 ) -> DataSource: 106 for data_source in registry_proto.data_sources: 107 if data_source.project == project and data_source.name == name: 108 return DataSource.from_proto(data_source) 109 raise DataSourceObjectNotFoundException(name, project=project) 110 111 112 def get_entity(registry_proto: RegistryProto, name: str, project: str) -> Entity: 113 for entity_proto in registry_proto.entities: 114 if entity_proto.spec.name == name and entity_proto.spec.project == project: 115 return Entity.from_proto(entity_proto) 116 raise EntityNotFoundException(name, project=project) 117 118 119 def get_saved_dataset( 120 registry_proto: RegistryProto, name: str, project: str 121 ) -> SavedDataset: 122 for saved_dataset in registry_proto.saved_datasets: 123 if saved_dataset.spec.name == name and saved_dataset.spec.project == project: 124 return SavedDataset.from_proto(saved_dataset) 125 raise SavedDatasetNotFound(name, project=project) 126 127 128 def get_validation_reference( 129 registry_proto: RegistryProto, name: str, project: str 130 ) -> ValidationReference: 131 for validation_reference in registry_proto.validation_references: 132 if ( 133 validation_reference.name == name 134 and validation_reference.project == project 135 ): 136 return ValidationReference.from_proto(validation_reference) 137 raise ValidationReferenceNotFound(name, project=project) 138 139 140 def list_feature_services( 141 registry_proto: RegistryProto, project: str, allow_cache: bool = False 142 ) -> List[FeatureService]: 143 feature_services = [] 144 for feature_service_proto in registry_proto.feature_services: 145 if feature_service_proto.spec.project == project: 146 feature_services.append(FeatureService.from_proto(feature_service_proto)) 147 return feature_services 148 149 150 def list_feature_views( 151 registry_proto: RegistryProto, project: str 152 ) -> List[FeatureView]: 153 feature_views: List[FeatureView] = [] 154 for feature_view_proto in 
registry_proto.feature_views: 155 if feature_view_proto.spec.project == project: 156 feature_views.append(FeatureView.from_proto(feature_view_proto)) 157 return feature_views 158 159 160 def list_request_feature_views( 161 registry_proto: RegistryProto, project: str 162 ) -> List[RequestFeatureView]: 163 feature_views: List[RequestFeatureView] = [] 164 for request_feature_view_proto in registry_proto.request_feature_views: 165 if request_feature_view_proto.spec.project == project: 166 feature_views.append( 167 RequestFeatureView.from_proto(request_feature_view_proto) 168 ) 169 return feature_views 170 171 172 def list_stream_feature_views( 173 registry_proto: RegistryProto, project: str 174 ) -> List[StreamFeatureView]: 175 stream_feature_views = [] 176 for stream_feature_view in registry_proto.stream_feature_views: 177 if stream_feature_view.spec.project == project: 178 stream_feature_views.append( 179 StreamFeatureView.from_proto(stream_feature_view) 180 ) 181 return stream_feature_views 182 183 184 def list_on_demand_feature_views( 185 registry_proto: RegistryProto, project: str 186 ) -> List[OnDemandFeatureView]: 187 on_demand_feature_views = [] 188 for on_demand_feature_view in registry_proto.on_demand_feature_views: 189 if on_demand_feature_view.spec.project == project: 190 on_demand_feature_views.append( 191 OnDemandFeatureView.from_proto(on_demand_feature_view) 192 ) 193 return on_demand_feature_views 194 195 196 def list_entities(registry_proto: RegistryProto, project: str) -> List[Entity]: 197 entities = [] 198 for entity_proto in registry_proto.entities: 199 if entity_proto.spec.project == project: 200 entities.append(Entity.from_proto(entity_proto)) 201 return entities 202 203 204 def list_data_sources(registry_proto: RegistryProto, project: str) -> List[DataSource]: 205 data_sources = [] 206 for data_source_proto in registry_proto.data_sources: 207 if data_source_proto.project == project: 208 data_sources.append(DataSource.from_proto(data_source_proto)) 209 return data_sources 210 211 212 def list_saved_datasets( 213 registry_proto: RegistryProto, project: str 214 ) -> List[SavedDataset]: 215 saved_datasets = [] 216 for saved_dataset in registry_proto.saved_datasets: 217 if saved_dataset.project == project: 218 saved_datasets.append(SavedDataset.from_proto(saved_dataset)) 219 return saved_datasets 220 221 222 def list_validation_references( 223 registry_proto: RegistryProto, project: str 224 ) -> List[ValidationReference]: 225 validation_references = [] 226 for validation_reference in registry_proto.validation_references: 227 if validation_reference.project == project: 228 validation_references.append( 229 ValidationReference.from_proto(validation_reference) 230 ) 231 return validation_references 232 233 234 def list_project_metadata( 235 registry_proto: RegistryProto, project: str 236 ) -> List[ProjectMetadata]: 237 return [ 238 ProjectMetadata.from_proto(project_metadata) 239 for project_metadata in registry_proto.project_metadata 240 if project_metadata.project == project 241 ] 242 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/feast/infra/registry/proto_registry_utils.py b/sdk/python/feast/infra/registry/proto_registry_utils.py --- a/sdk/python/feast/infra/registry/proto_registry_utils.py +++ b/sdk/python/feast/infra/registry/proto_registry_utils.py @@ -214,7 +214,7 @@ ) -> List[SavedDataset]: saved_datasets = [] for saved_dataset in registry_proto.saved_datasets: - if saved_dataset.project == project: + if saved_dataset.spec.project == project: saved_datasets.append(SavedDataset.from_proto(saved_dataset)) return saved_datasets
{"golden_diff": "diff --git a/sdk/python/feast/infra/registry/proto_registry_utils.py b/sdk/python/feast/infra/registry/proto_registry_utils.py\n--- a/sdk/python/feast/infra/registry/proto_registry_utils.py\n+++ b/sdk/python/feast/infra/registry/proto_registry_utils.py\n@@ -214,7 +214,7 @@\n ) -> List[SavedDataset]:\n saved_datasets = []\n for saved_dataset in registry_proto.saved_datasets:\n- if saved_dataset.project == project:\n+ if saved_dataset.spec.project == project:\n saved_datasets.append(SavedDataset.from_proto(saved_dataset))\n return saved_datasets\n", "issue": "Saved Datasets breaks CLI registry-dump\n## Expected Behavior\r\n\r\nAfter creating a saved dataset, the CLI command `feast registry-dump` should dump the contents of the registry to the terminal.\r\n\r\n## Current Behavior\r\n\r\nAfter creating a saved dataset, the CLI command `feast registry-dump` breaks, with the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/bin/feast\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py\", line 1157, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py\", line 1078, in main\r\n rv = self.invoke(ctx)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py\", line 1434, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/core.py\", line 783, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/click/decorators.py\", line 33, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/cli.py\", line 562, in registry_dump_command\r\n click.echo(registry_dump(repo_config, repo_path=repo))\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/usage.py\", line 299, in wrapper\r\n raise exc.with_traceback(traceback)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/usage.py\", line 288, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/repo_operations.py\", line 353, in registry_dump\r\n registry_dict = registry.to_dict(project=project)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/infra/registry/base_registry.py\", line 648, in to_dict\r\n self.list_saved_datasets(project=project), key=lambda item: item.name\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/infra/registry/registry.py\", line 696, in list_saved_datasets\r\n return proto_registry_utils.list_saved_datasets(registry_proto, project)\r\n File \"/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/feast/infra/registry/proto_registry_utils.py\", line 217, in list_saved_datasets\r\n if saved_dataset.project == 
project:\r\nAttributeError: project\r\n```\r\n\r\nThis error is also replicated with a local clone of the master branch.\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a saved dataset and save it with `store.create_saved_dataset()`\r\n2. Run `feast registry-dump`\r\n\r\nThis can be replicated with this repository: https://github.com/malcolmk181/feast-dqm-tutorial/tree/61a8d46c1452e48d51504b1b2c252426d557e87e\r\n\r\nClone the repository and convert the `demo.py` into a notebook using `jupyter nbconvert demo.py --to notebook`. This notebook generally follows the [Validating Historical Features](https://docs.feast.dev/tutorials/validating-historical-features) tutorial. If you run the cells up to and not including the cell that creates the saved dataset, `feast registry-dump` will run successfully, and dump the registry contents to the terminal. If you run the cell that creates the saved dataset and repeat `feast registry-dump`, the error will occur.\r\n\r\n### Specifications\r\n\r\n- Version: Feast 0.31.1 and Feast SDK 0.1.dev3032, Python 3.10.11\r\n- Platform: Replicated in Debian 11.7 and macOS 12.6.8\r\n- Subsystem:\r\n\r\n## Possible Solution\r\n\r\nThe error is thrown by line 217 of the `list_saved_dataset()` function in `sdk/python/feast/infra/registry/proto_registry_utils.py`:\r\n\r\nhttps://github.com/feast-dev/feast/blob/c75a01fce2d52cd18479ace748b8eb2e6c81c988/sdk/python/feast/infra/registry/proto_registry_utils.py#L217\r\n\r\nI deserialized the `registry.db` file and after referencing the proto files found that the project attribute was successfully logged on the saved dataset object within the registry. It appears that within the `list_saved_datasets()` function that the `saved_dataset` proto needs to use `.spec` to access the `project` attribute. 
This would be consistent with the `get_saved_dataset()` function:\r\n\r\nhttps://github.com/feast-dev/feast/blob/c75a01fce2d52cd18479ace748b8eb2e6c81c988/sdk/python/feast/infra/registry/proto_registry_utils.py#L123\r\n\n", "before_files": [{"content": "import uuid\nfrom typing import List, Optional\n\nfrom feast import usage\nfrom feast.data_source import DataSource\nfrom feast.entity import Entity\nfrom feast.errors import (\n DataSourceObjectNotFoundException,\n EntityNotFoundException,\n FeatureServiceNotFoundException,\n FeatureViewNotFoundException,\n SavedDatasetNotFound,\n ValidationReferenceNotFound,\n)\nfrom feast.feature_service import FeatureService\nfrom feast.feature_view import FeatureView\nfrom feast.on_demand_feature_view import OnDemandFeatureView\nfrom feast.project_metadata import ProjectMetadata\nfrom feast.protos.feast.core.Registry_pb2 import ProjectMetadata as ProjectMetadataProto\nfrom feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto\nfrom feast.request_feature_view import RequestFeatureView\nfrom feast.saved_dataset import SavedDataset, ValidationReference\nfrom feast.stream_feature_view import StreamFeatureView\n\n\ndef init_project_metadata(cached_registry_proto: RegistryProto, project: str):\n new_project_uuid = f\"{uuid.uuid4()}\"\n usage.set_current_project_uuid(new_project_uuid)\n cached_registry_proto.project_metadata.append(\n ProjectMetadata(project_name=project, project_uuid=new_project_uuid).to_proto()\n )\n\n\ndef get_project_metadata(\n registry_proto: Optional[RegistryProto], project: str\n) -> Optional[ProjectMetadataProto]:\n if not registry_proto:\n return None\n for pm in registry_proto.project_metadata:\n if pm.project == project:\n return pm\n return None\n\n\ndef get_feature_service(\n registry_proto: RegistryProto, name: str, project: str\n) -> FeatureService:\n for feature_service_proto in registry_proto.feature_services:\n if (\n feature_service_proto.spec.project == project\n and feature_service_proto.spec.name == name\n ):\n return FeatureService.from_proto(feature_service_proto)\n raise FeatureServiceNotFoundException(name, project=project)\n\n\ndef get_feature_view(\n registry_proto: RegistryProto, name: str, project: str\n) -> FeatureView:\n for feature_view_proto in registry_proto.feature_views:\n if (\n feature_view_proto.spec.name == name\n and feature_view_proto.spec.project == project\n ):\n return FeatureView.from_proto(feature_view_proto)\n raise FeatureViewNotFoundException(name, project)\n\n\ndef get_stream_feature_view(\n registry_proto: RegistryProto, name: str, project: str\n) -> StreamFeatureView:\n for feature_view_proto in registry_proto.stream_feature_views:\n if (\n feature_view_proto.spec.name == name\n and feature_view_proto.spec.project == project\n ):\n return StreamFeatureView.from_proto(feature_view_proto)\n raise FeatureViewNotFoundException(name, project)\n\n\ndef get_request_feature_view(registry_proto: RegistryProto, name: str, project: str):\n for feature_view_proto in registry_proto.feature_views:\n if (\n feature_view_proto.spec.name == name\n and feature_view_proto.spec.project == project\n ):\n return RequestFeatureView.from_proto(feature_view_proto)\n raise FeatureViewNotFoundException(name, project)\n\n\ndef get_on_demand_feature_view(\n registry_proto: RegistryProto, name: str, project: str\n) -> OnDemandFeatureView:\n for on_demand_feature_view in registry_proto.on_demand_feature_views:\n if (\n on_demand_feature_view.spec.project == project\n and on_demand_feature_view.spec.name 
== name\n ):\n return OnDemandFeatureView.from_proto(on_demand_feature_view)\n raise FeatureViewNotFoundException(name, project=project)\n\n\ndef get_data_source(\n registry_proto: RegistryProto, name: str, project: str\n) -> DataSource:\n for data_source in registry_proto.data_sources:\n if data_source.project == project and data_source.name == name:\n return DataSource.from_proto(data_source)\n raise DataSourceObjectNotFoundException(name, project=project)\n\n\ndef get_entity(registry_proto: RegistryProto, name: str, project: str) -> Entity:\n for entity_proto in registry_proto.entities:\n if entity_proto.spec.name == name and entity_proto.spec.project == project:\n return Entity.from_proto(entity_proto)\n raise EntityNotFoundException(name, project=project)\n\n\ndef get_saved_dataset(\n registry_proto: RegistryProto, name: str, project: str\n) -> SavedDataset:\n for saved_dataset in registry_proto.saved_datasets:\n if saved_dataset.spec.name == name and saved_dataset.spec.project == project:\n return SavedDataset.from_proto(saved_dataset)\n raise SavedDatasetNotFound(name, project=project)\n\n\ndef get_validation_reference(\n registry_proto: RegistryProto, name: str, project: str\n) -> ValidationReference:\n for validation_reference in registry_proto.validation_references:\n if (\n validation_reference.name == name\n and validation_reference.project == project\n ):\n return ValidationReference.from_proto(validation_reference)\n raise ValidationReferenceNotFound(name, project=project)\n\n\ndef list_feature_services(\n registry_proto: RegistryProto, project: str, allow_cache: bool = False\n) -> List[FeatureService]:\n feature_services = []\n for feature_service_proto in registry_proto.feature_services:\n if feature_service_proto.spec.project == project:\n feature_services.append(FeatureService.from_proto(feature_service_proto))\n return feature_services\n\n\ndef list_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[FeatureView]:\n feature_views: List[FeatureView] = []\n for feature_view_proto in registry_proto.feature_views:\n if feature_view_proto.spec.project == project:\n feature_views.append(FeatureView.from_proto(feature_view_proto))\n return feature_views\n\n\ndef list_request_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[RequestFeatureView]:\n feature_views: List[RequestFeatureView] = []\n for request_feature_view_proto in registry_proto.request_feature_views:\n if request_feature_view_proto.spec.project == project:\n feature_views.append(\n RequestFeatureView.from_proto(request_feature_view_proto)\n )\n return feature_views\n\n\ndef list_stream_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[StreamFeatureView]:\n stream_feature_views = []\n for stream_feature_view in registry_proto.stream_feature_views:\n if stream_feature_view.spec.project == project:\n stream_feature_views.append(\n StreamFeatureView.from_proto(stream_feature_view)\n )\n return stream_feature_views\n\n\ndef list_on_demand_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[OnDemandFeatureView]:\n on_demand_feature_views = []\n for on_demand_feature_view in registry_proto.on_demand_feature_views:\n if on_demand_feature_view.spec.project == project:\n on_demand_feature_views.append(\n OnDemandFeatureView.from_proto(on_demand_feature_view)\n )\n return on_demand_feature_views\n\n\ndef list_entities(registry_proto: RegistryProto, project: str) -> List[Entity]:\n entities = []\n for entity_proto in registry_proto.entities:\n 
if entity_proto.spec.project == project:\n entities.append(Entity.from_proto(entity_proto))\n return entities\n\n\ndef list_data_sources(registry_proto: RegistryProto, project: str) -> List[DataSource]:\n data_sources = []\n for data_source_proto in registry_proto.data_sources:\n if data_source_proto.project == project:\n data_sources.append(DataSource.from_proto(data_source_proto))\n return data_sources\n\n\ndef list_saved_datasets(\n registry_proto: RegistryProto, project: str\n) -> List[SavedDataset]:\n saved_datasets = []\n for saved_dataset in registry_proto.saved_datasets:\n if saved_dataset.project == project:\n saved_datasets.append(SavedDataset.from_proto(saved_dataset))\n return saved_datasets\n\n\ndef list_validation_references(\n registry_proto: RegistryProto, project: str\n) -> List[ValidationReference]:\n validation_references = []\n for validation_reference in registry_proto.validation_references:\n if validation_reference.project == project:\n validation_references.append(\n ValidationReference.from_proto(validation_reference)\n )\n return validation_references\n\n\ndef list_project_metadata(\n registry_proto: RegistryProto, project: str\n) -> List[ProjectMetadata]:\n return [\n ProjectMetadata.from_proto(project_metadata)\n for project_metadata in registry_proto.project_metadata\n if project_metadata.project == project\n ]\n", "path": "sdk/python/feast/infra/registry/proto_registry_utils.py"}], "after_files": [{"content": "import uuid\nfrom typing import List, Optional\n\nfrom feast import usage\nfrom feast.data_source import DataSource\nfrom feast.entity import Entity\nfrom feast.errors import (\n DataSourceObjectNotFoundException,\n EntityNotFoundException,\n FeatureServiceNotFoundException,\n FeatureViewNotFoundException,\n SavedDatasetNotFound,\n ValidationReferenceNotFound,\n)\nfrom feast.feature_service import FeatureService\nfrom feast.feature_view import FeatureView\nfrom feast.on_demand_feature_view import OnDemandFeatureView\nfrom feast.project_metadata import ProjectMetadata\nfrom feast.protos.feast.core.Registry_pb2 import ProjectMetadata as ProjectMetadataProto\nfrom feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto\nfrom feast.request_feature_view import RequestFeatureView\nfrom feast.saved_dataset import SavedDataset, ValidationReference\nfrom feast.stream_feature_view import StreamFeatureView\n\n\ndef init_project_metadata(cached_registry_proto: RegistryProto, project: str):\n new_project_uuid = f\"{uuid.uuid4()}\"\n usage.set_current_project_uuid(new_project_uuid)\n cached_registry_proto.project_metadata.append(\n ProjectMetadata(project_name=project, project_uuid=new_project_uuid).to_proto()\n )\n\n\ndef get_project_metadata(\n registry_proto: Optional[RegistryProto], project: str\n) -> Optional[ProjectMetadataProto]:\n if not registry_proto:\n return None\n for pm in registry_proto.project_metadata:\n if pm.project == project:\n return pm\n return None\n\n\ndef get_feature_service(\n registry_proto: RegistryProto, name: str, project: str\n) -> FeatureService:\n for feature_service_proto in registry_proto.feature_services:\n if (\n feature_service_proto.spec.project == project\n and feature_service_proto.spec.name == name\n ):\n return FeatureService.from_proto(feature_service_proto)\n raise FeatureServiceNotFoundException(name, project=project)\n\n\ndef get_feature_view(\n registry_proto: RegistryProto, name: str, project: str\n) -> FeatureView:\n for feature_view_proto in registry_proto.feature_views:\n if (\n 
feature_view_proto.spec.name == name\n and feature_view_proto.spec.project == project\n ):\n return FeatureView.from_proto(feature_view_proto)\n raise FeatureViewNotFoundException(name, project)\n\n\ndef get_stream_feature_view(\n registry_proto: RegistryProto, name: str, project: str\n) -> StreamFeatureView:\n for feature_view_proto in registry_proto.stream_feature_views:\n if (\n feature_view_proto.spec.name == name\n and feature_view_proto.spec.project == project\n ):\n return StreamFeatureView.from_proto(feature_view_proto)\n raise FeatureViewNotFoundException(name, project)\n\n\ndef get_request_feature_view(registry_proto: RegistryProto, name: str, project: str):\n for feature_view_proto in registry_proto.feature_views:\n if (\n feature_view_proto.spec.name == name\n and feature_view_proto.spec.project == project\n ):\n return RequestFeatureView.from_proto(feature_view_proto)\n raise FeatureViewNotFoundException(name, project)\n\n\ndef get_on_demand_feature_view(\n registry_proto: RegistryProto, name: str, project: str\n) -> OnDemandFeatureView:\n for on_demand_feature_view in registry_proto.on_demand_feature_views:\n if (\n on_demand_feature_view.spec.project == project\n and on_demand_feature_view.spec.name == name\n ):\n return OnDemandFeatureView.from_proto(on_demand_feature_view)\n raise FeatureViewNotFoundException(name, project=project)\n\n\ndef get_data_source(\n registry_proto: RegistryProto, name: str, project: str\n) -> DataSource:\n for data_source in registry_proto.data_sources:\n if data_source.project == project and data_source.name == name:\n return DataSource.from_proto(data_source)\n raise DataSourceObjectNotFoundException(name, project=project)\n\n\ndef get_entity(registry_proto: RegistryProto, name: str, project: str) -> Entity:\n for entity_proto in registry_proto.entities:\n if entity_proto.spec.name == name and entity_proto.spec.project == project:\n return Entity.from_proto(entity_proto)\n raise EntityNotFoundException(name, project=project)\n\n\ndef get_saved_dataset(\n registry_proto: RegistryProto, name: str, project: str\n) -> SavedDataset:\n for saved_dataset in registry_proto.saved_datasets:\n if saved_dataset.spec.name == name and saved_dataset.spec.project == project:\n return SavedDataset.from_proto(saved_dataset)\n raise SavedDatasetNotFound(name, project=project)\n\n\ndef get_validation_reference(\n registry_proto: RegistryProto, name: str, project: str\n) -> ValidationReference:\n for validation_reference in registry_proto.validation_references:\n if (\n validation_reference.name == name\n and validation_reference.project == project\n ):\n return ValidationReference.from_proto(validation_reference)\n raise ValidationReferenceNotFound(name, project=project)\n\n\ndef list_feature_services(\n registry_proto: RegistryProto, project: str, allow_cache: bool = False\n) -> List[FeatureService]:\n feature_services = []\n for feature_service_proto in registry_proto.feature_services:\n if feature_service_proto.spec.project == project:\n feature_services.append(FeatureService.from_proto(feature_service_proto))\n return feature_services\n\n\ndef list_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[FeatureView]:\n feature_views: List[FeatureView] = []\n for feature_view_proto in registry_proto.feature_views:\n if feature_view_proto.spec.project == project:\n feature_views.append(FeatureView.from_proto(feature_view_proto))\n return feature_views\n\n\ndef list_request_feature_views(\n registry_proto: RegistryProto, project: str\n) -> 
List[RequestFeatureView]:\n feature_views: List[RequestFeatureView] = []\n for request_feature_view_proto in registry_proto.request_feature_views:\n if request_feature_view_proto.spec.project == project:\n feature_views.append(\n RequestFeatureView.from_proto(request_feature_view_proto)\n )\n return feature_views\n\n\ndef list_stream_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[StreamFeatureView]:\n stream_feature_views = []\n for stream_feature_view in registry_proto.stream_feature_views:\n if stream_feature_view.spec.project == project:\n stream_feature_views.append(\n StreamFeatureView.from_proto(stream_feature_view)\n )\n return stream_feature_views\n\n\ndef list_on_demand_feature_views(\n registry_proto: RegistryProto, project: str\n) -> List[OnDemandFeatureView]:\n on_demand_feature_views = []\n for on_demand_feature_view in registry_proto.on_demand_feature_views:\n if on_demand_feature_view.spec.project == project:\n on_demand_feature_views.append(\n OnDemandFeatureView.from_proto(on_demand_feature_view)\n )\n return on_demand_feature_views\n\n\ndef list_entities(registry_proto: RegistryProto, project: str) -> List[Entity]:\n entities = []\n for entity_proto in registry_proto.entities:\n if entity_proto.spec.project == project:\n entities.append(Entity.from_proto(entity_proto))\n return entities\n\n\ndef list_data_sources(registry_proto: RegistryProto, project: str) -> List[DataSource]:\n data_sources = []\n for data_source_proto in registry_proto.data_sources:\n if data_source_proto.project == project:\n data_sources.append(DataSource.from_proto(data_source_proto))\n return data_sources\n\n\ndef list_saved_datasets(\n registry_proto: RegistryProto, project: str\n) -> List[SavedDataset]:\n saved_datasets = []\n for saved_dataset in registry_proto.saved_datasets:\n if saved_dataset.spec.project == project:\n saved_datasets.append(SavedDataset.from_proto(saved_dataset))\n return saved_datasets\n\n\ndef list_validation_references(\n registry_proto: RegistryProto, project: str\n) -> List[ValidationReference]:\n validation_references = []\n for validation_reference in registry_proto.validation_references:\n if validation_reference.project == project:\n validation_references.append(\n ValidationReference.from_proto(validation_reference)\n )\n return validation_references\n\n\ndef list_project_metadata(\n registry_proto: RegistryProto, project: str\n) -> List[ProjectMetadata]:\n return [\n ProjectMetadata.from_proto(project_metadata)\n for project_metadata in registry_proto.project_metadata\n if project_metadata.project == project\n ]\n", "path": "sdk/python/feast/infra/registry/proto_registry_utils.py"}]}
3,941
136
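A minimal, self-contained sketch of the access pattern fixed in the record above: saved-dataset protos keep `name` and `project` under a nested `spec` message, so listing code has to filter on `saved_dataset.spec.project`. The `SavedDatasetSpec` / `SavedDatasetProto` dataclasses below are hypothetical stand-ins for the generated protobuf classes, not Feast's real API; they only mimic the nesting.

```python
from dataclasses import dataclass
from typing import List


# Hypothetical stand-ins for the generated protobuf classes: the real
# SavedDataset proto nests its name/project fields under a `spec` message.
@dataclass
class SavedDatasetSpec:
    name: str
    project: str


@dataclass
class SavedDatasetProto:
    spec: SavedDatasetSpec


def list_saved_dataset_names(
    saved_datasets: List[SavedDatasetProto], project: str
) -> List[str]:
    # Filter on the nested spec.project field, mirroring the golden diff above.
    return [sd.spec.name for sd in saved_datasets if sd.spec.project == project]


if __name__ == "__main__":
    datasets = [
        SavedDatasetProto(SavedDatasetSpec(name="loans_ds", project="demo")),
        SavedDatasetProto(SavedDatasetSpec(name="other_ds", project="other")),
    ]
    print(list_saved_dataset_names(datasets, "demo"))  # ['loans_ds']
```

Reading `sd.project` directly on these stand-ins raises an AttributeError, analogous to the `AttributeError: project` traceback in the issue.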
gh_patches_debug_10819
rasdani/github-patches
git_diff
aws-powertools__powertools-lambda-python-921
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DeprecationWarning for distutils in Python 3.10 (will be removed in Python 3.12) <!--- Provide a general summary of the issue in the Title above --> <!--- How has this issue affected you? What are you trying to accomplish? --> **What were you trying to accomplish?** Run tests utilizing aws-lambda-powertools. ## Expected Behavior <!--- If you're describing a bug, tell us what should happen --> <!--- If you're suggesting a change/improvement, tell us how it should work --> No DeprecationWarning to appear. ## Current Behavior <!--- If describing a bug, tell us what happens instead of the expected behavior --> <!--- If suggesting a change/improvement, explain the difference from current behavior --> When running tests (specifically using `pytest`) that import `aws_lamdba_powertools`, a `DeprecationWarning` appears. ``` .../aws_lambda_powertools/shared/functions.py:1: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives from distutils.util import strtobool ``` ## Possible Solution <!--- Not obligatory, but suggest a fix/reason for the bug, --> <!--- or ideas how to implement the addition or change --> The only function used out of distutils is `strtobool`, and it is only used in one place, so that logic can simply be copied into the `resolve_truthy_env_var_choice` function in `aws_lambda_powertools.shared.functions`. ## Steps to Reproduce (for bugs) <!--- Provide a link to a live example, or an unambiguous set of steps to --> <!--- reproduce this bug. Include code to reproduce, if relevant --> 1. Use python3.10 2. Activate a new venv (`python3.10 -m venv .venv` and `. .venv/bin/activate`) 3. `pip install aws-lambda-powertools pytest` 4. `echo "import aws_lambda_powertools" > reproduce.py` 5. `pytest reproduce.py` ## Environment * **Powertools version used**: `1.22.0` * **Packaging format (Layers, PyPi)**: PyPi This warning appears in development workflows, but will be eventually breaking when Python 3.12 is released. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `aws_lambda_powertools/shared/functions.py` Content: ``` 1 from distutils.util import strtobool 2 from typing import Any, Optional, Union 3 4 5 def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool: 6 """Pick explicit choice over truthy env value, if available, otherwise return truthy env value 7 8 NOTE: Environment variable should be resolved by the caller. 9 10 Parameters 11 ---------- 12 env : str 13 environment variable actual value 14 choice : bool 15 explicit choice 16 17 Returns 18 ------- 19 choice : str 20 resolved choice as either bool or environment value 21 """ 22 return choice if choice is not None else strtobool(env) 23 24 25 def resolve_env_var_choice(env: Any, choice: Optional[Any] = None) -> Union[bool, Any]: 26 """Pick explicit choice over env, if available, otherwise return env value received 27 28 NOTE: Environment variable should be resolved by the caller. 
29 30 Parameters 31 ---------- 32 env : Any 33 environment variable actual value 34 choice : bool 35 explicit choice 36 37 Returns 38 ------- 39 choice : str 40 resolved choice as either bool or environment value 41 """ 42 return choice if choice is not None else env 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/aws_lambda_powertools/shared/functions.py b/aws_lambda_powertools/shared/functions.py --- a/aws_lambda_powertools/shared/functions.py +++ b/aws_lambda_powertools/shared/functions.py @@ -1,7 +1,16 @@ -from distutils.util import strtobool from typing import Any, Optional, Union +def strtobool(value): + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError("invalid truth value %r" % (value,)) + + def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool: """Pick explicit choice over truthy env value, if available, otherwise return truthy env value
{"golden_diff": "diff --git a/aws_lambda_powertools/shared/functions.py b/aws_lambda_powertools/shared/functions.py\n--- a/aws_lambda_powertools/shared/functions.py\n+++ b/aws_lambda_powertools/shared/functions.py\n@@ -1,7 +1,16 @@\n-from distutils.util import strtobool\n from typing import Any, Optional, Union\n \n \n+def strtobool(value):\n+ value = value.lower()\n+ if value in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n+ return 1\n+ elif value in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n+ return 0\n+ else:\n+ raise ValueError(\"invalid truth value %r\" % (value,))\n+\n+\n def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:\n \"\"\"Pick explicit choice over truthy env value, if available, otherwise return truthy env value\n", "issue": "DeprecationWarning for distutils in Python 3.10 (will be removed in Python 3.12)\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- How has this issue affected you? What are you trying to accomplish? -->\r\n\r\n**What were you trying to accomplish?**\r\n\r\nRun tests utilizing aws-lambda-powertools.\r\n\r\n## Expected Behavior\r\n<!--- If you're describing a bug, tell us what should happen -->\r\n<!--- If you're suggesting a change/improvement, tell us how it should work -->\r\n\r\nNo DeprecationWarning to appear.\r\n\r\n## Current Behavior\r\n<!--- If describing a bug, tell us what happens instead of the expected behavior -->\r\n<!--- If suggesting a change/improvement, explain the difference from current behavior -->\r\n\r\nWhen running tests (specifically using `pytest`) that import `aws_lamdba_powertools`, a `DeprecationWarning` appears.\r\n\r\n```\r\n.../aws_lambda_powertools/shared/functions.py:1: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives\r\n from distutils.util import strtobool\r\n```\r\n\r\n## Possible Solution\r\n<!--- Not obligatory, but suggest a fix/reason for the bug, -->\r\n<!--- or ideas how to implement the addition or change -->\r\n\r\nThe only function used out of distutils is `strtobool`, and it is only used in one place, so that logic can simply be copied into the `resolve_truthy_env_var_choice` function in `aws_lambda_powertools.shared.functions`.\r\n\r\n## Steps to Reproduce (for bugs)\r\n<!--- Provide a link to a live example, or an unambiguous set of steps to -->\r\n<!--- reproduce this bug. Include code to reproduce, if relevant -->\r\n1. Use python3.10\r\n2. Activate a new venv (`python3.10 -m venv .venv` and `. .venv/bin/activate`)\r\n3. `pip install aws-lambda-powertools pytest`\r\n4. `echo \"import aws_lambda_powertools\" > reproduce.py`\r\n5. 
`pytest reproduce.py`\r\n\r\n## Environment\r\n\r\n* **Powertools version used**: `1.22.0`\r\n* **Packaging format (Layers, PyPi)**: PyPi\r\n\r\nThis warning appears in development workflows, but will be eventually breaking when Python 3.12 is released.\n", "before_files": [{"content": "from distutils.util import strtobool\nfrom typing import Any, Optional, Union\n\n\ndef resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:\n \"\"\"Pick explicit choice over truthy env value, if available, otherwise return truthy env value\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : str\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else strtobool(env)\n\n\ndef resolve_env_var_choice(env: Any, choice: Optional[Any] = None) -> Union[bool, Any]:\n \"\"\"Pick explicit choice over env, if available, otherwise return env value received\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : Any\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else env\n", "path": "aws_lambda_powertools/shared/functions.py"}], "after_files": [{"content": "from typing import Any, Optional, Union\n\n\ndef strtobool(value):\n value = value.lower()\n if value in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif value in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise ValueError(\"invalid truth value %r\" % (value,))\n\n\ndef resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:\n \"\"\"Pick explicit choice over truthy env value, if available, otherwise return truthy env value\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : str\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else strtobool(env)\n\n\ndef resolve_env_var_choice(env: Any, choice: Optional[Any] = None) -> Union[bool, Any]:\n \"\"\"Pick explicit choice over env, if available, otherwise return env value received\n\n NOTE: Environment variable should be resolved by the caller.\n\n Parameters\n ----------\n env : Any\n environment variable actual value\n choice : bool\n explicit choice\n\n Returns\n -------\n choice : str\n resolved choice as either bool or environment value\n \"\"\"\n return choice if choice is not None else env\n", "path": "aws_lambda_powertools/shared/functions.py"}]}
1,103
207
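A short sketch of the drop-in `strtobool` replacement from the golden diff above, together with how `resolve_truthy_env_var_choice` consumes it. This variant returns a real `bool` rather than the `0`/`1` ints that `distutils.util.strtobool` (and the diff, which mirrors it) returns; the `__main__` assertions are only illustrative.

```python
from typing import Optional


def strtobool(value: str) -> bool:
    """Dependency-free replacement for distutils.util.strtobool (bool instead of 0/1)."""
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")


def resolve_truthy_env_var_choice(env: str, choice: Optional[bool] = None) -> bool:
    # An explicit choice wins; otherwise fall back to the truthy environment value.
    return choice if choice is not None else strtobool(env)


if __name__ == "__main__":
    assert resolve_truthy_env_var_choice("true") is True
    assert resolve_truthy_env_var_choice("0") is False
    assert resolve_truthy_env_var_choice("0", choice=True) is True
    print("ok")
```

Either return type satisfies callers that only use the resolved value in a boolean context, which is how the library uses it.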
gh_patches_debug_29042
rasdani/github-patches
git_diff
sanic-org__sanic-2128
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unexpected behavior using a catch-all Blueprint exception handler **Describe the bug** Using a catch-all exception handler in a Blueprint might lead to unexpected behavior. For example: ```python from sanic import Sanic, Blueprint, response from sanic.exceptions import NotFound error_handlers = Blueprint(__name__) @error_handlers.exception(NotFound) def not_found(request, exception): return response.text("Not found", status=404) @error_handlers.exception(Exception) def unhandled_exceptions(request, exception): return response.text("Unhandled exception", status=500) app = Sanic("My Hello, world app") app.blueprint(error_handlers) @app.route("/") async def test(request): return json({"hello": "world"}) if __name__ == '__main__': app.run(debug=True) ``` One might think that the `not_found` would handle all 404's, but that's not always the case, sometimes the `unhandled_exceptions` handler is being used instead, restarting the application will give "random" results. From what I can see the underlying problem is this line: https://github.com/sanic-org/sanic/blob/main/sanic/handlers.py#L67. Since all exceptions derive from `Exception` they will return `True` here when compared to the `unhandled_exceptions` exception `Exception`. So it's basically the order of the `self.handlers` that will determine which error handler to be used (if there are multiple handlers registered for the same derived exception) since it returns early on the first match. Also, the reason for "random" results between restarts seems to be that a `set` (undefined order) is used as the data structure for storing the registered exception handlers: https://github.com/sanic-org/sanic/blob/main/sanic/mixins/exceptions.py#L8 when using a Blueprint. Previously in versions <21.x this used to be a `list` and the problem above could be "circumvented" by registering the catch-all exception handler last. This is also how the `app.error_handler` seems to be working and the workaround still works for normal application routes. **Expected behavior** The explicitly registered exception handler should primarily be used even thou a catch-all handler is registered, the order when the handler was registered shouldn't matter. I would also expect the same behavior for both Blueprint and normal application routes. **Environment** - Version: 21.3.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sanic/handlers.py` Content: ``` 1 from traceback import format_exc 2 3 from sanic.errorpages import exception_response 4 from sanic.exceptions import ( 5 ContentRangeError, 6 HeaderNotFound, 7 InvalidRangeType, 8 ) 9 from sanic.log import error_logger 10 from sanic.response import text 11 12 13 class ErrorHandler: 14 """ 15 Provide :class:`sanic.app.Sanic` application with a mechanism to handle 16 and process any and all uncaught exceptions in a way the application 17 developer will set fit. 18 19 This error handling framework is built into the core that can be extended 20 by the developers to perform a wide range of tasks from recording the error 21 stats to reporting them to an external service that can be used for 22 realtime alerting system. 
23 24 """ 25 26 handlers = None 27 cached_handlers = None 28 _missing = object() 29 30 def __init__(self): 31 self.handlers = [] 32 self.cached_handlers = {} 33 self.debug = False 34 35 def add(self, exception, handler): 36 """ 37 Add a new exception handler to an already existing handler object. 38 39 :param exception: Type of exception that need to be handled 40 :param handler: Reference to the method that will handle the exception 41 42 :type exception: :class:`sanic.exceptions.SanicException` or 43 :class:`Exception` 44 :type handler: ``function`` 45 46 :return: None 47 """ 48 self.handlers.append((exception, handler)) 49 50 def lookup(self, exception): 51 """ 52 Lookup the existing instance of :class:`ErrorHandler` and fetch the 53 registered handler for a specific type of exception. 54 55 This method leverages a dict lookup to speedup the retrieval process. 56 57 :param exception: Type of exception 58 59 :type exception: :class:`sanic.exceptions.SanicException` or 60 :class:`Exception` 61 62 :return: Registered function if found ``None`` otherwise 63 """ 64 handler = self.cached_handlers.get(type(exception), self._missing) 65 if handler is self._missing: 66 for exception_class, handler in self.handlers: 67 if isinstance(exception, exception_class): 68 self.cached_handlers[type(exception)] = handler 69 return handler 70 self.cached_handlers[type(exception)] = None 71 handler = None 72 return handler 73 74 def response(self, request, exception): 75 """Fetches and executes an exception handler and returns a response 76 object 77 78 :param request: Instance of :class:`sanic.request.Request` 79 :param exception: Exception to handle 80 81 :type request: :class:`sanic.request.Request` 82 :type exception: :class:`sanic.exceptions.SanicException` or 83 :class:`Exception` 84 85 :return: Wrap the return value obtained from :func:`default` 86 or registered handler for that type of exception. 87 """ 88 handler = self.lookup(exception) 89 response = None 90 try: 91 if handler: 92 response = handler(request, exception) 93 if response is None: 94 response = self.default(request, exception) 95 except Exception: 96 self.log(format_exc()) 97 try: 98 url = repr(request.url) 99 except AttributeError: 100 url = "unknown" 101 response_message = ( 102 "Exception raised in exception handler " '"%s" for uri: %s' 103 ) 104 error_logger.exception(response_message, handler.__name__, url) 105 106 if self.debug: 107 return text(response_message % (handler.__name__, url), 500) 108 else: 109 return text("An error occurred while handling an error", 500) 110 return response 111 112 def log(self, message, level="error"): 113 """ 114 Deprecated, do not use. 115 """ 116 117 def default(self, request, exception): 118 """ 119 Provide a default behavior for the objects of :class:`ErrorHandler`. 120 If a developer chooses to extent the :class:`ErrorHandler` they can 121 provide a custom implementation for this method to behave in a way 122 they see fit. 
123 124 :param request: Incoming request 125 :param exception: Exception object 126 127 :type request: :class:`sanic.request.Request` 128 :type exception: :class:`sanic.exceptions.SanicException` or 129 :class:`Exception` 130 :return: 131 """ 132 quiet = getattr(exception, "quiet", False) 133 if quiet is False: 134 try: 135 url = repr(request.url) 136 except AttributeError: 137 url = "unknown" 138 139 self.log(format_exc()) 140 error_logger.exception( 141 "Exception occurred while handling uri: %s", url 142 ) 143 144 return exception_response(request, exception, self.debug) 145 146 147 class ContentRangeHandler: 148 """ 149 A mechanism to parse and process the incoming request headers to 150 extract the content range information. 151 152 :param request: Incoming api request 153 :param stats: Stats related to the content 154 155 :type request: :class:`sanic.request.Request` 156 :type stats: :class:`posix.stat_result` 157 158 :ivar start: Content Range start 159 :ivar end: Content Range end 160 :ivar size: Length of the content 161 :ivar total: Total size identified by the :class:`posix.stat_result` 162 instance 163 :ivar ContentRangeHandler.headers: Content range header ``dict`` 164 """ 165 166 __slots__ = ("start", "end", "size", "total", "headers") 167 168 def __init__(self, request, stats): 169 self.total = stats.st_size 170 _range = request.headers.getone("range", None) 171 if _range is None: 172 raise HeaderNotFound("Range Header Not Found") 173 unit, _, value = tuple(map(str.strip, _range.partition("="))) 174 if unit != "bytes": 175 raise InvalidRangeType( 176 "%s is not a valid Range Type" % (unit,), self 177 ) 178 start_b, _, end_b = tuple(map(str.strip, value.partition("-"))) 179 try: 180 self.start = int(start_b) if start_b else None 181 except ValueError: 182 raise ContentRangeError( 183 "'%s' is invalid for Content Range" % (start_b,), self 184 ) 185 try: 186 self.end = int(end_b) if end_b else None 187 except ValueError: 188 raise ContentRangeError( 189 "'%s' is invalid for Content Range" % (end_b,), self 190 ) 191 if self.end is None: 192 if self.start is None: 193 raise ContentRangeError( 194 "Invalid for Content Range parameters", self 195 ) 196 else: 197 # this case represents `Content-Range: bytes 5-` 198 self.end = self.total - 1 199 else: 200 if self.start is None: 201 # this case represents `Content-Range: bytes -5` 202 self.start = self.total - self.end 203 self.end = self.total - 1 204 if self.start >= self.end: 205 raise ContentRangeError( 206 "Invalid for Content Range parameters", self 207 ) 208 self.size = self.end - self.start + 1 209 self.headers = { 210 "Content-Range": "bytes %s-%s/%s" 211 % (self.start, self.end, self.total) 212 } 213 214 def __bool__(self): 215 return self.size > 0 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/handlers.py b/sanic/handlers.py --- a/sanic/handlers.py +++ b/sanic/handlers.py @@ -25,7 +25,6 @@ handlers = None cached_handlers = None - _missing = object() def __init__(self): self.handlers = [] @@ -45,7 +44,9 @@ :return: None """ + # self.handlers to be deprecated and removed in version 21.12 self.handlers.append((exception, handler)) + self.cached_handlers[exception] = handler def lookup(self, exception): """ @@ -61,14 +62,19 @@ :return: Registered function if found ``None`` otherwise """ - handler = self.cached_handlers.get(type(exception), self._missing) - if handler is self._missing: - for exception_class, handler in self.handlers: - if isinstance(exception, exception_class): - self.cached_handlers[type(exception)] = handler - return handler - self.cached_handlers[type(exception)] = None - handler = None + exception_class = type(exception) + if exception_class in self.cached_handlers: + return self.cached_handlers[exception_class] + + for ancestor in type.mro(exception_class): + if ancestor in self.cached_handlers: + handler = self.cached_handlers[ancestor] + self.cached_handlers[exception_class] = handler + return handler + if ancestor is BaseException: + break + self.cached_handlers[exception_class] = None + handler = None return handler def response(self, request, exception):
{"golden_diff": "diff --git a/sanic/handlers.py b/sanic/handlers.py\n--- a/sanic/handlers.py\n+++ b/sanic/handlers.py\n@@ -25,7 +25,6 @@\n \n handlers = None\n cached_handlers = None\n- _missing = object()\n \n def __init__(self):\n self.handlers = []\n@@ -45,7 +44,9 @@\n \n :return: None\n \"\"\"\n+ # self.handlers to be deprecated and removed in version 21.12\n self.handlers.append((exception, handler))\n+ self.cached_handlers[exception] = handler\n \n def lookup(self, exception):\n \"\"\"\n@@ -61,14 +62,19 @@\n \n :return: Registered function if found ``None`` otherwise\n \"\"\"\n- handler = self.cached_handlers.get(type(exception), self._missing)\n- if handler is self._missing:\n- for exception_class, handler in self.handlers:\n- if isinstance(exception, exception_class):\n- self.cached_handlers[type(exception)] = handler\n- return handler\n- self.cached_handlers[type(exception)] = None\n- handler = None\n+ exception_class = type(exception)\n+ if exception_class in self.cached_handlers:\n+ return self.cached_handlers[exception_class]\n+\n+ for ancestor in type.mro(exception_class):\n+ if ancestor in self.cached_handlers:\n+ handler = self.cached_handlers[ancestor]\n+ self.cached_handlers[exception_class] = handler\n+ return handler\n+ if ancestor is BaseException:\n+ break\n+ self.cached_handlers[exception_class] = None\n+ handler = None\n return handler\n \n def response(self, request, exception):\n", "issue": "Unexpected behavior using a catch-all Blueprint exception handler\n**Describe the bug**\r\nUsing a catch-all exception handler in a Blueprint might lead to unexpected behavior. For example:\r\n\r\n```python\r\nfrom sanic import Sanic, Blueprint, response\r\nfrom sanic.exceptions import NotFound\r\n\r\nerror_handlers = Blueprint(__name__)\r\n\r\n\r\n@error_handlers.exception(NotFound)\r\ndef not_found(request, exception):\r\n return response.text(\"Not found\", status=404)\r\n\r\n\r\n@error_handlers.exception(Exception)\r\ndef unhandled_exceptions(request, exception):\r\n return response.text(\"Unhandled exception\", status=500)\r\n\r\n\r\napp = Sanic(\"My Hello, world app\")\r\napp.blueprint(error_handlers)\r\n\r\n\r\[email protected](\"/\")\r\nasync def test(request):\r\n return json({\"hello\": \"world\"})\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n```\r\nOne might think that the `not_found` would handle all 404's, but that's not always the case, sometimes the `unhandled_exceptions` handler is being used instead, restarting the application will give \"random\" results.\r\n\r\nFrom what I can see the underlying problem is this line: https://github.com/sanic-org/sanic/blob/main/sanic/handlers.py#L67.\r\n\r\nSince all exceptions derive from `Exception` they will return `True` here when compared to the `unhandled_exceptions` exception `Exception`. So it's basically the order of the `self.handlers` that will determine which error handler to be used (if there are multiple handlers registered for the same derived exception) since it returns early on the first match.\r\n\r\nAlso, the reason for \"random\" results between restarts seems to be that a `set` (undefined order) is used as the data structure for storing the registered exception handlers: https://github.com/sanic-org/sanic/blob/main/sanic/mixins/exceptions.py#L8 when using a Blueprint.\r\n\r\nPreviously in versions <21.x this used to be a `list` and the problem above could be \"circumvented\" by registering the catch-all exception handler last. 
This is also how the `app.error_handler` seems to be working and the workaround still works for normal application routes.\r\n\r\n**Expected behavior**\r\nThe explicitly registered exception handler should primarily be used even thou a catch-all handler is registered, the order when the handler was registered shouldn't matter. I would also expect the same behavior for both Blueprint and normal application routes.\r\n\r\n**Environment**\r\n - Version: 21.3.2\r\n\n", "before_files": [{"content": "from traceback import format_exc\n\nfrom sanic.errorpages import exception_response\nfrom sanic.exceptions import (\n ContentRangeError,\n HeaderNotFound,\n InvalidRangeType,\n)\nfrom sanic.log import error_logger\nfrom sanic.response import text\n\n\nclass ErrorHandler:\n \"\"\"\n Provide :class:`sanic.app.Sanic` application with a mechanism to handle\n and process any and all uncaught exceptions in a way the application\n developer will set fit.\n\n This error handling framework is built into the core that can be extended\n by the developers to perform a wide range of tasks from recording the error\n stats to reporting them to an external service that can be used for\n realtime alerting system.\n\n \"\"\"\n\n handlers = None\n cached_handlers = None\n _missing = object()\n\n def __init__(self):\n self.handlers = []\n self.cached_handlers = {}\n self.debug = False\n\n def add(self, exception, handler):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n\n :param exception: Type of exception that need to be handled\n :param handler: Reference to the method that will handle the exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :type handler: ``function``\n\n :return: None\n \"\"\"\n self.handlers.append((exception, handler))\n\n def lookup(self, exception):\n \"\"\"\n Lookup the existing instance of :class:`ErrorHandler` and fetch the\n registered handler for a specific type of exception.\n\n This method leverages a dict lookup to speedup the retrieval process.\n\n :param exception: Type of exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Registered function if found ``None`` otherwise\n \"\"\"\n handler = self.cached_handlers.get(type(exception), self._missing)\n if handler is self._missing:\n for exception_class, handler in self.handlers:\n if isinstance(exception, exception_class):\n self.cached_handlers[type(exception)] = handler\n return handler\n self.cached_handlers[type(exception)] = None\n handler = None\n return handler\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Instance of :class:`sanic.request.Request`\n :param exception: Exception to handle\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Wrap the return value obtained from :func:`default`\n or registered handler for that type of exception.\n \"\"\"\n handler = self.lookup(exception)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is None:\n response = self.default(request, exception)\n except Exception:\n self.log(format_exc())\n try:\n url = repr(request.url)\n except AttributeError:\n url = \"unknown\"\n response_message = (\n \"Exception raised in exception handler \" '\"%s\" for uri: %s'\n )\n error_logger.exception(response_message, handler.__name__, 
url)\n\n if self.debug:\n return text(response_message % (handler.__name__, url), 500)\n else:\n return text(\"An error occurred while handling an error\", 500)\n return response\n\n def log(self, message, level=\"error\"):\n \"\"\"\n Deprecated, do not use.\n \"\"\"\n\n def default(self, request, exception):\n \"\"\"\n Provide a default behavior for the objects of :class:`ErrorHandler`.\n If a developer chooses to extent the :class:`ErrorHandler` they can\n provide a custom implementation for this method to behave in a way\n they see fit.\n\n :param request: Incoming request\n :param exception: Exception object\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :return:\n \"\"\"\n quiet = getattr(exception, \"quiet\", False)\n if quiet is False:\n try:\n url = repr(request.url)\n except AttributeError:\n url = \"unknown\"\n\n self.log(format_exc())\n error_logger.exception(\n \"Exception occurred while handling uri: %s\", url\n )\n\n return exception_response(request, exception, self.debug)\n\n\nclass ContentRangeHandler:\n \"\"\"\n A mechanism to parse and process the incoming request headers to\n extract the content range information.\n\n :param request: Incoming api request\n :param stats: Stats related to the content\n\n :type request: :class:`sanic.request.Request`\n :type stats: :class:`posix.stat_result`\n\n :ivar start: Content Range start\n :ivar end: Content Range end\n :ivar size: Length of the content\n :ivar total: Total size identified by the :class:`posix.stat_result`\n instance\n :ivar ContentRangeHandler.headers: Content range header ``dict``\n \"\"\"\n\n __slots__ = (\"start\", \"end\", \"size\", \"total\", \"headers\")\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.getone(\"range\", None)\n if _range is None:\n raise HeaderNotFound(\"Range Header Not Found\")\n unit, _, value = tuple(map(str.strip, _range.partition(\"=\")))\n if unit != \"bytes\":\n raise InvalidRangeType(\n \"%s is not a valid Range Type\" % (unit,), self\n )\n start_b, _, end_b = tuple(map(str.strip, value.partition(\"-\")))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (start_b,), self\n )\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (end_b,), self\n )\n if self.end is None:\n if self.start is None:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total - 1\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total - 1\n if self.start >= self.end:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n self.size = self.end - self.start + 1\n self.headers = {\n \"Content-Range\": \"bytes %s-%s/%s\"\n % (self.start, self.end, self.total)\n }\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py"}], "after_files": [{"content": "from traceback import format_exc\n\nfrom sanic.errorpages import exception_response\nfrom sanic.exceptions import (\n ContentRangeError,\n HeaderNotFound,\n InvalidRangeType,\n)\nfrom sanic.log import error_logger\nfrom sanic.response import text\n\n\nclass ErrorHandler:\n \"\"\"\n Provide :class:`sanic.app.Sanic` 
application with a mechanism to handle\n and process any and all uncaught exceptions in a way the application\n developer will set fit.\n\n This error handling framework is built into the core that can be extended\n by the developers to perform a wide range of tasks from recording the error\n stats to reporting them to an external service that can be used for\n realtime alerting system.\n\n \"\"\"\n\n handlers = None\n cached_handlers = None\n\n def __init__(self):\n self.handlers = []\n self.cached_handlers = {}\n self.debug = False\n\n def add(self, exception, handler):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n\n :param exception: Type of exception that need to be handled\n :param handler: Reference to the method that will handle the exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :type handler: ``function``\n\n :return: None\n \"\"\"\n # self.handlers to be deprecated and removed in version 21.12\n self.handlers.append((exception, handler))\n self.cached_handlers[exception] = handler\n\n def lookup(self, exception):\n \"\"\"\n Lookup the existing instance of :class:`ErrorHandler` and fetch the\n registered handler for a specific type of exception.\n\n This method leverages a dict lookup to speedup the retrieval process.\n\n :param exception: Type of exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Registered function if found ``None`` otherwise\n \"\"\"\n exception_class = type(exception)\n if exception_class in self.cached_handlers:\n return self.cached_handlers[exception_class]\n\n for ancestor in type.mro(exception_class):\n if ancestor in self.cached_handlers:\n handler = self.cached_handlers[ancestor]\n self.cached_handlers[exception_class] = handler\n return handler\n if ancestor is BaseException:\n break\n self.cached_handlers[exception_class] = None\n handler = None\n return handler\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Instance of :class:`sanic.request.Request`\n :param exception: Exception to handle\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Wrap the return value obtained from :func:`default`\n or registered handler for that type of exception.\n \"\"\"\n handler = self.lookup(exception)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is None:\n response = self.default(request, exception)\n except Exception:\n self.log(format_exc())\n try:\n url = repr(request.url)\n except AttributeError:\n url = \"unknown\"\n response_message = (\n \"Exception raised in exception handler \" '\"%s\" for uri: %s'\n )\n error_logger.exception(response_message, handler.__name__, url)\n\n if self.debug:\n return text(response_message % (handler.__name__, url), 500)\n else:\n return text(\"An error occurred while handling an error\", 500)\n return response\n\n def log(self, message, level=\"error\"):\n \"\"\"\n Deprecated, do not use.\n \"\"\"\n\n def default(self, request, exception):\n \"\"\"\n Provide a default behavior for the objects of :class:`ErrorHandler`.\n If a developer chooses to extent the :class:`ErrorHandler` they can\n provide a custom implementation for this method to behave in a way\n they see fit.\n\n :param request: Incoming request\n :param exception: Exception object\n\n :type 
request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :return:\n \"\"\"\n quiet = getattr(exception, \"quiet\", False)\n if quiet is False:\n try:\n url = repr(request.url)\n except AttributeError:\n url = \"unknown\"\n\n self.log(format_exc())\n error_logger.exception(\n \"Exception occurred while handling uri: %s\", url\n )\n\n return exception_response(request, exception, self.debug)\n\n\nclass ContentRangeHandler:\n \"\"\"\n A mechanism to parse and process the incoming request headers to\n extract the content range information.\n\n :param request: Incoming api request\n :param stats: Stats related to the content\n\n :type request: :class:`sanic.request.Request`\n :type stats: :class:`posix.stat_result`\n\n :ivar start: Content Range start\n :ivar end: Content Range end\n :ivar size: Length of the content\n :ivar total: Total size identified by the :class:`posix.stat_result`\n instance\n :ivar ContentRangeHandler.headers: Content range header ``dict``\n \"\"\"\n\n __slots__ = (\"start\", \"end\", \"size\", \"total\", \"headers\")\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.getone(\"range\", None)\n if _range is None:\n raise HeaderNotFound(\"Range Header Not Found\")\n unit, _, value = tuple(map(str.strip, _range.partition(\"=\")))\n if unit != \"bytes\":\n raise InvalidRangeType(\n \"%s is not a valid Range Type\" % (unit,), self\n )\n start_b, _, end_b = tuple(map(str.strip, value.partition(\"-\")))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (start_b,), self\n )\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise ContentRangeError(\n \"'%s' is invalid for Content Range\" % (end_b,), self\n )\n if self.end is None:\n if self.start is None:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total - 1\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total - 1\n if self.start >= self.end:\n raise ContentRangeError(\n \"Invalid for Content Range parameters\", self\n )\n self.size = self.end - self.start + 1\n self.headers = {\n \"Content-Range\": \"bytes %s-%s/%s\"\n % (self.start, self.end, self.total)\n }\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py"}]}
2,848
385
gh_patches_debug_42741
rasdani/github-patches
git_diff
nextcloud__appstore-42
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- rewrite info.xml on upload to match required schema order --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nextcloudappstore/core/api/v1/release/parser.py` Content: ``` 1 import re 2 import tarfile # type: ignore 3 import lxml.etree # type: ignore 4 from typing import Dict, Any 5 6 from nextcloudappstore.core.api.v1.release import ReleaseConfig 7 from rest_framework.exceptions import APIException # type: ignore 8 9 10 class MaxSizeAppMetadataXmlException(APIException): 11 pass 12 13 14 class InvalidAppMetadataXmlException(APIException): 15 pass 16 17 18 class UnsupportedAppArchiveException(APIException): 19 pass 20 21 22 class InvalidAppPackageStructureException(APIException): 23 pass 24 25 26 class GunZipAppMetadataExtractor: 27 def __init__(self, config: ReleaseConfig) -> None: 28 """ 29 :argument config the config 30 """ 31 self.config = config 32 self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*$') 33 34 def extract_app_metadata(self, archive_path: str) -> str: 35 """ 36 Extracts the info.xml from an tar.gz archive 37 :argument archive_path the path to the tar.gz archive 38 :raises InvalidAppPackageStructureException if the first level folder 39 does not equal the app_id or no info.xml file could be found in the 40 appinfo folder 41 :return the info.xml as string 42 """ 43 if not tarfile.is_tarfile(archive_path): 44 msg = '%s is not a valid tar.gz archive ' % archive_path 45 raise UnsupportedAppArchiveException(msg) 46 47 with tarfile.open(archive_path, 'r:gz') as tar: 48 result = self._parse_archive(tar) 49 return result 50 51 def _parse_archive(self, tar: Any) -> str: 52 folder = list( 53 filter(lambda name: re.match(self.app_folder_regex, name), 54 tar.getnames() 55 ) 56 ) 57 if len(folder) > 1: 58 msg = 'More than one possible app folder found' 59 raise InvalidAppPackageStructureException(msg) 60 elif len(folder) == 0: 61 msg = 'No possible app folder found. 
App folder must contain ' \ 62 'only lowercase ASCII characters or underscores' 63 raise InvalidAppPackageStructureException(msg) 64 65 info_path = '%s/appinfo/info.xml' % folder[0] 66 try: 67 info_member = tar.getmember(info_path) # type: ignore 68 if info_member.issym() or info_member.islnk(): 69 msg = 'Symlinks and hard links can not be used for info.xml ' \ 70 'files' 71 raise InvalidAppPackageStructureException(msg) 72 if info_member.size > self.config.max_info_size: 73 msg = '%s was bigger than allowed %i bytes' % ( 74 info_path, self.config.max_info_size) 75 raise MaxSizeAppMetadataXmlException(msg) 76 info_file = tar.extractfile(info_member) 77 return info_file.read().decode('utf-8') 78 except KeyError: 79 msg = 'Could not find %s file inside the archive' % info_path 80 raise InvalidAppPackageStructureException(msg) 81 82 83 def element_to_dict(element: Any) -> Dict: 84 type = element.get('type') 85 key = element.tag.replace('-', '_') 86 if type == 'int': 87 return {key: int(element.text)} 88 elif type == 'list': 89 return {key: list(map(element_to_dict, element.iterchildren()))} 90 elif len(list(element)) > 0: 91 contents = {} 92 for child in element.iterchildren(): 93 contents.update(element_to_dict(child)) 94 return {key: contents} 95 else: 96 return {key: element.text} 97 98 99 def parse_app_metadata(xml: str, schema: str, xslt: str) -> Dict: 100 """ 101 Parses, validates and maps the xml onto a dict 102 :argument xml the info.xml string to parse 103 :argument schema the schema xml as string 104 :argument xslt the xslt to transform it to a matching structure 105 :raises InvalidAppMetadataXmlException if the schema does not validate 106 :return the parsed xml as dict 107 """ 108 parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True, 109 remove_comments=True, load_dtd=False, 110 remove_blank_text=True, dtd_validation=False) 111 schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser) 112 doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser) 113 for _ in doc.iter(lxml.etree.Entity): 114 raise InvalidAppMetadataXmlException('Must not contain entities') 115 schema = lxml.etree.XMLSchema(schema_doc) 116 try: 117 schema.assertValid(doc) # type: ignore 118 except lxml.etree.DocumentInvalid as e: 119 msg = 'info.xml did not validate: %s' % e 120 raise InvalidAppMetadataXmlException(msg) 121 transform = lxml.etree.XSLT(lxml.etree.XML(xslt)) 122 transformed_doc = transform(doc) 123 mapped = element_to_dict(transformed_doc.getroot()) 124 return mapped 125 ``` Path: `nextcloudappstore/core/api/v1/release/__init__.py` Content: ``` 1 from django.conf import settings # type: ignore 2 from nextcloudappstore.core.facades import read_relative_file 3 4 5 class ReleaseConfig: 6 def __init__(self) -> None: 7 self.max_info_size = 512 * 1024 # bytes 8 self.download_root = settings.RELEASE_DOWNLOAD_ROOT # type: str 9 self.download_max_timeout = 60 10 self.download_max_redirects = 10 11 self.download_max_size = 20 * (1024 ** 2) 12 self.info_schema = read_relative_file(__file__, 'info.xsd') 13 self.info_xslt = read_relative_file(__file__, 'info.xslt') 14 ``` Path: `nextcloudappstore/core/api/v1/release/provider.py` Content: ``` 1 from nextcloudappstore.core.api.v1.release import ReleaseConfig 2 from nextcloudappstore.core.api.v1.release.downloader import \ 3 AppReleaseDownloader 4 from nextcloudappstore.core.api.v1.release.parser import \ 5 GunZipAppMetadataExtractor, parse_app_metadata 6 from hashlib import sha256 7 from typing import Dict 8 9 10 class 
AppReleaseProvider: 11 def __init__(self, downloader: AppReleaseDownloader, 12 extractor: GunZipAppMetadataExtractor, 13 config: ReleaseConfig) -> None: 14 self.config = config 15 self.extractor = extractor 16 self.downloader = downloader 17 18 def get_release_info(self, url: str) -> Dict: 19 with self.downloader.get_archive( 20 url, self.config.download_root, self.config.download_max_timeout, 21 self.config.download_max_redirects, self.config.download_max_size 22 ) as download: 23 xml = self.extractor.extract_app_metadata(download.filename) 24 info = parse_app_metadata(xml, self.config.info_schema, 25 self.config.info_xslt) 26 # generate sha256sum for archive 27 with open(download.filename, 'rb') as f: 28 checksum = sha256(f.read()).hexdigest() 29 info['app']['release']['checksum'] = checksum 30 return info 31 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nextcloudappstore/core/api/v1/release/__init__.py b/nextcloudappstore/core/api/v1/release/__init__.py --- a/nextcloudappstore/core/api/v1/release/__init__.py +++ b/nextcloudappstore/core/api/v1/release/__init__.py @@ -11,3 +11,4 @@ self.download_max_size = 20 * (1024 ** 2) self.info_schema = read_relative_file(__file__, 'info.xsd') self.info_xslt = read_relative_file(__file__, 'info.xslt') + self.pre_info_xslt = read_relative_file(__file__, 'pre-info.xslt') diff --git a/nextcloudappstore/core/api/v1/release/parser.py b/nextcloudappstore/core/api/v1/release/parser.py --- a/nextcloudappstore/core/api/v1/release/parser.py +++ b/nextcloudappstore/core/api/v1/release/parser.py @@ -23,6 +23,10 @@ pass +class XMLSyntaxError(APIException): + pass + + class GunZipAppMetadataExtractor: def __init__(self, config: ReleaseConfig) -> None: """ @@ -96,29 +100,39 @@ return {key: element.text} -def parse_app_metadata(xml: str, schema: str, xslt: str) -> Dict: +def parse_app_metadata(xml: str, schema: str, pre_xslt: str, + xslt: str) -> Dict: """ Parses, validates and maps the xml onto a dict :argument xml the info.xml string to parse :argument schema the schema xml as string + :argument pre_xslt xslt which is run before validation to ensure that + everything is in the correct order and that unknown elements are excluded :argument xslt the xslt to transform it to a matching structure :raises InvalidAppMetadataXmlException if the schema does not validate :return the parsed xml as dict """ parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True, remove_comments=True, load_dtd=False, - remove_blank_text=True, dtd_validation=False) - schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser) - doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser) + remove_blank_text=True, dtd_validation=False + ) + try: + doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser) + except lxml.etree.XMLSyntaxError as e: + msg = 'info.xml contains malformed xml: %s' % e + raise XMLSyntaxError(msg) for _ in doc.iter(lxml.etree.Entity): raise InvalidAppMetadataXmlException('Must not contain entities') + pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt)) + pre_transformed_doc = pre_transform(doc) + schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser) schema = lxml.etree.XMLSchema(schema_doc) try: - schema.assertValid(doc) # type: ignore + schema.assertValid(pre_transformed_doc) # type: ignore except lxml.etree.DocumentInvalid as e: msg = 'info.xml did not validate: %s' % e raise InvalidAppMetadataXmlException(msg) transform = lxml.etree.XSLT(lxml.etree.XML(xslt)) - transformed_doc = transform(doc) + transformed_doc = transform(pre_transformed_doc) mapped = element_to_dict(transformed_doc.getroot()) return mapped diff --git a/nextcloudappstore/core/api/v1/release/provider.py b/nextcloudappstore/core/api/v1/release/provider.py --- a/nextcloudappstore/core/api/v1/release/provider.py +++ b/nextcloudappstore/core/api/v1/release/provider.py @@ -22,6 +22,7 @@ ) as download: xml = self.extractor.extract_app_metadata(download.filename) info = parse_app_metadata(xml, self.config.info_schema, + self.config.pre_info_xslt, self.config.info_xslt) # generate sha256sum for archive with open(download.filename, 'rb') as f:
{"golden_diff": "diff --git a/nextcloudappstore/core/api/v1/release/__init__.py b/nextcloudappstore/core/api/v1/release/__init__.py\n--- a/nextcloudappstore/core/api/v1/release/__init__.py\n+++ b/nextcloudappstore/core/api/v1/release/__init__.py\n@@ -11,3 +11,4 @@\n self.download_max_size = 20 * (1024 ** 2)\n self.info_schema = read_relative_file(__file__, 'info.xsd')\n self.info_xslt = read_relative_file(__file__, 'info.xslt')\n+ self.pre_info_xslt = read_relative_file(__file__, 'pre-info.xslt')\ndiff --git a/nextcloudappstore/core/api/v1/release/parser.py b/nextcloudappstore/core/api/v1/release/parser.py\n--- a/nextcloudappstore/core/api/v1/release/parser.py\n+++ b/nextcloudappstore/core/api/v1/release/parser.py\n@@ -23,6 +23,10 @@\n pass\n \n \n+class XMLSyntaxError(APIException):\n+ pass\n+\n+\n class GunZipAppMetadataExtractor:\n def __init__(self, config: ReleaseConfig) -> None:\n \"\"\"\n@@ -96,29 +100,39 @@\n return {key: element.text}\n \n \n-def parse_app_metadata(xml: str, schema: str, xslt: str) -> Dict:\n+def parse_app_metadata(xml: str, schema: str, pre_xslt: str,\n+ xslt: str) -> Dict:\n \"\"\"\n Parses, validates and maps the xml onto a dict\n :argument xml the info.xml string to parse\n :argument schema the schema xml as string\n+ :argument pre_xslt xslt which is run before validation to ensure that\n+ everything is in the correct order and that unknown elements are excluded\n :argument xslt the xslt to transform it to a matching structure\n :raises InvalidAppMetadataXmlException if the schema does not validate\n :return the parsed xml as dict\n \"\"\"\n parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,\n remove_comments=True, load_dtd=False,\n- remove_blank_text=True, dtd_validation=False)\n- schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n- doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n+ remove_blank_text=True, dtd_validation=False\n+ )\n+ try:\n+ doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n+ except lxml.etree.XMLSyntaxError as e:\n+ msg = 'info.xml contains malformed xml: %s' % e\n+ raise XMLSyntaxError(msg)\n for _ in doc.iter(lxml.etree.Entity):\n raise InvalidAppMetadataXmlException('Must not contain entities')\n+ pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))\n+ pre_transformed_doc = pre_transform(doc)\n+ schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n schema = lxml.etree.XMLSchema(schema_doc)\n try:\n- schema.assertValid(doc) # type: ignore\n+ schema.assertValid(pre_transformed_doc) # type: ignore\n except lxml.etree.DocumentInvalid as e:\n msg = 'info.xml did not validate: %s' % e\n raise InvalidAppMetadataXmlException(msg)\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n- transformed_doc = transform(doc)\n+ transformed_doc = transform(pre_transformed_doc)\n mapped = element_to_dict(transformed_doc.getroot())\n return mapped\ndiff --git a/nextcloudappstore/core/api/v1/release/provider.py b/nextcloudappstore/core/api/v1/release/provider.py\n--- a/nextcloudappstore/core/api/v1/release/provider.py\n+++ b/nextcloudappstore/core/api/v1/release/provider.py\n@@ -22,6 +22,7 @@\n ) as download:\n xml = self.extractor.extract_app_metadata(download.filename)\n info = parse_app_metadata(xml, self.config.info_schema,\n+ self.config.pre_info_xslt,\n self.config.info_xslt)\n # generate sha256sum for archive\n with open(download.filename, 'rb') as f:\n", "issue": "rewrite info.xml on upload to match required schema order\n\n", "before_files": 
[{"content": "import re\nimport tarfile # type: ignore\nimport lxml.etree # type: ignore\nfrom typing import Dict, Any\n\nfrom nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom rest_framework.exceptions import APIException # type: ignore\n\n\nclass MaxSizeAppMetadataXmlException(APIException):\n pass\n\n\nclass InvalidAppMetadataXmlException(APIException):\n pass\n\n\nclass UnsupportedAppArchiveException(APIException):\n pass\n\n\nclass InvalidAppPackageStructureException(APIException):\n pass\n\n\nclass GunZipAppMetadataExtractor:\n def __init__(self, config: ReleaseConfig) -> None:\n \"\"\"\n :argument config the config\n \"\"\"\n self.config = config\n self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*$')\n\n def extract_app_metadata(self, archive_path: str) -> str:\n \"\"\"\n Extracts the info.xml from an tar.gz archive\n :argument archive_path the path to the tar.gz archive\n :raises InvalidAppPackageStructureException if the first level folder\n does not equal the app_id or no info.xml file could be found in the\n appinfo folder\n :return the info.xml as string\n \"\"\"\n if not tarfile.is_tarfile(archive_path):\n msg = '%s is not a valid tar.gz archive ' % archive_path\n raise UnsupportedAppArchiveException(msg)\n\n with tarfile.open(archive_path, 'r:gz') as tar:\n result = self._parse_archive(tar)\n return result\n\n def _parse_archive(self, tar: Any) -> str:\n folder = list(\n filter(lambda name: re.match(self.app_folder_regex, name),\n tar.getnames()\n )\n )\n if len(folder) > 1:\n msg = 'More than one possible app folder found'\n raise InvalidAppPackageStructureException(msg)\n elif len(folder) == 0:\n msg = 'No possible app folder found. App folder must contain ' \\\n 'only lowercase ASCII characters or underscores'\n raise InvalidAppPackageStructureException(msg)\n\n info_path = '%s/appinfo/info.xml' % folder[0]\n try:\n info_member = tar.getmember(info_path) # type: ignore\n if info_member.issym() or info_member.islnk():\n msg = 'Symlinks and hard links can not be used for info.xml ' \\\n 'files'\n raise InvalidAppPackageStructureException(msg)\n if info_member.size > self.config.max_info_size:\n msg = '%s was bigger than allowed %i bytes' % (\n info_path, self.config.max_info_size)\n raise MaxSizeAppMetadataXmlException(msg)\n info_file = tar.extractfile(info_member)\n return info_file.read().decode('utf-8')\n except KeyError:\n msg = 'Could not find %s file inside the archive' % info_path\n raise InvalidAppPackageStructureException(msg)\n\n\ndef element_to_dict(element: Any) -> Dict:\n type = element.get('type')\n key = element.tag.replace('-', '_')\n if type == 'int':\n return {key: int(element.text)}\n elif type == 'list':\n return {key: list(map(element_to_dict, element.iterchildren()))}\n elif len(list(element)) > 0:\n contents = {}\n for child in element.iterchildren():\n contents.update(element_to_dict(child))\n return {key: contents}\n else:\n return {key: element.text}\n\n\ndef parse_app_metadata(xml: str, schema: str, xslt: str) -> Dict:\n \"\"\"\n Parses, validates and maps the xml onto a dict\n :argument xml the info.xml string to parse\n :argument schema the schema xml as string\n :argument xslt the xslt to transform it to a matching structure\n :raises InvalidAppMetadataXmlException if the schema does not validate\n :return the parsed xml as dict\n \"\"\"\n parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,\n remove_comments=True, load_dtd=False,\n remove_blank_text=True, dtd_validation=False)\n schema_doc = 
lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n for _ in doc.iter(lxml.etree.Entity):\n raise InvalidAppMetadataXmlException('Must not contain entities')\n schema = lxml.etree.XMLSchema(schema_doc)\n try:\n schema.assertValid(doc) # type: ignore\n except lxml.etree.DocumentInvalid as e:\n msg = 'info.xml did not validate: %s' % e\n raise InvalidAppMetadataXmlException(msg)\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(doc)\n mapped = element_to_dict(transformed_doc.getroot())\n return mapped\n", "path": "nextcloudappstore/core/api/v1/release/parser.py"}, {"content": "from django.conf import settings # type: ignore\nfrom nextcloudappstore.core.facades import read_relative_file\n\n\nclass ReleaseConfig:\n def __init__(self) -> None:\n self.max_info_size = 512 * 1024 # bytes\n self.download_root = settings.RELEASE_DOWNLOAD_ROOT # type: str\n self.download_max_timeout = 60\n self.download_max_redirects = 10\n self.download_max_size = 20 * (1024 ** 2)\n self.info_schema = read_relative_file(__file__, 'info.xsd')\n self.info_xslt = read_relative_file(__file__, 'info.xslt')\n", "path": "nextcloudappstore/core/api/v1/release/__init__.py"}, {"content": "from nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom nextcloudappstore.core.api.v1.release.downloader import \\\n AppReleaseDownloader\nfrom nextcloudappstore.core.api.v1.release.parser import \\\n GunZipAppMetadataExtractor, parse_app_metadata\nfrom hashlib import sha256\nfrom typing import Dict\n\n\nclass AppReleaseProvider:\n def __init__(self, downloader: AppReleaseDownloader,\n extractor: GunZipAppMetadataExtractor,\n config: ReleaseConfig) -> None:\n self.config = config\n self.extractor = extractor\n self.downloader = downloader\n\n def get_release_info(self, url: str) -> Dict:\n with self.downloader.get_archive(\n url, self.config.download_root, self.config.download_max_timeout,\n self.config.download_max_redirects, self.config.download_max_size\n ) as download:\n xml = self.extractor.extract_app_metadata(download.filename)\n info = parse_app_metadata(xml, self.config.info_schema,\n self.config.info_xslt)\n # generate sha256sum for archive\n with open(download.filename, 'rb') as f:\n checksum = sha256(f.read()).hexdigest()\n info['app']['release']['checksum'] = checksum\n return info\n", "path": "nextcloudappstore/core/api/v1/release/provider.py"}], "after_files": [{"content": "import re\nimport tarfile # type: ignore\nimport lxml.etree # type: ignore\nfrom typing import Dict, Any\n\nfrom nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom rest_framework.exceptions import APIException # type: ignore\n\n\nclass MaxSizeAppMetadataXmlException(APIException):\n pass\n\n\nclass InvalidAppMetadataXmlException(APIException):\n pass\n\n\nclass UnsupportedAppArchiveException(APIException):\n pass\n\n\nclass InvalidAppPackageStructureException(APIException):\n pass\n\n\nclass XMLSyntaxError(APIException):\n pass\n\n\nclass GunZipAppMetadataExtractor:\n def __init__(self, config: ReleaseConfig) -> None:\n \"\"\"\n :argument config the config\n \"\"\"\n self.config = config\n self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*$')\n\n def extract_app_metadata(self, archive_path: str) -> str:\n \"\"\"\n Extracts the info.xml from an tar.gz archive\n :argument archive_path the path to the tar.gz archive\n :raises InvalidAppPackageStructureException if the first level folder\n does not equal the app_id or no 
info.xml file could be found in the\n appinfo folder\n :return the info.xml as string\n \"\"\"\n if not tarfile.is_tarfile(archive_path):\n msg = '%s is not a valid tar.gz archive ' % archive_path\n raise UnsupportedAppArchiveException(msg)\n\n with tarfile.open(archive_path, 'r:gz') as tar:\n result = self._parse_archive(tar)\n return result\n\n def _parse_archive(self, tar: Any) -> str:\n folder = list(\n filter(lambda name: re.match(self.app_folder_regex, name),\n tar.getnames()\n )\n )\n if len(folder) > 1:\n msg = 'More than one possible app folder found'\n raise InvalidAppPackageStructureException(msg)\n elif len(folder) == 0:\n msg = 'No possible app folder found. App folder must contain ' \\\n 'only lowercase ASCII characters or underscores'\n raise InvalidAppPackageStructureException(msg)\n\n info_path = '%s/appinfo/info.xml' % folder[0]\n try:\n info_member = tar.getmember(info_path) # type: ignore\n if info_member.issym() or info_member.islnk():\n msg = 'Symlinks and hard links can not be used for info.xml ' \\\n 'files'\n raise InvalidAppPackageStructureException(msg)\n if info_member.size > self.config.max_info_size:\n msg = '%s was bigger than allowed %i bytes' % (\n info_path, self.config.max_info_size)\n raise MaxSizeAppMetadataXmlException(msg)\n info_file = tar.extractfile(info_member)\n return info_file.read().decode('utf-8')\n except KeyError:\n msg = 'Could not find %s file inside the archive' % info_path\n raise InvalidAppPackageStructureException(msg)\n\n\ndef element_to_dict(element: Any) -> Dict:\n type = element.get('type')\n key = element.tag.replace('-', '_')\n if type == 'int':\n return {key: int(element.text)}\n elif type == 'list':\n return {key: list(map(element_to_dict, element.iterchildren()))}\n elif len(list(element)) > 0:\n contents = {}\n for child in element.iterchildren():\n contents.update(element_to_dict(child))\n return {key: contents}\n else:\n return {key: element.text}\n\n\ndef parse_app_metadata(xml: str, schema: str, pre_xslt: str,\n xslt: str) -> Dict:\n \"\"\"\n Parses, validates and maps the xml onto a dict\n :argument xml the info.xml string to parse\n :argument schema the schema xml as string\n :argument pre_xslt xslt which is run before validation to ensure that\n everything is in the correct order and that unknown elements are excluded\n :argument xslt the xslt to transform it to a matching structure\n :raises InvalidAppMetadataXmlException if the schema does not validate\n :return the parsed xml as dict\n \"\"\"\n parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,\n remove_comments=True, load_dtd=False,\n remove_blank_text=True, dtd_validation=False\n )\n try:\n doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n except lxml.etree.XMLSyntaxError as e:\n msg = 'info.xml contains malformed xml: %s' % e\n raise XMLSyntaxError(msg)\n for _ in doc.iter(lxml.etree.Entity):\n raise InvalidAppMetadataXmlException('Must not contain entities')\n pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))\n pre_transformed_doc = pre_transform(doc)\n schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n schema = lxml.etree.XMLSchema(schema_doc)\n try:\n schema.assertValid(pre_transformed_doc) # type: ignore\n except lxml.etree.DocumentInvalid as e:\n msg = 'info.xml did not validate: %s' % e\n raise InvalidAppMetadataXmlException(msg)\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(pre_transformed_doc)\n mapped = 
element_to_dict(transformed_doc.getroot())\n return mapped\n", "path": "nextcloudappstore/core/api/v1/release/parser.py"}, {"content": "from django.conf import settings # type: ignore\nfrom nextcloudappstore.core.facades import read_relative_file\n\n\nclass ReleaseConfig:\n def __init__(self) -> None:\n self.max_info_size = 512 * 1024 # bytes\n self.download_root = settings.RELEASE_DOWNLOAD_ROOT # type: str\n self.download_max_timeout = 60\n self.download_max_redirects = 10\n self.download_max_size = 20 * (1024 ** 2)\n self.info_schema = read_relative_file(__file__, 'info.xsd')\n self.info_xslt = read_relative_file(__file__, 'info.xslt')\n self.pre_info_xslt = read_relative_file(__file__, 'pre-info.xslt')\n", "path": "nextcloudappstore/core/api/v1/release/__init__.py"}, {"content": "from nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom nextcloudappstore.core.api.v1.release.downloader import \\\n AppReleaseDownloader\nfrom nextcloudappstore.core.api.v1.release.parser import \\\n GunZipAppMetadataExtractor, parse_app_metadata\nfrom hashlib import sha256\nfrom typing import Dict\n\n\nclass AppReleaseProvider:\n def __init__(self, downloader: AppReleaseDownloader,\n extractor: GunZipAppMetadataExtractor,\n config: ReleaseConfig) -> None:\n self.config = config\n self.extractor = extractor\n self.downloader = downloader\n\n def get_release_info(self, url: str) -> Dict:\n with self.downloader.get_archive(\n url, self.config.download_root, self.config.download_max_timeout,\n self.config.download_max_redirects, self.config.download_max_size\n ) as download:\n xml = self.extractor.extract_app_metadata(download.filename)\n info = parse_app_metadata(xml, self.config.info_schema,\n self.config.pre_info_xslt,\n self.config.info_xslt)\n # generate sha256sum for archive\n with open(download.filename, 'rb') as f:\n checksum = sha256(f.read()).hexdigest()\n info['app']['release']['checksum'] = checksum\n return info\n", "path": "nextcloudappstore/core/api/v1/release/provider.py"}]}
2,129
953
gh_patches_debug_61694
rasdani/github-patches
git_diff
mdn__kuma-7762
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Locale searches aren't working **Summary** https://developer.mozilla.org/de/search?q=Zuweisungsoperator yields nothing. But it should work, because of: https://developer.mozilla.org/api/v1/search?q=Zuweisungsoperator&locale=de **Additional context** _Is there anything else we should know?_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kuma/search/views.py` Content: ``` 1 from urllib.parse import parse_qs, urlencode 2 3 from django.conf import settings 4 from django.shortcuts import render 5 from django.urls import reverse_lazy 6 from django.views.decorators.cache import never_cache 7 from django.views.decorators.http import require_GET 8 from django.views.generic import RedirectView 9 from ratelimit.decorators import ratelimit 10 11 from kuma.api.v1.search import search as search_api 12 from kuma.core.decorators import shared_cache_control 13 14 15 # Since the search endpoint accepts user input (via query parameters) and its 16 # response is compressed, use rate limiting to mitigate the BREACH attack 17 # (see http://breachattack.com/). It still needs to allow a user to click 18 # the filter switches (bug 1426968). 19 # Alternate: forbid gzip by setting Content-Encoding: identity 20 @never_cache 21 @require_GET 22 @ratelimit(key="user_or_ip", rate="25/m", block=True) 23 def search(request, *args, **kwargs): 24 """ 25 The search view. 26 27 --2021-- THIS VIEW IS A HACK! --2021-- 28 This Django view exists to server-side render the search results page. 29 But we're moving the search result page to Yari and that one will use a XHR 30 request (to /api/v1/search) from a skeleton page (aka. SPA). 31 But as a way to get to that, we need to transition from the old to the new. 32 So, this page uses the Django view in kuma.api.v1.search.search, which 33 returns a special `JsonResponse` instance whose data we can pluck out 34 to our needs for this old view. 35 Once we've fully moved to the Yari (static + XHR to v1 API) site-search, 36 we can comfortably delete this view. 37 """ 38 # The underlying v1 API supports searching without a 'q' but the web 39 # UI doesn't. For example, the search input field requires a value. 40 # So we match that here too. 41 if not request.GET.get("q", "").strip(): 42 status = 400 43 context = {"results": {}} 44 else: 45 # TODO consider, if the current locale is *not* en-US, that we force 46 # it to do a search in both locales. 47 # This might come in handy for people searching in a locale where 48 # there's very little results but they'd be happy to get the en-US ones. 49 response = search_api(request, *args, **kwargs) 50 results = response.data 51 52 error = None 53 status = response.status_code 54 55 # Determine if there were validation errors 56 if status == 400: 57 error = "" 58 for key, messages in results["errors"].items(): 59 for message in messages: 60 error += f"{key}: {message['message']}\n" 61 else: 62 # Have to rearrange the 'results' in a way the old search expects it. 
63 # ...which is as follows: 64 # - `count`: integer number of matched documents 65 # - `previous`: a URL or empty string 66 # - `next`: a URL or empty string 67 # - `query`: string 68 # - `start`: pagination number 69 # - `end`: pagination number 70 # - `documents`: 71 # - `title` 72 # - `locale` 73 # - `slug` 74 # - `excerpt`: string of safe HTML 75 next_url = "" 76 previous_url = "" 77 page = results["metadata"]["page"] 78 size = results["metadata"]["size"] 79 count = results["metadata"]["total"]["value"] 80 query_string = request.META.get("QUERY_STRING") 81 query_string_parsed = parse_qs(query_string) 82 if (page + 1) * size < count: 83 query_string_parsed["page"] = f"{page + 1}" 84 next_url = f"?{urlencode(query_string_parsed, True)}" 85 if page > 1: 86 if page == 2: 87 del query_string_parsed["page"] 88 else: 89 query_string_parsed["page"] = f"{page - 1}" 90 previous_url = f"?{urlencode(query_string_parsed, True)}" 91 92 results = { 93 "count": count, 94 "next": next_url, 95 "previous": previous_url, 96 "query": request.GET.get("q"), 97 "start": (page - 1) * size + 1, 98 "end": page * size, 99 "documents": [ 100 { 101 "title": x["title"], 102 "slug": x["slug"], 103 "locale": x["locale"], 104 "excerpt": "<br>".join(x["highlight"].get("body", [])), 105 } 106 for x in results["documents"] 107 ], 108 } 109 110 context = {"results": {"results": None if error else results, "error": error}} 111 return render(request, "search/react.html", context, status=status) 112 113 114 class SearchRedirectView(RedirectView): 115 permanent = True 116 117 def get_redirect_url(self, *args, **kwargs): 118 query_string = self.request.META.get("QUERY_STRING") 119 url = reverse_lazy("api.v1.search") 120 qs = parse_qs(query_string) 121 # If you used `/en-Us/search.json` you can skip the `?locale=` 122 # because the default locale in `/api/v1/search` is `en-US`. 123 if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower(): 124 qs["locale"] = self.request.LANGUAGE_CODE 125 if qs: 126 url += "?" + urlencode(qs, True) 127 return url 128 129 130 @shared_cache_control(s_maxage=60 * 60 * 24 * 7) 131 def plugin(request): 132 """Render an OpenSearch Plugin.""" 133 return render( 134 request, 135 "search/plugin.html", 136 {"locale": request.LANGUAGE_CODE}, 137 content_type="application/opensearchdescription+xml", 138 ) 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kuma/search/views.py b/kuma/search/views.py --- a/kuma/search/views.py +++ b/kuma/search/views.py @@ -46,7 +46,7 @@ # it to do a search in both locales. # This might come in handy for people searching in a locale where # there's very little results but they'd be happy to get the en-US ones. - response = search_api(request, *args, **kwargs) + response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs) results = response.data error = None
{"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -46,7 +46,7 @@\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n- response = search_api(request, *args, **kwargs)\n+ response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n \n error = None\n", "issue": "Locale searches aren't working\n**Summary**\r\nhttps://developer.mozilla.org/de/search?q=Zuweisungsoperator yields nothing. \r\nBut it should work, because of: https://developer.mozilla.org/api/v1/search?q=Zuweisungsoperator&locale=de\r\n\r\n\r\n**Additional context**\r\n_Is there anything else we should know?_\r\n\n", "before_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. 
For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [\n {\n \"title\": x[\"title\"],\n \"slug\": x[\"slug\"],\n \"locale\": x[\"locale\"],\n \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", [])),\n }\n for x in results[\"documents\"]\n ],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}], "after_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import 
RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [\n {\n \"title\": x[\"title\"],\n \"slug\": x[\"slug\"],\n \"locale\": x[\"locale\"],\n \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", 
[])),\n }\n for x in results[\"documents\"]\n ],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}]}
1,928
134
gh_patches_debug_19038
rasdani/github-patches
git_diff
plone__Products.CMFPlone-2971
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dexterity Control Panel is missing if site is created without default/example content Plone 5.0.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Products/CMFPlone/factory.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from logging import getLogger 3 from plone.registry.interfaces import IRegistry 4 from Products.CMFPlone import PloneMessageFactory as _ 5 from Products.CMFPlone.events import SiteManagerCreatedEvent 6 from Products.CMFPlone.interfaces import INonInstallable 7 from Products.CMFPlone.Portal import PloneSite 8 from Products.GenericSetup.tool import SetupTool 9 from Products.statusmessages.interfaces import IStatusMessage 10 from zope.component import queryUtility 11 from zope.component.hooks import setSite 12 from zope.event import notify 13 from zope.interface import implementer 14 15 _TOOL_ID = 'portal_setup' 16 _DEFAULT_PROFILE = 'Products.CMFPlone:plone' 17 _CONTENT_PROFILE = 'plone.app.contenttypes:plone-content' 18 19 # A little hint for PloneTestCase 20 _IMREALLYPLONE5 = True 21 22 logger = getLogger('Plone') 23 24 25 @implementer(INonInstallable) 26 class NonInstallable(object): 27 28 def getNonInstallableProducts(self): 29 return [ 30 'Archetypes', 'Products.Archetypes', 31 'CMFDefault', 'Products.CMFDefault', 32 'CMFPlone', 'Products.CMFPlone', 'Products.CMFPlone.migrations', 33 'CMFTopic', 'Products.CMFTopic', 34 'CMFUid', 'Products.CMFUid', 35 'DCWorkflow', 'Products.DCWorkflow', 36 'PasswordResetTool', 'Products.PasswordResetTool', 37 'PlonePAS', 'Products.PlonePAS', 38 'wicked.at', 39 'PloneLanguageTool', 'Products.PloneLanguageTool', 40 'CMFFormController', 'Products.CMFFormController', 41 'MimetypesRegistry', 'Products.MimetypesRegistry', 42 'PortalTransforms', 'Products.PortalTransforms', 43 'CMFDiffTool', 'Products.CMFDiffTool', 44 'CMFEditions', 'Products.CMFEditions', 45 'Products.NuPlone', 46 'borg.localrole', 47 'plone.app.blob', 48 'plone.app.caching', 49 'plone.app.collection', 50 'plone.app.dexterity', 51 'plone.app.discussion', 52 'plone.app.event', 53 'plone.app.imaging', 54 'plone.app.intid', 55 'plone.app.linkintegrity', 56 'plone.app.querystring', 57 'plone.app.registry', 58 'plone.app.referenceablebehavior', 59 'plone.app.relationfield', 60 'plone.app.theming', 61 'plone.app.users', 62 'plone.app.widgets', 63 'plone.app.z3cform', 64 'plone.formwidget.recurrence', 65 'plone.keyring', 66 'plone.outputfilters', 67 'plone.portlet.static', 68 'plone.portlet.collection', 69 'plone.protect', 70 'plone.resource', 71 'plonetheme.barceloneta', 72 ] 73 74 def getNonInstallableProfiles(self): 75 return [_DEFAULT_PROFILE, 76 _CONTENT_PROFILE, 77 u'Products.Archetypes:Archetypes', 78 u'Products.ATContentTypes:default', 79 u'Products.CMFDiffTool:CMFDiffTool', 80 u'Products.CMFEditions:CMFEditions', 81 u'Products.CMFFormController:CMFFormController', 82 u'Products.CMFPlone:dependencies', 83 u'Products.CMFPlone:testfixture', 84 u'Products.CMFQuickInstallerTool:CMFQuickInstallerTool', 85 u'Products.NuPlone:uninstall', 86 u'Products.MimetypesRegistry:MimetypesRegistry', 87 u'Products.PasswordResetTool:PasswordResetTool', 88 u'Products.PortalTransforms:PortalTransforms', 89 u'Products.PloneLanguageTool:PloneLanguageTool', 90 u'Products.PlonePAS:PlonePAS', 91 u'borg.localrole:default', 92 u'plone.browserlayer:default', 93 u'plone.keyring:default', 94 
u'plone.outputfilters:default', 95 u'plone.portlet.static:default', 96 u'plone.portlet.collection:default', 97 u'plone.protect:default', 98 u'plone.app.blob:default', 99 u'plone.app.blob:file-replacement', 100 u'plone.app.blob:image-replacement', 101 u'plone.app.blob:sample-type', 102 u'plone.app.collection:default', 103 u'plone.app.contenttypes:default', 104 u'plone.app.dexterity:default', 105 u'plone.app.discussion:default', 106 u'plone.app.event:default', 107 u'plone.app.imaging:default', 108 u'plone.app.linkintegrity:default', 109 u'plone.app.registry:default', 110 u'plone.app.relationfield:default', 111 u'plone.app.theming:default', 112 u'plone.app.users:default', 113 u'plone.app.versioningbehavior:default', 114 u'plone.app.z3cform:default', 115 u'plone.formwidget.recurrence:default', 116 u'plone.resource:default', 117 ] 118 119 120 def zmi_constructor(context): 121 """This is a dummy constructor for the ZMI.""" 122 url = context.DestinationURL() 123 request = context.REQUEST 124 return request.response.redirect(url + '/@@plone-addsite?site_id=Plone') 125 126 127 def addPloneSite(context, site_id, title='Plone site', description='', 128 profile_id=_DEFAULT_PROFILE, 129 content_profile_id=_CONTENT_PROFILE, snapshot=False, 130 extension_ids=(), setup_content=True, 131 default_language='en', portal_timezone='UTC'): 132 """Add a PloneSite to the context.""" 133 context._setObject(site_id, PloneSite(site_id)) 134 site = context._getOb(site_id) 135 site.setLanguage(default_language) 136 # Set the accepted language for the rest of the request. This makes sure 137 # the front-page text gets the correct translation also when your browser 138 # prefers non-English and you choose English as language for the Plone 139 # Site. 140 request = context.REQUEST 141 request['HTTP_ACCEPT_LANGUAGE'] = default_language 142 143 site[_TOOL_ID] = SetupTool(_TOOL_ID) 144 setup_tool = site[_TOOL_ID] 145 146 notify(SiteManagerCreatedEvent(site)) 147 setSite(site) 148 149 setup_tool.setBaselineContext('profile-%s' % profile_id) 150 setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id) 151 152 reg = queryUtility(IRegistry, context=site) 153 reg['plone.portal_timezone'] = portal_timezone 154 reg['plone.available_timezones'] = [portal_timezone] 155 reg['plone.default_language'] = default_language 156 reg['plone.available_languages'] = [default_language] 157 158 if setup_content: 159 setup_tool.runAllImportStepsFromProfile( 160 'profile-%s' % content_profile_id) 161 162 props = dict( 163 title=title, 164 description=description, 165 ) 166 # Do this before applying extension profiles, so the settings from a 167 # properties.xml file are applied and not overwritten by this 168 site.manage_changeProperties(**props) 169 170 for extension_id in extension_ids: 171 try: 172 setup_tool.runAllImportStepsFromProfile( 173 'profile-%s' % extension_id) 174 except Exception as msg: 175 IStatusMessage(request).add(_( 176 'Could not install ${profile_id}: ${error_msg}! ' 177 'Please try to install it manually using the "Addons" ' 178 'controlpanel and report any issues to the ' 179 'addon maintainers.', 180 mapping={ 181 'profile_id': extension_id, 182 'error_msg': msg.args, 183 }), 184 type='error') 185 logger.exception( 186 'Error while installing addon {}. 
' 187 'See traceback below for details.'.format(extension_id)) 188 189 if snapshot is True: 190 setup_tool.createSnapshot('initial_configuration') 191 192 return site 193 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Products/CMFPlone/factory.py b/Products/CMFPlone/factory.py --- a/Products/CMFPlone/factory.py +++ b/Products/CMFPlone/factory.py @@ -14,6 +14,7 @@ _TOOL_ID = 'portal_setup' _DEFAULT_PROFILE = 'Products.CMFPlone:plone' +_TYPES_PROFILE = 'plone.app.contenttypes:default' _CONTENT_PROFILE = 'plone.app.contenttypes:plone-content' # A little hint for PloneTestCase @@ -155,9 +156,11 @@ reg['plone.default_language'] = default_language reg['plone.available_languages'] = [default_language] - if setup_content: - setup_tool.runAllImportStepsFromProfile( - 'profile-%s' % content_profile_id) + # Install default content types profile if user do not select "example content" + # during site creation. + content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE + + setup_tool.runAllImportStepsFromProfile('profile-{0}'.format(content_types_profile)) props = dict( title=title,
{"golden_diff": "diff --git a/Products/CMFPlone/factory.py b/Products/CMFPlone/factory.py\n--- a/Products/CMFPlone/factory.py\n+++ b/Products/CMFPlone/factory.py\n@@ -14,6 +14,7 @@\n \n _TOOL_ID = 'portal_setup'\n _DEFAULT_PROFILE = 'Products.CMFPlone:plone'\n+_TYPES_PROFILE = 'plone.app.contenttypes:default'\n _CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'\n \n # A little hint for PloneTestCase\n@@ -155,9 +156,11 @@\n reg['plone.default_language'] = default_language\n reg['plone.available_languages'] = [default_language]\n \n- if setup_content:\n- setup_tool.runAllImportStepsFromProfile(\n- 'profile-%s' % content_profile_id)\n+ # Install default content types profile if user do not select \"example content\"\n+ # during site creation.\n+ content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE\n+\n+ setup_tool.runAllImportStepsFromProfile('profile-{0}'.format(content_types_profile))\n \n props = dict(\n title=title,\n", "issue": "Dexterity Control Panel is missing if site is created without default/example content\nPlone 5.0.2\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom logging import getLogger\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.events import SiteManagerCreatedEvent\nfrom Products.CMFPlone.interfaces import INonInstallable\nfrom Products.CMFPlone.Portal import PloneSite\nfrom Products.GenericSetup.tool import SetupTool\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.component import queryUtility\nfrom zope.component.hooks import setSite\nfrom zope.event import notify\nfrom zope.interface import implementer\n\n_TOOL_ID = 'portal_setup'\n_DEFAULT_PROFILE = 'Products.CMFPlone:plone'\n_CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'\n\n# A little hint for PloneTestCase\n_IMREALLYPLONE5 = True\n\nlogger = getLogger('Plone')\n\n\n@implementer(INonInstallable)\nclass NonInstallable(object):\n\n def getNonInstallableProducts(self):\n return [\n 'Archetypes', 'Products.Archetypes',\n 'CMFDefault', 'Products.CMFDefault',\n 'CMFPlone', 'Products.CMFPlone', 'Products.CMFPlone.migrations',\n 'CMFTopic', 'Products.CMFTopic',\n 'CMFUid', 'Products.CMFUid',\n 'DCWorkflow', 'Products.DCWorkflow',\n 'PasswordResetTool', 'Products.PasswordResetTool',\n 'PlonePAS', 'Products.PlonePAS',\n 'wicked.at',\n 'PloneLanguageTool', 'Products.PloneLanguageTool',\n 'CMFFormController', 'Products.CMFFormController',\n 'MimetypesRegistry', 'Products.MimetypesRegistry',\n 'PortalTransforms', 'Products.PortalTransforms',\n 'CMFDiffTool', 'Products.CMFDiffTool',\n 'CMFEditions', 'Products.CMFEditions',\n 'Products.NuPlone',\n 'borg.localrole',\n 'plone.app.blob',\n 'plone.app.caching',\n 'plone.app.collection',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.event',\n 'plone.app.imaging',\n 'plone.app.intid',\n 'plone.app.linkintegrity',\n 'plone.app.querystring',\n 'plone.app.registry',\n 'plone.app.referenceablebehavior',\n 'plone.app.relationfield',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.widgets',\n 'plone.app.z3cform',\n 'plone.formwidget.recurrence',\n 'plone.keyring',\n 'plone.outputfilters',\n 'plone.portlet.static',\n 'plone.portlet.collection',\n 'plone.protect',\n 'plone.resource',\n 'plonetheme.barceloneta',\n ]\n\n def getNonInstallableProfiles(self):\n return [_DEFAULT_PROFILE,\n _CONTENT_PROFILE,\n u'Products.Archetypes:Archetypes',\n u'Products.ATContentTypes:default',\n 
u'Products.CMFDiffTool:CMFDiffTool',\n u'Products.CMFEditions:CMFEditions',\n u'Products.CMFFormController:CMFFormController',\n u'Products.CMFPlone:dependencies',\n u'Products.CMFPlone:testfixture',\n u'Products.CMFQuickInstallerTool:CMFQuickInstallerTool',\n u'Products.NuPlone:uninstall',\n u'Products.MimetypesRegistry:MimetypesRegistry',\n u'Products.PasswordResetTool:PasswordResetTool',\n u'Products.PortalTransforms:PortalTransforms',\n u'Products.PloneLanguageTool:PloneLanguageTool',\n u'Products.PlonePAS:PlonePAS',\n u'borg.localrole:default',\n u'plone.browserlayer:default',\n u'plone.keyring:default',\n u'plone.outputfilters:default',\n u'plone.portlet.static:default',\n u'plone.portlet.collection:default',\n u'plone.protect:default',\n u'plone.app.blob:default',\n u'plone.app.blob:file-replacement',\n u'plone.app.blob:image-replacement',\n u'plone.app.blob:sample-type',\n u'plone.app.collection:default',\n u'plone.app.contenttypes:default',\n u'plone.app.dexterity:default',\n u'plone.app.discussion:default',\n u'plone.app.event:default',\n u'plone.app.imaging:default',\n u'plone.app.linkintegrity:default',\n u'plone.app.registry:default',\n u'plone.app.relationfield:default',\n u'plone.app.theming:default',\n u'plone.app.users:default',\n u'plone.app.versioningbehavior:default',\n u'plone.app.z3cform:default',\n u'plone.formwidget.recurrence:default',\n u'plone.resource:default',\n ]\n\n\ndef zmi_constructor(context):\n \"\"\"This is a dummy constructor for the ZMI.\"\"\"\n url = context.DestinationURL()\n request = context.REQUEST\n return request.response.redirect(url + '/@@plone-addsite?site_id=Plone')\n\n\ndef addPloneSite(context, site_id, title='Plone site', description='',\n profile_id=_DEFAULT_PROFILE,\n content_profile_id=_CONTENT_PROFILE, snapshot=False,\n extension_ids=(), setup_content=True,\n default_language='en', portal_timezone='UTC'):\n \"\"\"Add a PloneSite to the context.\"\"\"\n context._setObject(site_id, PloneSite(site_id))\n site = context._getOb(site_id)\n site.setLanguage(default_language)\n # Set the accepted language for the rest of the request. This makes sure\n # the front-page text gets the correct translation also when your browser\n # prefers non-English and you choose English as language for the Plone\n # Site.\n request = context.REQUEST\n request['HTTP_ACCEPT_LANGUAGE'] = default_language\n\n site[_TOOL_ID] = SetupTool(_TOOL_ID)\n setup_tool = site[_TOOL_ID]\n\n notify(SiteManagerCreatedEvent(site))\n setSite(site)\n\n setup_tool.setBaselineContext('profile-%s' % profile_id)\n setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)\n\n reg = queryUtility(IRegistry, context=site)\n reg['plone.portal_timezone'] = portal_timezone\n reg['plone.available_timezones'] = [portal_timezone]\n reg['plone.default_language'] = default_language\n reg['plone.available_languages'] = [default_language]\n\n if setup_content:\n setup_tool.runAllImportStepsFromProfile(\n 'profile-%s' % content_profile_id)\n\n props = dict(\n title=title,\n description=description,\n )\n # Do this before applying extension profiles, so the settings from a\n # properties.xml file are applied and not overwritten by this\n site.manage_changeProperties(**props)\n\n for extension_id in extension_ids:\n try:\n setup_tool.runAllImportStepsFromProfile(\n 'profile-%s' % extension_id)\n except Exception as msg:\n IStatusMessage(request).add(_(\n 'Could not install ${profile_id}: ${error_msg}! 
'\n 'Please try to install it manually using the \"Addons\" '\n 'controlpanel and report any issues to the '\n 'addon maintainers.',\n mapping={\n 'profile_id': extension_id,\n 'error_msg': msg.args,\n }),\n type='error')\n logger.exception(\n 'Error while installing addon {}. '\n 'See traceback below for details.'.format(extension_id))\n\n if snapshot is True:\n setup_tool.createSnapshot('initial_configuration')\n\n return site\n", "path": "Products/CMFPlone/factory.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom logging import getLogger\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.events import SiteManagerCreatedEvent\nfrom Products.CMFPlone.interfaces import INonInstallable\nfrom Products.CMFPlone.Portal import PloneSite\nfrom Products.GenericSetup.tool import SetupTool\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom zope.component import queryUtility\nfrom zope.component.hooks import setSite\nfrom zope.event import notify\nfrom zope.interface import implementer\n\n_TOOL_ID = 'portal_setup'\n_DEFAULT_PROFILE = 'Products.CMFPlone:plone'\n_TYPES_PROFILE = 'plone.app.contenttypes:default'\n_CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'\n\n# A little hint for PloneTestCase\n_IMREALLYPLONE5 = True\n\nlogger = getLogger('Plone')\n\n\n@implementer(INonInstallable)\nclass NonInstallable(object):\n\n def getNonInstallableProducts(self):\n return [\n 'Archetypes', 'Products.Archetypes',\n 'CMFDefault', 'Products.CMFDefault',\n 'CMFPlone', 'Products.CMFPlone', 'Products.CMFPlone.migrations',\n 'CMFTopic', 'Products.CMFTopic',\n 'CMFUid', 'Products.CMFUid',\n 'DCWorkflow', 'Products.DCWorkflow',\n 'PasswordResetTool', 'Products.PasswordResetTool',\n 'PlonePAS', 'Products.PlonePAS',\n 'wicked.at',\n 'PloneLanguageTool', 'Products.PloneLanguageTool',\n 'CMFFormController', 'Products.CMFFormController',\n 'MimetypesRegistry', 'Products.MimetypesRegistry',\n 'PortalTransforms', 'Products.PortalTransforms',\n 'CMFDiffTool', 'Products.CMFDiffTool',\n 'CMFEditions', 'Products.CMFEditions',\n 'Products.NuPlone',\n 'borg.localrole',\n 'plone.app.blob',\n 'plone.app.caching',\n 'plone.app.collection',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.event',\n 'plone.app.imaging',\n 'plone.app.intid',\n 'plone.app.linkintegrity',\n 'plone.app.querystring',\n 'plone.app.registry',\n 'plone.app.referenceablebehavior',\n 'plone.app.relationfield',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.widgets',\n 'plone.app.z3cform',\n 'plone.formwidget.recurrence',\n 'plone.keyring',\n 'plone.outputfilters',\n 'plone.portlet.static',\n 'plone.portlet.collection',\n 'plone.protect',\n 'plone.resource',\n 'plonetheme.barceloneta',\n ]\n\n def getNonInstallableProfiles(self):\n return [_DEFAULT_PROFILE,\n _CONTENT_PROFILE,\n u'Products.Archetypes:Archetypes',\n u'Products.ATContentTypes:default',\n u'Products.CMFDiffTool:CMFDiffTool',\n u'Products.CMFEditions:CMFEditions',\n u'Products.CMFFormController:CMFFormController',\n u'Products.CMFPlone:dependencies',\n u'Products.CMFPlone:testfixture',\n u'Products.CMFQuickInstallerTool:CMFQuickInstallerTool',\n u'Products.NuPlone:uninstall',\n u'Products.MimetypesRegistry:MimetypesRegistry',\n u'Products.PasswordResetTool:PasswordResetTool',\n u'Products.PortalTransforms:PortalTransforms',\n u'Products.PloneLanguageTool:PloneLanguageTool',\n u'Products.PlonePAS:PlonePAS',\n u'borg.localrole:default',\n 
u'plone.browserlayer:default',\n u'plone.keyring:default',\n u'plone.outputfilters:default',\n u'plone.portlet.static:default',\n u'plone.portlet.collection:default',\n u'plone.protect:default',\n u'plone.app.blob:default',\n u'plone.app.blob:file-replacement',\n u'plone.app.blob:image-replacement',\n u'plone.app.blob:sample-type',\n u'plone.app.collection:default',\n u'plone.app.contenttypes:default',\n u'plone.app.dexterity:default',\n u'plone.app.discussion:default',\n u'plone.app.event:default',\n u'plone.app.imaging:default',\n u'plone.app.linkintegrity:default',\n u'plone.app.registry:default',\n u'plone.app.relationfield:default',\n u'plone.app.theming:default',\n u'plone.app.users:default',\n u'plone.app.versioningbehavior:default',\n u'plone.app.z3cform:default',\n u'plone.formwidget.recurrence:default',\n u'plone.resource:default',\n ]\n\n\ndef zmi_constructor(context):\n \"\"\"This is a dummy constructor for the ZMI.\"\"\"\n url = context.DestinationURL()\n request = context.REQUEST\n return request.response.redirect(url + '/@@plone-addsite?site_id=Plone')\n\n\ndef addPloneSite(context, site_id, title='Plone site', description='',\n profile_id=_DEFAULT_PROFILE,\n content_profile_id=_CONTENT_PROFILE, snapshot=False,\n extension_ids=(), setup_content=True,\n default_language='en', portal_timezone='UTC'):\n \"\"\"Add a PloneSite to the context.\"\"\"\n context._setObject(site_id, PloneSite(site_id))\n site = context._getOb(site_id)\n site.setLanguage(default_language)\n # Set the accepted language for the rest of the request. This makes sure\n # the front-page text gets the correct translation also when your browser\n # prefers non-English and you choose English as language for the Plone\n # Site.\n request = context.REQUEST\n request['HTTP_ACCEPT_LANGUAGE'] = default_language\n\n site[_TOOL_ID] = SetupTool(_TOOL_ID)\n setup_tool = site[_TOOL_ID]\n\n notify(SiteManagerCreatedEvent(site))\n setSite(site)\n\n setup_tool.setBaselineContext('profile-%s' % profile_id)\n setup_tool.runAllImportStepsFromProfile('profile-%s' % profile_id)\n\n reg = queryUtility(IRegistry, context=site)\n reg['plone.portal_timezone'] = portal_timezone\n reg['plone.available_timezones'] = [portal_timezone]\n reg['plone.default_language'] = default_language\n reg['plone.available_languages'] = [default_language]\n\n # Install default content types profile if user do not select \"example content\"\n # during site creation.\n content_types_profile = content_profile_id if setup_content else _TYPES_PROFILE\n\n setup_tool.runAllImportStepsFromProfile('profile-{0}'.format(content_types_profile))\n\n props = dict(\n title=title,\n description=description,\n )\n # Do this before applying extension profiles, so the settings from a\n # properties.xml file are applied and not overwritten by this\n site.manage_changeProperties(**props)\n\n for extension_id in extension_ids:\n try:\n setup_tool.runAllImportStepsFromProfile(\n 'profile-%s' % extension_id)\n except Exception as msg:\n IStatusMessage(request).add(_(\n 'Could not install ${profile_id}: ${error_msg}! '\n 'Please try to install it manually using the \"Addons\" '\n 'controlpanel and report any issues to the '\n 'addon maintainers.',\n mapping={\n 'profile_id': extension_id,\n 'error_msg': msg.args,\n }),\n type='error')\n logger.exception(\n 'Error while installing addon {}. 
'\n 'See traceback below for details.'.format(extension_id))\n\n if snapshot is True:\n setup_tool.createSnapshot('initial_configuration')\n\n return site\n", "path": "Products/CMFPlone/factory.py"}]}
2,489
268
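The Plone record above reduces to a profile-selection rule: a plone.app.contenttypes profile is always installed for a new site, and only which profile depends on whether example content was requested. The sketch below isolates that rule so it can run on its own; the function name `select_content_profile` is an illustrative stand-in and not part of the CMFPlone API, while the profile id strings are taken from the patched factory.py.

```python
# Minimal, dependency-free sketch of the selection logic from the golden diff
# above. The constants mirror the profile ids in Products/CMFPlone/factory.py;
# the helper function itself is hypothetical and exists only for illustration.
TYPES_PROFILE = 'plone.app.contenttypes:default'
CONTENT_PROFILE = 'plone.app.contenttypes:plone-content'


def select_content_profile(setup_content, content_profile_id=CONTENT_PROFILE):
    """Pick the contenttypes profile to run when creating a site.

    With example content requested, the full plone-content profile runs;
    without it, the bare 'default' profile still runs, so the Dexterity
    control panel and content type definitions are registered either way.
    """
    return content_profile_id if setup_content else TYPES_PROFILE


print(select_content_profile(True))   # plone.app.contenttypes:plone-content
print(select_content_profile(False))  # plone.app.contenttypes:default
```

Before the patch, the `setup_content=False` branch skipped plone.app.contenttypes entirely, which is why the Dexterity control panel never appeared on sites created without example content.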
gh_patches_debug_13051
rasdani/github-patches
git_diff
ckan__ckan-7871
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- current_user.is_anonymous doesn't care if context has an user ## CKAN version 2.10 ## Describe the bug While looking into why https://github.com/ckan/ckan/pull/7266 started failing on unmodified ckan, I noticed that deciphering anonymous users has changed. On 2.9 `restrict_anon` works as follows https://github.com/ckan/ckan/blob/c4e2818818e08e60bb69d64229f8dbba531f8439/ckan/logic/auth/__init__.py#L51-L55 where `authz.auth_is_anon_user` checks if the context has an user in it. On 2.10 the functionality has changed: https://github.com/ckan/ckan/blob/d46613e346f9dc551aedb54c8c24baad919f78c1/ckan/logic/auth/__init__.py#L93-L97 which does not check for context at all and the password reset started failing on 2.10. Should the `is_anonymous` check for user in context or are we just relying on what flask login says about the user? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckan/logic/auth/__init__.py` Content: ``` 1 # encoding: utf-8 2 3 ''' 4 Helper functions to be used in the auth check functions 5 ''' 6 7 from typing import Any, Optional, TYPE_CHECKING, overload 8 from typing_extensions import Literal 9 10 import ckan.logic as logic 11 from ckan.types import Context, AuthResult, DataDict 12 from ckan.common import current_user 13 14 if TYPE_CHECKING: 15 import ckan.model as model_ 16 17 18 @overload 19 def _get_object(context: Context, 20 data_dict: Optional[DataDict], name: str, 21 class_name: Literal['Package']) -> 'model_.Package': 22 ... 23 24 25 @overload 26 def _get_object(context: Context, 27 data_dict: Optional[DataDict], name: str, 28 class_name: Literal['Resource']) -> 'model_.Resource': 29 ... 30 31 32 @overload 33 def _get_object(context: Context, 34 data_dict: Optional[DataDict], name: str, 35 class_name: Literal['Group']) -> 'model_.Group': 36 ... 37 38 39 @overload 40 def _get_object(context: Context, 41 data_dict: Optional[DataDict], name: str, 42 class_name: Literal['User']) -> 'model_.User': 43 ... 
44 45 46 def _get_object(context: Context, 47 data_dict: Optional[DataDict], name: str, 48 class_name: str) -> Any: 49 # return the named item from model.class_name 50 model = context['model'] 51 if not data_dict: 52 data_dict = {} 53 id = data_dict.get('id', None) 54 if not id: 55 raise logic.ValidationError({ 56 "message": 'Missing id, can not get {0} object'.format( 57 class_name)}) 58 obj = getattr(model, class_name).get(id) 59 if not obj: 60 raise logic.NotFound 61 return obj 62 63 64 def get_package_object( 65 context: Context, 66 data_dict: Optional[DataDict] = None) -> 'model_.Package': 67 return _get_object(context, data_dict, 'package', 'Package') 68 69 70 def get_resource_object( 71 context: Context, 72 data_dict: Optional[DataDict] = None) -> 'model_.Resource': 73 return _get_object(context, data_dict, 'resource', 'Resource') 74 75 76 def get_group_object( 77 context: Context, 78 data_dict: Optional[DataDict] = None) -> 'model_.Group': 79 return _get_object(context, data_dict, 'group', 'Group') 80 81 82 def get_user_object( 83 context: Context, 84 data_dict: Optional[DataDict] = None) -> 'model_.User': 85 return _get_object(context, data_dict, 'user_obj', 'User') 86 87 88 def restrict_anon(context: Context) -> AuthResult: 89 if current_user.is_anonymous: 90 return {'success': False} 91 else: 92 return {'success': True} 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckan/logic/auth/__init__.py b/ckan/logic/auth/__init__.py --- a/ckan/logic/auth/__init__.py +++ b/ckan/logic/auth/__init__.py @@ -8,8 +8,8 @@ from typing_extensions import Literal import ckan.logic as logic +import ckan.authz as authz from ckan.types import Context, AuthResult, DataDict -from ckan.common import current_user if TYPE_CHECKING: import ckan.model as model_ @@ -86,7 +86,7 @@ def restrict_anon(context: Context) -> AuthResult: - if current_user.is_anonymous: + if authz.auth_is_anon_user(context): return {'success': False} else: return {'success': True}
{"golden_diff": "diff --git a/ckan/logic/auth/__init__.py b/ckan/logic/auth/__init__.py\n--- a/ckan/logic/auth/__init__.py\n+++ b/ckan/logic/auth/__init__.py\n@@ -8,8 +8,8 @@\n from typing_extensions import Literal\n \n import ckan.logic as logic\n+import ckan.authz as authz\n from ckan.types import Context, AuthResult, DataDict\n-from ckan.common import current_user\n \n if TYPE_CHECKING:\n import ckan.model as model_\n@@ -86,7 +86,7 @@\n \n \n def restrict_anon(context: Context) -> AuthResult:\n- if current_user.is_anonymous:\n+ if authz.auth_is_anon_user(context):\n return {'success': False}\n else:\n return {'success': True}\n", "issue": "current_user.is_anonymous doesn't care if context has an user\n## CKAN version\r\n2.10\r\n\r\n## Describe the bug\r\nWhile looking into why https://github.com/ckan/ckan/pull/7266 started failing on unmodified ckan, I noticed that deciphering anonymous users has changed. On 2.9 `restrict_anon` works as follows \r\nhttps://github.com/ckan/ckan/blob/c4e2818818e08e60bb69d64229f8dbba531f8439/ckan/logic/auth/__init__.py#L51-L55\r\n\r\nwhere `authz.auth_is_anon_user` checks if the context has an user in it. On 2.10 the functionality has changed:\r\n\r\nhttps://github.com/ckan/ckan/blob/d46613e346f9dc551aedb54c8c24baad919f78c1/ckan/logic/auth/__init__.py#L93-L97\r\n\r\nwhich does not check for context at all and the password reset started failing on 2.10. Should the `is_anonymous` check for user in context or are we just relying on what flask login says about the user? \r\n\r\n\n", "before_files": [{"content": "# encoding: utf-8\n\n'''\nHelper functions to be used in the auth check functions\n'''\n\nfrom typing import Any, Optional, TYPE_CHECKING, overload\nfrom typing_extensions import Literal\n\nimport ckan.logic as logic\nfrom ckan.types import Context, AuthResult, DataDict\nfrom ckan.common import current_user\n\nif TYPE_CHECKING:\n import ckan.model as model_\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Package']) -> 'model_.Package':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Resource']) -> 'model_.Resource':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Group']) -> 'model_.Group':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['User']) -> 'model_.User':\n ...\n\n\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: str) -> Any:\n # return the named item from model.class_name\n model = context['model']\n if not data_dict:\n data_dict = {}\n id = data_dict.get('id', None)\n if not id:\n raise logic.ValidationError({\n \"message\": 'Missing id, can not get {0} object'.format(\n class_name)})\n obj = getattr(model, class_name).get(id)\n if not obj:\n raise logic.NotFound\n return obj\n\n\ndef get_package_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Package':\n return _get_object(context, data_dict, 'package', 'Package')\n\n\ndef get_resource_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Resource':\n return _get_object(context, data_dict, 'resource', 'Resource')\n\n\ndef get_group_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Group':\n return _get_object(context, data_dict, 'group', 'Group')\n\n\ndef get_user_object(\n 
context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.User':\n return _get_object(context, data_dict, 'user_obj', 'User')\n\n\ndef restrict_anon(context: Context) -> AuthResult:\n if current_user.is_anonymous:\n return {'success': False}\n else:\n return {'success': True}\n", "path": "ckan/logic/auth/__init__.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''\nHelper functions to be used in the auth check functions\n'''\n\nfrom typing import Any, Optional, TYPE_CHECKING, overload\nfrom typing_extensions import Literal\n\nimport ckan.logic as logic\nimport ckan.authz as authz\nfrom ckan.types import Context, AuthResult, DataDict\n\nif TYPE_CHECKING:\n import ckan.model as model_\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Package']) -> 'model_.Package':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Resource']) -> 'model_.Resource':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['Group']) -> 'model_.Group':\n ...\n\n\n@overload\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: Literal['User']) -> 'model_.User':\n ...\n\n\ndef _get_object(context: Context,\n data_dict: Optional[DataDict], name: str,\n class_name: str) -> Any:\n # return the named item from model.class_name\n model = context['model']\n if not data_dict:\n data_dict = {}\n id = data_dict.get('id', None)\n if not id:\n raise logic.ValidationError({\n \"message\": 'Missing id, can not get {0} object'.format(\n class_name)})\n obj = getattr(model, class_name).get(id)\n if not obj:\n raise logic.NotFound\n return obj\n\n\ndef get_package_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Package':\n return _get_object(context, data_dict, 'package', 'Package')\n\n\ndef get_resource_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Resource':\n return _get_object(context, data_dict, 'resource', 'Resource')\n\n\ndef get_group_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.Group':\n return _get_object(context, data_dict, 'group', 'Group')\n\n\ndef get_user_object(\n context: Context,\n data_dict: Optional[DataDict] = None) -> 'model_.User':\n return _get_object(context, data_dict, 'user_obj', 'User')\n\n\ndef restrict_anon(context: Context) -> AuthResult:\n if authz.auth_is_anon_user(context):\n return {'success': False}\n else:\n return {'success': True}\n", "path": "ckan/logic/auth/__init__.py"}]}
1,343
182
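The CKAN record above changes where anonymity is decided: the patched `restrict_anon` asks `ckan.authz.auth_is_anon_user(context)` instead of flask-login's `current_user.is_anonymous`. The toy stand-in below only illustrates the shape of a context-based check; the real `auth_is_anon_user` lives in `ckan.authz` and performs more than this one-liner.

```python
# Dependency-free toy version of a context-based anonymity check. This is an
# illustration of the idea behind the patch, not CKAN's implementation.
def auth_is_anon_user(context):
    # Treat the request as anonymous when the auth context names no user.
    return not context.get("user")


def restrict_anon(context):
    if auth_is_anon_user(context):
        return {"success": False}
    return {"success": True}


assert restrict_anon({}) == {"success": False}
assert restrict_anon({"user": ""}) == {"success": False}
assert restrict_anon({"user": "alice"}) == {"success": True}
print("context-based checks passed")
```

That matches the password-reset failure described in the issue, where the auth context can carry a user even though flask-login reports the session as anonymous, so the two checks can disagree.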
gh_patches_debug_32696
rasdani/github-patches
git_diff
pytorch__vision-107
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- VGG classifier setting different from Original paper One of the dropout layer was "wrongly" inserted. The original final layers of Caffe Version (https://gist.github.com/ksimonyan/211839e770f7b538e2d8) is: self.classifier = nn.Sequential( nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout() nn.Linear(4096, 1000), ) This won't make difference when we use model.eval(), but will make discrepancy if we want to finetune VggNet by loading Caffe's parameters. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/models/vgg.py` Content: ``` 1 import torch.nn as nn 2 import torch.utils.model_zoo as model_zoo 3 import math 4 5 6 __all__ = [ 7 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 8 'vgg19_bn', 'vgg19', 9 ] 10 11 12 model_urls = { 13 'vgg11': 'https://download.pytorch.org/models/vgg11-fb7e83b2.pth', 14 'vgg13': 'https://download.pytorch.org/models/vgg13-58758d87.pth', 15 'vgg16': 'https://download.pytorch.org/models/vgg16-82412952.pth', 16 'vgg19': 'https://download.pytorch.org/models/vgg19-341d7465.pth', 17 } 18 19 20 class VGG(nn.Module): 21 22 def __init__(self, features): 23 super(VGG, self).__init__() 24 self.features = features 25 self.classifier = nn.Sequential( 26 nn.Dropout(), 27 nn.Linear(512 * 7 * 7, 4096), 28 nn.ReLU(True), 29 nn.Dropout(), 30 nn.Linear(4096, 4096), 31 nn.ReLU(True), 32 nn.Linear(4096, 1000), 33 ) 34 self._initialize_weights() 35 36 def forward(self, x): 37 x = self.features(x) 38 x = x.view(x.size(0), -1) 39 x = self.classifier(x) 40 return x 41 42 def _initialize_weights(self): 43 for m in self.modules(): 44 if isinstance(m, nn.Conv2d): 45 n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 46 m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 47 if m.bias is not None: 48 m.bias.data.zero_() 49 elif isinstance(m, nn.BatchNorm2d): 50 m.weight.data.fill_(1) 51 m.bias.data.zero_() 52 elif isinstance(m, nn.Linear): 53 n = m.weight.size(1) 54 m.weight.data.normal_(0, 0.01) 55 m.bias.data.zero_() 56 57 58 def make_layers(cfg, batch_norm=False): 59 layers = [] 60 in_channels = 3 61 for v in cfg: 62 if v == 'M': 63 layers += [nn.MaxPool2d(kernel_size=2, stride=2)] 64 else: 65 conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) 66 if batch_norm: 67 layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] 68 else: 69 layers += [conv2d, nn.ReLU(inplace=True)] 70 in_channels = v 71 return nn.Sequential(*layers) 72 73 74 cfg = { 75 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 76 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 77 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 78 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], 79 } 80 81 82 def vgg11(pretrained=False, **kwargs): 83 """VGG 11-layer model (configuration "A") 84 85 Args: 86 pretrained (bool): If True, returns a model pre-trained on ImageNet 87 """ 88 model = VGG(make_layers(cfg['A']), **kwargs) 89 if pretrained: 90 model.load_state_dict(model_zoo.load_url(model_urls['vgg11'])) 91 return model 92 93 94 def vgg11_bn(**kwargs): 95 """VGG 11-layer model (configuration "A") with batch normalization""" 96 return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs) 97 98 99 def vgg13(pretrained=False, **kwargs): 100 """VGG 13-layer model (configuration "B") 101 102 Args: 103 pretrained (bool): If True, returns a model pre-trained on ImageNet 104 """ 105 model = VGG(make_layers(cfg['B']), **kwargs) 106 if pretrained: 107 model.load_state_dict(model_zoo.load_url(model_urls['vgg13'])) 108 return model 109 110 111 def vgg13_bn(**kwargs): 112 """VGG 13-layer model (configuration "B") with batch normalization""" 113 return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs) 114 115 116 def vgg16(pretrained=False, **kwargs): 117 """VGG 16-layer model (configuration "D") 118 119 Args: 120 pretrained (bool): If True, returns a model pre-trained on ImageNet 121 """ 122 model = VGG(make_layers(cfg['D']), **kwargs) 123 if pretrained: 124 model.load_state_dict(model_zoo.load_url(model_urls['vgg16'])) 125 return model 126 127 128 def vgg16_bn(**kwargs): 129 """VGG 16-layer model (configuration "D") with batch normalization""" 130 return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs) 131 132 133 def vgg19(pretrained=False, **kwargs): 134 """VGG 19-layer model (configuration "E") 135 136 Args: 137 pretrained (bool): If True, returns a model pre-trained on ImageNet 138 """ 139 model = VGG(make_layers(cfg['E']), **kwargs) 140 if pretrained: 141 model.load_state_dict(model_zoo.load_url(model_urls['vgg19'])) 142 return model 143 144 145 def vgg19_bn(**kwargs): 146 """VGG 19-layer model (configuration 'E') with batch normalization""" 147 return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs) 148 ``` Path: `torchvision/models/__init__.py` Content: ``` 1 """The models subpackage contains definitions for the following model 2 architectures: 3 4 - `AlexNet`_ 5 - `VGG`_ 6 - `ResNet`_ 7 - `SqueezeNet`_ 8 9 You can construct a model with random weights by calling its constructor: 10 11 .. 
code:: python 12 13 import torchvision.models as models 14 resnet18 = models.resnet18() 15 alexnet = models.alexnet() 16 squeezenet = models.squeezenet1_0() 17 18 We provide pre-trained models for the ResNet variants and AlexNet, using the 19 PyTorch :mod:`torch.utils.model_zoo`. These can constructed by passing 20 ``pretrained=True``: 21 22 .. code:: python 23 24 import torchvision.models as models 25 resnet18 = models.resnet18(pretrained=True) 26 alexnet = models.alexnet(pretrained=True) 27 28 ImageNet 1-crop error rates (224x224) 29 30 ======================== ============= ============= 31 Network Top-1 error Top-5 error 32 ======================== ============= ============= 33 ResNet-18 30.24 10.92 34 ResNet-34 26.70 8.58 35 ResNet-50 23.85 7.13 36 ResNet-101 22.63 6.44 37 ResNet-152 21.69 5.94 38 Inception v3 22.55 6.44 39 AlexNet 43.45 20.91 40 VGG-11 32.14 12.12 41 VGG-13 31.04 11.40 42 VGG-16 29.11 10.17 43 VGG-19 28.42 9.69 44 SqueezeNet 1.0 41.90 19.58 45 SqueezeNet 1.1 41.81 19.38 46 ======================== ============= ============= 47 48 49 .. _AlexNet: https://arxiv.org/abs/1404.5997 50 .. _VGG: https://arxiv.org/abs/1409.1556 51 .. _ResNet: https://arxiv.org/abs/1512.03385 52 .. _SqueezeNet: https://arxiv.org/abs/1602.07360 53 """ 54 55 from .alexnet import * 56 from .resnet import * 57 from .vgg import * 58 from .squeezenet import * 59 from .inception import * 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/models/__init__.py b/torchvision/models/__init__.py --- a/torchvision/models/__init__.py +++ b/torchvision/models/__init__.py @@ -37,10 +37,10 @@ ResNet-152 21.69 5.94 Inception v3 22.55 6.44 AlexNet 43.45 20.91 -VGG-11 32.14 12.12 -VGG-13 31.04 11.40 -VGG-16 29.11 10.17 -VGG-19 28.42 9.69 +VGG-11 30.98 11.37 +VGG-13 30.07 10.75 +VGG-16 28.41 9.62 +VGG-19 27.62 9.12 SqueezeNet 1.0 41.90 19.58 SqueezeNet 1.1 41.81 19.38 ======================== ============= ============= diff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py --- a/torchvision/models/vgg.py +++ b/torchvision/models/vgg.py @@ -10,10 +10,10 @@ model_urls = { - 'vgg11': 'https://download.pytorch.org/models/vgg11-fb7e83b2.pth', - 'vgg13': 'https://download.pytorch.org/models/vgg13-58758d87.pth', - 'vgg16': 'https://download.pytorch.org/models/vgg16-82412952.pth', - 'vgg19': 'https://download.pytorch.org/models/vgg19-341d7465.pth', + 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth', + 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth', + 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', + 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', } @@ -23,12 +23,12 @@ super(VGG, self).__init__() self.features = features self.classifier = nn.Sequential( - nn.Dropout(), nn.Linear(512 * 7 * 7, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), + nn.Dropout(), nn.Linear(4096, 1000), ) self._initialize_weights()
{"golden_diff": "diff --git a/torchvision/models/__init__.py b/torchvision/models/__init__.py\n--- a/torchvision/models/__init__.py\n+++ b/torchvision/models/__init__.py\n@@ -37,10 +37,10 @@\n ResNet-152 21.69 5.94\n Inception v3 22.55 6.44\n AlexNet 43.45 20.91\n-VGG-11 32.14 12.12\n-VGG-13 31.04 11.40\n-VGG-16 29.11 10.17\n-VGG-19 28.42 9.69\n+VGG-11 30.98 11.37\n+VGG-13 30.07 10.75\n+VGG-16 28.41 9.62\n+VGG-19 27.62 9.12\n SqueezeNet 1.0 41.90 19.58\n SqueezeNet 1.1 41.81 19.38\n ======================== ============= =============\ndiff --git a/torchvision/models/vgg.py b/torchvision/models/vgg.py\n--- a/torchvision/models/vgg.py\n+++ b/torchvision/models/vgg.py\n@@ -10,10 +10,10 @@\n \n \n model_urls = {\n- 'vgg11': 'https://download.pytorch.org/models/vgg11-fb7e83b2.pth',\n- 'vgg13': 'https://download.pytorch.org/models/vgg13-58758d87.pth',\n- 'vgg16': 'https://download.pytorch.org/models/vgg16-82412952.pth',\n- 'vgg19': 'https://download.pytorch.org/models/vgg19-341d7465.pth',\n+ 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n+ 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n+ 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n+ 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n }\n \n \n@@ -23,12 +23,12 @@\n super(VGG, self).__init__()\n self.features = features\n self.classifier = nn.Sequential(\n- nn.Dropout(),\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n+ nn.Dropout(),\n nn.Linear(4096, 1000),\n )\n self._initialize_weights()\n", "issue": "VGG classifier setting different from Original paper\nOne of the dropout layer was \"wrongly\" inserted.\r\nThe original final layers of Caffe Version (https://gist.github.com/ksimonyan/211839e770f7b538e2d8) is:\r\nself.classifier = nn.Sequential(\r\n nn.Linear(512 * 7 * 7, 4096),\r\n nn.ReLU(True),\r\n nn.Dropout(),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Dropout()\r\n nn.Linear(4096, 1000),\r\n )\r\nThis won't make difference when we use model.eval(), but will make discrepancy if we want to finetune VggNet by loading Caffe's parameters.\n", "before_files": [{"content": "import torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport math\n\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-fb7e83b2.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-58758d87.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-82412952.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-341d7465.pth',\n}\n\n\nclass VGG(nn.Module):\n\n def __init__(self, features):\n super(VGG, self).__init__()\n self.features = features\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Linear(4096, 1000),\n )\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfg = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef vgg11(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['A']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))\n return model\n\n\ndef vgg11_bn(**kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\"\"\"\n return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\n\n\ndef vgg13(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['B']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n return model\n\n\ndef vgg13_bn(**kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\"\"\"\n return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)\n\n\ndef vgg16(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model\n\n\ndef vgg16_bn(**kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\"\"\"\n return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n\n\ndef vgg19(pretrained=False, **kwargs):\n \"\"\"VGG 19-layer model (configuration \"E\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['E']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model\n\n\ndef vgg19_bn(**kwargs):\n \"\"\"VGG 19-layer model (configuration 'E') with batch normalization\"\"\"\n return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)\n", "path": "torchvision/models/vgg.py"}, {"content": "\"\"\"The models subpackage contains definitions for the following model\narchitectures:\n\n- `AlexNet`_\n- `VGG`_\n- `ResNet`_\n- `SqueezeNet`_\n\nYou can construct a model with random weights by calling its constructor:\n\n.. 
code:: python\n\n import torchvision.models as models\n resnet18 = models.resnet18()\n alexnet = models.alexnet()\n squeezenet = models.squeezenet1_0()\n\nWe provide pre-trained models for the ResNet variants and AlexNet, using the\nPyTorch :mod:`torch.utils.model_zoo`. These can constructed by passing\n``pretrained=True``:\n\n.. code:: python\n\n import torchvision.models as models\n resnet18 = models.resnet18(pretrained=True)\n alexnet = models.alexnet(pretrained=True)\n\nImageNet 1-crop error rates (224x224)\n\n======================== ============= =============\nNetwork Top-1 error Top-5 error\n======================== ============= =============\nResNet-18 30.24 10.92\nResNet-34 26.70 8.58\nResNet-50 23.85 7.13\nResNet-101 22.63 6.44\nResNet-152 21.69 5.94\nInception v3 22.55 6.44\nAlexNet 43.45 20.91\nVGG-11 32.14 12.12\nVGG-13 31.04 11.40\nVGG-16 29.11 10.17\nVGG-19 28.42 9.69\nSqueezeNet 1.0 41.90 19.58\nSqueezeNet 1.1 41.81 19.38\n======================== ============= =============\n\n\n.. _AlexNet: https://arxiv.org/abs/1404.5997\n.. _VGG: https://arxiv.org/abs/1409.1556\n.. _ResNet: https://arxiv.org/abs/1512.03385\n.. _SqueezeNet: https://arxiv.org/abs/1602.07360\n\"\"\"\n\nfrom .alexnet import *\nfrom .resnet import *\nfrom .vgg import *\nfrom .squeezenet import *\nfrom .inception import *\n", "path": "torchvision/models/__init__.py"}], "after_files": [{"content": "import torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport math\n\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n}\n\n\nclass VGG(nn.Module):\n\n def __init__(self, features):\n super(VGG, self).__init__()\n self.features = features\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 1000),\n )\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfg = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef vgg11(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['A']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))\n return model\n\n\ndef vgg11_bn(**kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\"\"\"\n return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\n\n\ndef vgg13(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['B']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n return model\n\n\ndef vgg13_bn(**kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\"\"\"\n return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)\n\n\ndef vgg16(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model\n\n\ndef vgg16_bn(**kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\"\"\"\n return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n\n\ndef vgg19(pretrained=False, **kwargs):\n \"\"\"VGG 19-layer model (configuration \"E\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = VGG(make_layers(cfg['E']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model\n\n\ndef vgg19_bn(**kwargs):\n \"\"\"VGG 19-layer model (configuration 'E') with batch normalization\"\"\"\n return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)\n", "path": "torchvision/models/vgg.py"}, {"content": "\"\"\"The models subpackage contains definitions for the following model\narchitectures:\n\n- `AlexNet`_\n- `VGG`_\n- `ResNet`_\n- `SqueezeNet`_\n\nYou can construct a model with random weights by calling its constructor:\n\n.. 
code:: python\n\n import torchvision.models as models\n resnet18 = models.resnet18()\n alexnet = models.alexnet()\n squeezenet = models.squeezenet1_0()\n\nWe provide pre-trained models for the ResNet variants and AlexNet, using the\nPyTorch :mod:`torch.utils.model_zoo`. These can constructed by passing\n``pretrained=True``:\n\n.. code:: python\n\n import torchvision.models as models\n resnet18 = models.resnet18(pretrained=True)\n alexnet = models.alexnet(pretrained=True)\n\nImageNet 1-crop error rates (224x224)\n\n======================== ============= =============\nNetwork Top-1 error Top-5 error\n======================== ============= =============\nResNet-18 30.24 10.92\nResNet-34 26.70 8.58\nResNet-50 23.85 7.13\nResNet-101 22.63 6.44\nResNet-152 21.69 5.94\nInception v3 22.55 6.44\nAlexNet 43.45 20.91\nVGG-11 30.98 11.37\nVGG-13 30.07 10.75\nVGG-16 28.41 9.62\nVGG-19 27.62 9.12\nSqueezeNet 1.0 41.90 19.58\nSqueezeNet 1.1 41.81 19.38\n======================== ============= =============\n\n\n.. _AlexNet: https://arxiv.org/abs/1404.5997\n.. _VGG: https://arxiv.org/abs/1409.1556\n.. _ResNet: https://arxiv.org/abs/1512.03385\n.. _SqueezeNet: https://arxiv.org/abs/1602.07360\n\"\"\"\n\nfrom .alexnet import *\nfrom .resnet import *\nfrom .vgg import *\nfrom .squeezenet import *\nfrom .inception import *\n", "path": "torchvision/models/__init__.py"}]}
3,024
745
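The torchvision record above moves the two `nn.Dropout()` modules so they follow the hidden ReLU layers, matching the Caffe reference, and swaps the pretrained checkpoint URLs accordingly. Below is a small PyTorch sketch of just the corrected classifier head, assuming torch is available; it is a shape check for illustration, not the torchvision module itself.

```python
import torch
import torch.nn as nn

# Classifier head laid out as in the Caffe reference model: Dropout comes
# after each of the two hidden ReLU layers rather than before the first
# Linear layer, which is what the golden diff above changes.
classifier = nn.Sequential(
    nn.Linear(512 * 7 * 7, 4096),
    nn.ReLU(True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(True),
    nn.Dropout(),
    nn.Linear(4096, 1000),
)

classifier.eval()  # dropout is a no-op in eval mode, as the issue notes
x = torch.randn(2, 512 * 7 * 7)
print(classifier(x).shape)  # torch.Size([2, 1000])
```

In eval mode the ordering makes no numerical difference; it matters when mapping Caffe-converted weights onto the module, since the positional indices of the Linear layers inside the Sequential shift, which is presumably why the checkpoint URLs change in the same patch.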
gh_patches_debug_7616
rasdani/github-patches
git_diff
pyro-ppl__numpyro-1647
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error when importing numpyro-0.13.1 UPDATE: Bumping JAX to the latest version seems to fix the problem jax version = 0.4.14 Reproducible example in google colab: https://colab.research.google.com/drive/1R444hZjVV0KDaaksTE6Gf72DaH8rUqZt?usp=sharing ``` [/usr/local/lib/python3.10/dist-packages/numpyro/__init__.py](https://localhost:8080/#) in <module> 4 import logging 5 ----> 6 from numpyro import compat, diagnostics, distributions, handlers, infer, ops, optim 7 from numpyro.distributions.distribution import enable_validation, validation_enabled 8 from numpyro.infer.inspect import render_model [/usr/local/lib/python3.10/dist-packages/numpyro/infer/__init__.py](https://localhost:8080/#) in <module> 3 4 from numpyro.infer.barker import BarkerMH ----> 5 from numpyro.infer.elbo import ( 6 ELBO, 7 RenyiELBO, [/usr/local/lib/python3.10/dist-packages/numpyro/infer/elbo.py](https://localhost:8080/#) in <module> 23 log_density, 24 ) ---> 25 from numpyro.ops.provenance import eval_provenance 26 from numpyro.util import _validate_model, check_model_guide_match, find_stack_level 27 [/usr/local/lib/python3.10/dist-packages/numpyro/ops/provenance.py](https://localhost:8080/#) in <module> 6 import jax.core as core 7 from jax.experimental.pjit import pjit_p ----> 8 import jax.extend.linear_util as lu 9 from jax.interpreters.partial_eval import trace_to_jaxpr_dynamic 10 from jax.interpreters.pxla import xla_pmap_p ModuleNotFoundError: No module named 'jax.extend.linear_util' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `numpyro/ops/provenance.py` Content: ``` 1 # Copyright Contributors to the Pyro project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 import jax 5 from jax.api_util import flatten_fun, shaped_abstractify 6 import jax.core as core 7 from jax.experimental.pjit import pjit_p 8 import jax.extend.linear_util as lu 9 from jax.interpreters.partial_eval import trace_to_jaxpr_dynamic 10 from jax.interpreters.pxla import xla_pmap_p 11 import jax.numpy as jnp 12 13 14 def eval_provenance(fn, **kwargs): 15 """ 16 Compute the provenance output of ``fun`` using JAX's abstract 17 interpretation machinery. There is no actual array computation performed. 18 19 Example:: 20 21 >>> o = eval_provenance(lambda x, y, z: x + y, x=1, y=2, z=3) 22 >>> assert o == frozenset({"x", "y"}) 23 24 **References** 25 26 [1] David Wingate, Noah Goodman, Andreas StuhlmΓΌller, Jeffrey Siskind (2011) 27 Nonstandard Interpretations of Probabilistic Programs for Efficient Inference 28 http://papers.neurips.cc/paper/4309-nonstandard-interpretations-of-probabilistic-programs-for-efficient-inference.pdf 29 [2] https://jax.readthedocs.io/en/latest/notebooks/Writing_custom_interpreters_in_Jax.html 30 31 :param fun: A callable to track provenance of its (keyword) arguments. 32 :param kwargs: Keyword arguments of `fun`. 33 :returns: A pytree of :class:`frozenset` indicating the dependency on the inputs. 34 """ 35 # Flatten the function and its arguments 36 args, in_tree = jax.tree_util.tree_flatten(((), kwargs)) 37 wrapped_fun, out_tree = flatten_fun(lu.wrap_init(fn), in_tree) 38 # Abstract eval to get output pytree 39 avals = core.safe_map(shaped_abstractify, args) 40 # XXX: we split out the process of abstract evaluation and provenance tracking 41 # for simplicity. 
In principle, they can be merged so that we only need to walk 42 # through the equations once. 43 jaxpr, avals_out, _ = trace_to_jaxpr_dynamic( 44 lu.wrap_init(wrapped_fun.call_wrapped, {}), avals 45 ) 46 47 # get provenances of flatten kwargs 48 aval_kwargs = {} 49 for n, v in kwargs.items(): 50 aval = jax.ShapeDtypeStruct((), jnp.bool_, {"provenance": frozenset({n})}) 51 aval_kwargs[n] = jax.tree_util.tree_map(lambda _: aval, v) 52 aval_args, _ = jax.tree_util.tree_flatten(((), aval_kwargs)) 53 provenance_inputs = jax.tree_util.tree_map( 54 lambda x: x.named_shape["provenance"], aval_args 55 ) 56 57 provenance_outputs = track_deps_jaxpr(jaxpr, provenance_inputs) 58 out_flat = [] 59 for v, p in zip(avals_out, provenance_outputs): 60 val = jax.ShapeDtypeStruct(jnp.shape(v), jnp.result_type(v), {"provenance": p}) 61 out_flat.append(val) 62 out = jax.tree_util.tree_unflatten(out_tree(), out_flat) 63 return jax.tree_util.tree_map(lambda x: x.named_shape["provenance"], out) 64 65 66 def track_deps_jaxpr(jaxpr, provenance_inputs): 67 # Mapping from variable -> provenance 68 env = {} 69 70 def read(v): 71 if isinstance(v, core.Literal): 72 return frozenset() 73 return env.get(v, frozenset()) 74 75 def write(v, p): 76 if isinstance(v, core.Literal): 77 return 78 env[v] = read(v) | p 79 80 core.safe_map(write, jaxpr.invars, provenance_inputs) 81 for eqn in jaxpr.eqns: 82 provenance_inputs = core.safe_map(read, eqn.invars) 83 rule = track_deps_rules.get(eqn.primitive, _default_track_deps_rules) 84 provenance_outputs = rule(eqn, provenance_inputs) 85 core.safe_map(write, eqn.outvars, provenance_outputs) 86 87 return core.safe_map(read, jaxpr.outvars) 88 89 90 track_deps_rules = {} 91 92 93 # XXX: Currently, we use default rule for scan_p, cond_p, while_p, remat_p 94 def _default_track_deps_rules(eqn, provenance_inputs): 95 provenance_outputs = frozenset().union(*provenance_inputs) 96 return [provenance_outputs] * len(eqn.outvars) 97 98 99 def track_deps_call_rule(eqn, provenance_inputs): 100 return track_deps_jaxpr(eqn.params["call_jaxpr"], provenance_inputs) 101 102 103 track_deps_rules[core.call_p] = track_deps_call_rule 104 track_deps_rules[xla_pmap_p] = track_deps_call_rule 105 106 107 def track_deps_closed_call_rule(eqn, provenance_inputs): 108 return track_deps_jaxpr(eqn.params["call_jaxpr"].jaxpr, provenance_inputs) 109 110 111 track_deps_rules[core.closed_call_p] = track_deps_closed_call_rule 112 113 114 def track_deps_pjit_rule(eqn, provenance_inputs): 115 return track_deps_jaxpr(eqn.params["jaxpr"].jaxpr, provenance_inputs) 116 117 118 track_deps_rules[pjit_p] = track_deps_pjit_rule 119 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/numpyro/ops/provenance.py b/numpyro/ops/provenance.py --- a/numpyro/ops/provenance.py +++ b/numpyro/ops/provenance.py @@ -5,7 +5,11 @@ from jax.api_util import flatten_fun, shaped_abstractify import jax.core as core from jax.experimental.pjit import pjit_p -import jax.extend.linear_util as lu + +try: + import jax.extend.linear_util as lu +except ImportError: + import jax.linear_util as lu from jax.interpreters.partial_eval import trace_to_jaxpr_dynamic from jax.interpreters.pxla import xla_pmap_p import jax.numpy as jnp
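The golden diff above is a version-compatibility shim: it tries the new module path first and falls back to the old one, so the same alias `lu` works on both sides of the JAX reorganisation. A minimal sketch of the pattern follows (it assumes JAX is installed; the version notes in the comments are approximate and not taken from the issue):

```python
# Import-fallback shim mirroring the patch above; only the comments are additions here.
try:
    import jax.extend.linear_util as lu  # newer JAX releases expose the module here
except ImportError:
    import jax.linear_util as lu  # older releases (e.g. the 0.4.14 from the issue) only have this path

# Downstream code keeps using the same alias either way, e.g. lu.wrap_init(fn).
print(lu.__name__)
```

Because only the import line differs between JAX versions, the rest of `provenance.py` stays untouched, which is why the diff is so small.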
{"golden_diff": "diff --git a/numpyro/ops/provenance.py b/numpyro/ops/provenance.py\n--- a/numpyro/ops/provenance.py\n+++ b/numpyro/ops/provenance.py\n@@ -5,7 +5,11 @@\n from jax.api_util import flatten_fun, shaped_abstractify\n import jax.core as core\n from jax.experimental.pjit import pjit_p\n-import jax.extend.linear_util as lu\n+\n+try:\n+ import jax.extend.linear_util as lu\n+except ImportError:\n+ import jax.linear_util as lu\n from jax.interpreters.partial_eval import trace_to_jaxpr_dynamic\n from jax.interpreters.pxla import xla_pmap_p\n import jax.numpy as jnp\n", "issue": "Error when importing numpyro-0.13.1\nUPDATE: Bumping JAX to the latest version seems to fix the problem\r\n\r\njax version = 0.4.14\r\n\r\nReproducible example in google colab: https://colab.research.google.com/drive/1R444hZjVV0KDaaksTE6Gf72DaH8rUqZt?usp=sharing\r\n\r\n```\r\n[/usr/local/lib/python3.10/dist-packages/numpyro/__init__.py](https://localhost:8080/#) in <module>\r\n 4 import logging\r\n 5 \r\n----> 6 from numpyro import compat, diagnostics, distributions, handlers, infer, ops, optim\r\n 7 from numpyro.distributions.distribution import enable_validation, validation_enabled\r\n 8 from numpyro.infer.inspect import render_model\r\n\r\n[/usr/local/lib/python3.10/dist-packages/numpyro/infer/__init__.py](https://localhost:8080/#) in <module>\r\n 3 \r\n 4 from numpyro.infer.barker import BarkerMH\r\n----> 5 from numpyro.infer.elbo import (\r\n 6 ELBO,\r\n 7 RenyiELBO,\r\n\r\n[/usr/local/lib/python3.10/dist-packages/numpyro/infer/elbo.py](https://localhost:8080/#) in <module>\r\n 23 log_density,\r\n 24 )\r\n---> 25 from numpyro.ops.provenance import eval_provenance\r\n 26 from numpyro.util import _validate_model, check_model_guide_match, find_stack_level\r\n 27 \r\n\r\n[/usr/local/lib/python3.10/dist-packages/numpyro/ops/provenance.py](https://localhost:8080/#) in <module>\r\n 6 import jax.core as core\r\n 7 from jax.experimental.pjit import pjit_p\r\n----> 8 import jax.extend.linear_util as lu\r\n 9 from jax.interpreters.partial_eval import trace_to_jaxpr_dynamic\r\n 10 from jax.interpreters.pxla import xla_pmap_p\r\n\r\nModuleNotFoundError: No module named 'jax.extend.linear_util'\r\n```\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport jax\nfrom jax.api_util import flatten_fun, shaped_abstractify\nimport jax.core as core\nfrom jax.experimental.pjit import pjit_p\nimport jax.extend.linear_util as lu\nfrom jax.interpreters.partial_eval import trace_to_jaxpr_dynamic\nfrom jax.interpreters.pxla import xla_pmap_p\nimport jax.numpy as jnp\n\n\ndef eval_provenance(fn, **kwargs):\n \"\"\"\n Compute the provenance output of ``fun`` using JAX's abstract\n interpretation machinery. 
There is no actual array computation performed.\n\n Example::\n\n >>> o = eval_provenance(lambda x, y, z: x + y, x=1, y=2, z=3)\n >>> assert o == frozenset({\"x\", \"y\"})\n\n **References**\n\n [1] David Wingate, Noah Goodman, Andreas Stuhlm\u00fcller, Jeffrey Siskind (2011)\n Nonstandard Interpretations of Probabilistic Programs for Efficient Inference\n http://papers.neurips.cc/paper/4309-nonstandard-interpretations-of-probabilistic-programs-for-efficient-inference.pdf\n [2] https://jax.readthedocs.io/en/latest/notebooks/Writing_custom_interpreters_in_Jax.html\n\n :param fun: A callable to track provenance of its (keyword) arguments.\n :param kwargs: Keyword arguments of `fun`.\n :returns: A pytree of :class:`frozenset` indicating the dependency on the inputs.\n \"\"\"\n # Flatten the function and its arguments\n args, in_tree = jax.tree_util.tree_flatten(((), kwargs))\n wrapped_fun, out_tree = flatten_fun(lu.wrap_init(fn), in_tree)\n # Abstract eval to get output pytree\n avals = core.safe_map(shaped_abstractify, args)\n # XXX: we split out the process of abstract evaluation and provenance tracking\n # for simplicity. In principle, they can be merged so that we only need to walk\n # through the equations once.\n jaxpr, avals_out, _ = trace_to_jaxpr_dynamic(\n lu.wrap_init(wrapped_fun.call_wrapped, {}), avals\n )\n\n # get provenances of flatten kwargs\n aval_kwargs = {}\n for n, v in kwargs.items():\n aval = jax.ShapeDtypeStruct((), jnp.bool_, {\"provenance\": frozenset({n})})\n aval_kwargs[n] = jax.tree_util.tree_map(lambda _: aval, v)\n aval_args, _ = jax.tree_util.tree_flatten(((), aval_kwargs))\n provenance_inputs = jax.tree_util.tree_map(\n lambda x: x.named_shape[\"provenance\"], aval_args\n )\n\n provenance_outputs = track_deps_jaxpr(jaxpr, provenance_inputs)\n out_flat = []\n for v, p in zip(avals_out, provenance_outputs):\n val = jax.ShapeDtypeStruct(jnp.shape(v), jnp.result_type(v), {\"provenance\": p})\n out_flat.append(val)\n out = jax.tree_util.tree_unflatten(out_tree(), out_flat)\n return jax.tree_util.tree_map(lambda x: x.named_shape[\"provenance\"], out)\n\n\ndef track_deps_jaxpr(jaxpr, provenance_inputs):\n # Mapping from variable -> provenance\n env = {}\n\n def read(v):\n if isinstance(v, core.Literal):\n return frozenset()\n return env.get(v, frozenset())\n\n def write(v, p):\n if isinstance(v, core.Literal):\n return\n env[v] = read(v) | p\n\n core.safe_map(write, jaxpr.invars, provenance_inputs)\n for eqn in jaxpr.eqns:\n provenance_inputs = core.safe_map(read, eqn.invars)\n rule = track_deps_rules.get(eqn.primitive, _default_track_deps_rules)\n provenance_outputs = rule(eqn, provenance_inputs)\n core.safe_map(write, eqn.outvars, provenance_outputs)\n\n return core.safe_map(read, jaxpr.outvars)\n\n\ntrack_deps_rules = {}\n\n\n# XXX: Currently, we use default rule for scan_p, cond_p, while_p, remat_p\ndef _default_track_deps_rules(eqn, provenance_inputs):\n provenance_outputs = frozenset().union(*provenance_inputs)\n return [provenance_outputs] * len(eqn.outvars)\n\n\ndef track_deps_call_rule(eqn, provenance_inputs):\n return track_deps_jaxpr(eqn.params[\"call_jaxpr\"], provenance_inputs)\n\n\ntrack_deps_rules[core.call_p] = track_deps_call_rule\ntrack_deps_rules[xla_pmap_p] = track_deps_call_rule\n\n\ndef track_deps_closed_call_rule(eqn, provenance_inputs):\n return track_deps_jaxpr(eqn.params[\"call_jaxpr\"].jaxpr, provenance_inputs)\n\n\ntrack_deps_rules[core.closed_call_p] = track_deps_closed_call_rule\n\n\ndef track_deps_pjit_rule(eqn, provenance_inputs):\n 
return track_deps_jaxpr(eqn.params[\"jaxpr\"].jaxpr, provenance_inputs)\n\n\ntrack_deps_rules[pjit_p] = track_deps_pjit_rule\n", "path": "numpyro/ops/provenance.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport jax\nfrom jax.api_util import flatten_fun, shaped_abstractify\nimport jax.core as core\nfrom jax.experimental.pjit import pjit_p\n\ntry:\n import jax.extend.linear_util as lu\nexcept ImportError:\n import jax.linear_util as lu\nfrom jax.interpreters.partial_eval import trace_to_jaxpr_dynamic\nfrom jax.interpreters.pxla import xla_pmap_p\nimport jax.numpy as jnp\n\n\ndef eval_provenance(fn, **kwargs):\n \"\"\"\n Compute the provenance output of ``fun`` using JAX's abstract\n interpretation machinery. There is no actual array computation performed.\n\n Example::\n\n >>> o = eval_provenance(lambda x, y, z: x + y, x=1, y=2, z=3)\n >>> assert o == frozenset({\"x\", \"y\"})\n\n **References**\n\n [1] David Wingate, Noah Goodman, Andreas Stuhlm\u00fcller, Jeffrey Siskind (2011)\n Nonstandard Interpretations of Probabilistic Programs for Efficient Inference\n http://papers.neurips.cc/paper/4309-nonstandard-interpretations-of-probabilistic-programs-for-efficient-inference.pdf\n [2] https://jax.readthedocs.io/en/latest/notebooks/Writing_custom_interpreters_in_Jax.html\n\n :param fun: A callable to track provenance of its (keyword) arguments.\n :param kwargs: Keyword arguments of `fun`.\n :returns: A pytree of :class:`frozenset` indicating the dependency on the inputs.\n \"\"\"\n # Flatten the function and its arguments\n args, in_tree = jax.tree_util.tree_flatten(((), kwargs))\n wrapped_fun, out_tree = flatten_fun(lu.wrap_init(fn), in_tree)\n # Abstract eval to get output pytree\n avals = core.safe_map(shaped_abstractify, args)\n # XXX: we split out the process of abstract evaluation and provenance tracking\n # for simplicity. 
In principle, they can be merged so that we only need to walk\n # through the equations once.\n jaxpr, avals_out, _ = trace_to_jaxpr_dynamic(\n lu.wrap_init(wrapped_fun.call_wrapped, {}), avals\n )\n\n # get provenances of flatten kwargs\n aval_kwargs = {}\n for n, v in kwargs.items():\n aval = jax.ShapeDtypeStruct((), jnp.bool_, {\"provenance\": frozenset({n})})\n aval_kwargs[n] = jax.tree_util.tree_map(lambda _: aval, v)\n aval_args, _ = jax.tree_util.tree_flatten(((), aval_kwargs))\n provenance_inputs = jax.tree_util.tree_map(\n lambda x: x.named_shape[\"provenance\"], aval_args\n )\n\n provenance_outputs = track_deps_jaxpr(jaxpr, provenance_inputs)\n out_flat = []\n for v, p in zip(avals_out, provenance_outputs):\n val = jax.ShapeDtypeStruct(jnp.shape(v), jnp.result_type(v), {\"provenance\": p})\n out_flat.append(val)\n out = jax.tree_util.tree_unflatten(out_tree(), out_flat)\n return jax.tree_util.tree_map(lambda x: x.named_shape[\"provenance\"], out)\n\n\ndef track_deps_jaxpr(jaxpr, provenance_inputs):\n # Mapping from variable -> provenance\n env = {}\n\n def read(v):\n if isinstance(v, core.Literal):\n return frozenset()\n return env.get(v, frozenset())\n\n def write(v, p):\n if isinstance(v, core.Literal):\n return\n env[v] = read(v) | p\n\n core.safe_map(write, jaxpr.invars, provenance_inputs)\n for eqn in jaxpr.eqns:\n provenance_inputs = core.safe_map(read, eqn.invars)\n rule = track_deps_rules.get(eqn.primitive, _default_track_deps_rules)\n provenance_outputs = rule(eqn, provenance_inputs)\n core.safe_map(write, eqn.outvars, provenance_outputs)\n\n return core.safe_map(read, jaxpr.outvars)\n\n\ntrack_deps_rules = {}\n\n\n# XXX: Currently, we use default rule for scan_p, cond_p, while_p, remat_p\ndef _default_track_deps_rules(eqn, provenance_inputs):\n provenance_outputs = frozenset().union(*provenance_inputs)\n return [provenance_outputs] * len(eqn.outvars)\n\n\ndef track_deps_call_rule(eqn, provenance_inputs):\n return track_deps_jaxpr(eqn.params[\"call_jaxpr\"], provenance_inputs)\n\n\ntrack_deps_rules[core.call_p] = track_deps_call_rule\ntrack_deps_rules[xla_pmap_p] = track_deps_call_rule\n\n\ndef track_deps_closed_call_rule(eqn, provenance_inputs):\n return track_deps_jaxpr(eqn.params[\"call_jaxpr\"].jaxpr, provenance_inputs)\n\n\ntrack_deps_rules[core.closed_call_p] = track_deps_closed_call_rule\n\n\ndef track_deps_pjit_rule(eqn, provenance_inputs):\n return track_deps_jaxpr(eqn.params[\"jaxpr\"].jaxpr, provenance_inputs)\n\n\ntrack_deps_rules[pjit_p] = track_deps_pjit_rule\n", "path": "numpyro/ops/provenance.py"}]}
2,174
167
gh_patches_debug_16492
rasdani/github-patches
git_diff
googleapis__google-cloud-python-4174
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PubSub: Default thread-based subscriber Policy's on_response() implementation degenerates into series of synchronous calls because of debug statement https://github.com/GoogleCloudPlatform/google-cloud-python/blob/4e17158c0f14e0e91cbeb74c3e8483c5a231e9ac/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py#L147 While testing out the new PubSub API (v 0.28.3), I noticed that the default thread-based `Policy`, which uses a `ThreadPoolExecutor(max_workers=10)` seemed to be executing the subscriber callbacks sequentially, instead of asynchronously. Upon introspection of the source, it looks like the above referenced code: ``` ... future = self._executor.submit(self._callback, message) logger.debug('Result: %s' % future.result()) ``` will block until `self._callback` completes, because of the call to `future.result()`. Or am I losing my mind? Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py` Content: ``` 1 # Copyright 2017, Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from __future__ import absolute_import 16 17 from concurrent import futures 18 from queue import Queue 19 import logging 20 import threading 21 22 import grpc 23 24 from google.cloud.pubsub_v1 import types 25 from google.cloud.pubsub_v1.subscriber import _helper_threads 26 from google.cloud.pubsub_v1.subscriber.policy import base 27 from google.cloud.pubsub_v1.subscriber.message import Message 28 29 30 logger = logging.getLogger(__name__) 31 32 33 class Policy(base.BasePolicy): 34 """A consumer class based on :class:`threading.Thread`. 35 36 This consumer handles the connection to the Pub/Sub service and all of 37 the concurrency needs. 38 """ 39 def __init__(self, client, subscription, flow_control=types.FlowControl(), 40 executor=None, queue=None): 41 """Instantiate the policy. 42 43 Args: 44 client (~.pubsub_v1.subscriber.client): The subscriber client used 45 to create this instance. 46 subscription (str): The name of the subscription. The canonical 47 format for this is 48 ``projects/{project}/subscriptions/{subscription}``. 49 flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow 50 control settings. 51 executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A 52 ThreadPoolExecutor instance, or anything duck-type compatible 53 with it. 54 queue (~queue.Queue): (Optional.) A Queue instance, appropriate 55 for crossing the concurrency boundary implemented by 56 ``executor``. 57 """ 58 # Default the callback to a no-op; it is provided by `.open`. 59 self._callback = lambda message: None 60 61 # Create a queue for keeping track of shared state. 62 if queue is None: 63 queue = Queue() 64 self._request_queue = queue 65 66 # Call the superclass constructor. 
67 super(Policy, self).__init__( 68 client=client, 69 flow_control=flow_control, 70 subscription=subscription, 71 ) 72 73 # Also maintain a request queue and an executor. 74 logger.debug('Creating callback requests thread (not starting).') 75 if executor is None: 76 executor = futures.ThreadPoolExecutor(max_workers=10) 77 self._executor = executor 78 self._callback_requests = _helper_threads.QueueCallbackThread( 79 self._request_queue, 80 self.on_callback_request, 81 ) 82 83 def close(self): 84 """Close the existing connection.""" 85 # Close the main subscription connection. 86 self._consumer.helper_threads.stop('callback requests worker') 87 self._consumer.stop_consuming() 88 89 def open(self, callback): 90 """Open a streaming pull connection and begin receiving messages. 91 92 For each message received, the ``callback`` function is fired with 93 a :class:`~.pubsub_v1.subscriber.message.Message` as its only 94 argument. 95 96 Args: 97 callback (Callable): The callback function. 98 """ 99 # Start the thread to pass the requests. 100 logger.debug('Starting callback requests worker.') 101 self._callback = callback 102 self._consumer.helper_threads.start( 103 'callback requests worker', 104 self._request_queue, 105 self._callback_requests, 106 ) 107 108 # Actually start consuming messages. 109 self._consumer.start_consuming() 110 111 # Spawn a helper thread that maintains all of the leases for 112 # this policy. 113 logger.debug('Spawning lease maintenance worker.') 114 self._leaser = threading.Thread(target=self.maintain_leases) 115 self._leaser.daemon = True 116 self._leaser.start() 117 118 def on_callback_request(self, callback_request): 119 """Map the callback request to the appropriate GRPC request.""" 120 action, kwargs = callback_request[0], callback_request[1] 121 getattr(self, action)(**kwargs) 122 123 def on_exception(self, exception): 124 """Bubble the exception. 125 126 This will cause the stream to exit loudly. 127 """ 128 # If this is DEADLINE_EXCEEDED, then we want to retry. 129 # That entails just returning None. 130 deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED 131 if getattr(exception, 'code', lambda: None)() == deadline_exceeded: 132 return 133 134 # Raise any other exception. 135 raise exception 136 137 def on_response(self, response): 138 """Process all received Pub/Sub messages. 139 140 For each message, schedule a callback with the executor. 141 """ 142 for msg in response.received_messages: 143 logger.debug('New message received from Pub/Sub: %r', msg) 144 logger.debug(self._callback) 145 message = Message(msg.message, msg.ack_id, self._request_queue) 146 future = self._executor.submit(self._callback, message) 147 logger.debug('Result: %s' % future.result()) 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py --- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py +++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py @@ -30,6 +30,11 @@ logger = logging.getLogger(__name__) +def _callback_completed(future): + """Simple callback that just logs a `Future`'s result.""" + logger.debug('Result: %s', future.result()) + + class Policy(base.BasePolicy): """A consumer class based on :class:`threading.Thread`. @@ -144,4 +149,4 @@ logger.debug(self._callback) message = Message(msg.message, msg.ack_id, self._request_queue) future = self._executor.submit(self._callback, message) - logger.debug('Result: %s' % future.result()) + future.add_done_callback(_callback_completed)
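The change above swaps a blocking `future.result()` call for `Future.add_done_callback`, which is what restores real concurrency in `on_response()`. The difference can be reproduced with the standard library alone; the sketch below uses made-up names (`handle`, `_log_result`) rather than the actual Pub/Sub policy code:

```python
# Standalone illustration: log each Future's outcome without blocking the submitting loop.
import logging
import time
from concurrent import futures

logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)


def handle(message):
    """Stand-in for a slow subscriber callback."""
    time.sleep(0.1)
    return "processed %s" % message


def _log_result(future):
    # Runs only once the future has resolved, so result() can no longer stall the loop below.
    logger.debug("Result: %s", future.result())


executor = futures.ThreadPoolExecutor(max_workers=10)
submitted = []
for i in range(5):
    fut = executor.submit(handle, i)
    fut.add_done_callback(_log_result)  # non-blocking, like the patched on_response()
    # logger.debug("Result: %s", fut.result())  # old pattern: forces each task to finish before the next submit
    submitted.append(fut)

futures.wait(submitted)
executor.shutdown()
```

With `add_done_callback` the five submissions go out immediately and the batch finishes in roughly one task's duration; with the commented-out `result()` line they run one after another, which is exactly the degeneration the issue describes.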
{"golden_diff": "diff --git a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py\n--- a/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py\n+++ b/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py\n@@ -30,6 +30,11 @@\n logger = logging.getLogger(__name__)\n \n \n+def _callback_completed(future):\n+ \"\"\"Simple callback that just logs a `Future`'s result.\"\"\"\n+ logger.debug('Result: %s', future.result())\n+\n+\n class Policy(base.BasePolicy):\n \"\"\"A consumer class based on :class:`threading.Thread`.\n \n@@ -144,4 +149,4 @@\n logger.debug(self._callback)\n message = Message(msg.message, msg.ack_id, self._request_queue)\n future = self._executor.submit(self._callback, message)\n- logger.debug('Result: %s' % future.result())\n+ future.add_done_callback(_callback_completed)\n", "issue": "PubSub: Default thread-based subscriber Policy's on_response() implementation degenerates into series of synchronous calls because of debug statement\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/4e17158c0f14e0e91cbeb74c3e8483c5a231e9ac/pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py#L147\r\n\r\nWhile testing out the new PubSub API (v 0.28.3), I noticed that the default thread-based `Policy`, which uses a `ThreadPoolExecutor(max_workers=10)` seemed to be executing the subscriber callbacks sequentially, instead of asynchronously. Upon introspection of the source, it looks like the above referenced code:\r\n```\r\n...\r\nfuture = self._executor.submit(self._callback, message)\r\nlogger.debug('Result: %s' % future.result())\r\n```\r\nwill block until `self._callback` completes, because of the call to `future.result()`. Or am I losing my mind? Thanks!\n", "before_files": [{"content": "# Copyright 2017, Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom concurrent import futures\nfrom queue import Queue\nimport logging\nimport threading\n\nimport grpc\n\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.subscriber import _helper_threads\nfrom google.cloud.pubsub_v1.subscriber.policy import base\nfrom google.cloud.pubsub_v1.subscriber.message import Message\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Policy(base.BasePolicy):\n \"\"\"A consumer class based on :class:`threading.Thread`.\n\n This consumer handles the connection to the Pub/Sub service and all of\n the concurrency needs.\n \"\"\"\n def __init__(self, client, subscription, flow_control=types.FlowControl(),\n executor=None, queue=None):\n \"\"\"Instantiate the policy.\n\n Args:\n client (~.pubsub_v1.subscriber.client): The subscriber client used\n to create this instance.\n subscription (str): The name of the subscription. 
The canonical\n format for this is\n ``projects/{project}/subscriptions/{subscription}``.\n flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow\n control settings.\n executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A\n ThreadPoolExecutor instance, or anything duck-type compatible\n with it.\n queue (~queue.Queue): (Optional.) A Queue instance, appropriate\n for crossing the concurrency boundary implemented by\n ``executor``.\n \"\"\"\n # Default the callback to a no-op; it is provided by `.open`.\n self._callback = lambda message: None\n\n # Create a queue for keeping track of shared state.\n if queue is None:\n queue = Queue()\n self._request_queue = queue\n\n # Call the superclass constructor.\n super(Policy, self).__init__(\n client=client,\n flow_control=flow_control,\n subscription=subscription,\n )\n\n # Also maintain a request queue and an executor.\n logger.debug('Creating callback requests thread (not starting).')\n if executor is None:\n executor = futures.ThreadPoolExecutor(max_workers=10)\n self._executor = executor\n self._callback_requests = _helper_threads.QueueCallbackThread(\n self._request_queue,\n self.on_callback_request,\n )\n\n def close(self):\n \"\"\"Close the existing connection.\"\"\"\n # Close the main subscription connection.\n self._consumer.helper_threads.stop('callback requests worker')\n self._consumer.stop_consuming()\n\n def open(self, callback):\n \"\"\"Open a streaming pull connection and begin receiving messages.\n\n For each message received, the ``callback`` function is fired with\n a :class:`~.pubsub_v1.subscriber.message.Message` as its only\n argument.\n\n Args:\n callback (Callable): The callback function.\n \"\"\"\n # Start the thread to pass the requests.\n logger.debug('Starting callback requests worker.')\n self._callback = callback\n self._consumer.helper_threads.start(\n 'callback requests worker',\n self._request_queue,\n self._callback_requests,\n )\n\n # Actually start consuming messages.\n self._consumer.start_consuming()\n\n # Spawn a helper thread that maintains all of the leases for\n # this policy.\n logger.debug('Spawning lease maintenance worker.')\n self._leaser = threading.Thread(target=self.maintain_leases)\n self._leaser.daemon = True\n self._leaser.start()\n\n def on_callback_request(self, callback_request):\n \"\"\"Map the callback request to the appropriate GRPC request.\"\"\"\n action, kwargs = callback_request[0], callback_request[1]\n getattr(self, action)(**kwargs)\n\n def on_exception(self, exception):\n \"\"\"Bubble the exception.\n\n This will cause the stream to exit loudly.\n \"\"\"\n # If this is DEADLINE_EXCEEDED, then we want to retry.\n # That entails just returning None.\n deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED\n if getattr(exception, 'code', lambda: None)() == deadline_exceeded:\n return\n\n # Raise any other exception.\n raise exception\n\n def on_response(self, response):\n \"\"\"Process all received Pub/Sub messages.\n\n For each message, schedule a callback with the executor.\n \"\"\"\n for msg in response.received_messages:\n logger.debug('New message received from Pub/Sub: %r', msg)\n logger.debug(self._callback)\n message = Message(msg.message, msg.ack_id, self._request_queue)\n future = self._executor.submit(self._callback, message)\n logger.debug('Result: %s' % future.result())\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py"}], "after_files": [{"content": "# Copyright 2017, Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom concurrent import futures\nfrom queue import Queue\nimport logging\nimport threading\n\nimport grpc\n\nfrom google.cloud.pubsub_v1 import types\nfrom google.cloud.pubsub_v1.subscriber import _helper_threads\nfrom google.cloud.pubsub_v1.subscriber.policy import base\nfrom google.cloud.pubsub_v1.subscriber.message import Message\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _callback_completed(future):\n \"\"\"Simple callback that just logs a `Future`'s result.\"\"\"\n logger.debug('Result: %s', future.result())\n\n\nclass Policy(base.BasePolicy):\n \"\"\"A consumer class based on :class:`threading.Thread`.\n\n This consumer handles the connection to the Pub/Sub service and all of\n the concurrency needs.\n \"\"\"\n def __init__(self, client, subscription, flow_control=types.FlowControl(),\n executor=None, queue=None):\n \"\"\"Instantiate the policy.\n\n Args:\n client (~.pubsub_v1.subscriber.client): The subscriber client used\n to create this instance.\n subscription (str): The name of the subscription. The canonical\n format for this is\n ``projects/{project}/subscriptions/{subscription}``.\n flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow\n control settings.\n executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A\n ThreadPoolExecutor instance, or anything duck-type compatible\n with it.\n queue (~queue.Queue): (Optional.) 
A Queue instance, appropriate\n for crossing the concurrency boundary implemented by\n ``executor``.\n \"\"\"\n # Default the callback to a no-op; it is provided by `.open`.\n self._callback = lambda message: None\n\n # Create a queue for keeping track of shared state.\n if queue is None:\n queue = Queue()\n self._request_queue = queue\n\n # Call the superclass constructor.\n super(Policy, self).__init__(\n client=client,\n flow_control=flow_control,\n subscription=subscription,\n )\n\n # Also maintain a request queue and an executor.\n logger.debug('Creating callback requests thread (not starting).')\n if executor is None:\n executor = futures.ThreadPoolExecutor(max_workers=10)\n self._executor = executor\n self._callback_requests = _helper_threads.QueueCallbackThread(\n self._request_queue,\n self.on_callback_request,\n )\n\n def close(self):\n \"\"\"Close the existing connection.\"\"\"\n # Close the main subscription connection.\n self._consumer.helper_threads.stop('callback requests worker')\n self._consumer.stop_consuming()\n\n def open(self, callback):\n \"\"\"Open a streaming pull connection and begin receiving messages.\n\n For each message received, the ``callback`` function is fired with\n a :class:`~.pubsub_v1.subscriber.message.Message` as its only\n argument.\n\n Args:\n callback (Callable): The callback function.\n \"\"\"\n # Start the thread to pass the requests.\n logger.debug('Starting callback requests worker.')\n self._callback = callback\n self._consumer.helper_threads.start(\n 'callback requests worker',\n self._request_queue,\n self._callback_requests,\n )\n\n # Actually start consuming messages.\n self._consumer.start_consuming()\n\n # Spawn a helper thread that maintains all of the leases for\n # this policy.\n logger.debug('Spawning lease maintenance worker.')\n self._leaser = threading.Thread(target=self.maintain_leases)\n self._leaser.daemon = True\n self._leaser.start()\n\n def on_callback_request(self, callback_request):\n \"\"\"Map the callback request to the appropriate GRPC request.\"\"\"\n action, kwargs = callback_request[0], callback_request[1]\n getattr(self, action)(**kwargs)\n\n def on_exception(self, exception):\n \"\"\"Bubble the exception.\n\n This will cause the stream to exit loudly.\n \"\"\"\n # If this is DEADLINE_EXCEEDED, then we want to retry.\n # That entails just returning None.\n deadline_exceeded = grpc.StatusCode.DEADLINE_EXCEEDED\n if getattr(exception, 'code', lambda: None)() == deadline_exceeded:\n return\n\n # Raise any other exception.\n raise exception\n\n def on_response(self, response):\n \"\"\"Process all received Pub/Sub messages.\n\n For each message, schedule a callback with the executor.\n \"\"\"\n for msg in response.received_messages:\n logger.debug('New message received from Pub/Sub: %r', msg)\n logger.debug(self._callback)\n message = Message(msg.message, msg.ack_id, self._request_queue)\n future = self._executor.submit(self._callback, message)\n future.add_done_callback(_callback_completed)\n", "path": "pubsub/google/cloud/pubsub_v1/subscriber/policy/thread.py"}]}
1,945
224
gh_patches_debug_14721
rasdani/github-patches
git_diff
vas3k__vas3k.club-264
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- На страницС профиля Π² Π±Π»ΠΎΠΊΠ΅ "послСдниС ΠΊΠΎΠΌΠΌΠ΅Π½Ρ‚Π°Ρ€ΠΈΠΈ" Π²ΠΈΠ΄Π½Ρ‹ ΠΊΠΎΠΌΠΌΠ΅Π½Ρ‚Ρ‹ ΠΊ ΡƒΠ΄Π°Π»Π΅Π½Π½Ρ‹ΠΌ(скрытым) постам Π—Π°ΠΌΠ΅Ρ‡Π΅Π½ΠΎ, Ссли ΠΎΡ‚ΠΊΠΎΠΌΠΌΠ΅Π½Ρ‚ΠΈΡ€ΠΎΠ²Π°Ρ‚ΡŒ пост, ΠΊΠΎΡ‚ΠΎΡ€Ρ‹ΠΉ ΠΏΠΎΠ·ΠΆΠ΅ скрыли, Ρ‚ΠΎ Π² Π±Π»ΠΎΠΊΠ΅ "послСдниС ΠΊΠΎΠΌΠΌΠ΅Π½Ρ‚Π°Ρ€ΠΈΠΈ" этот ΠΊΠΎΠΌΠΌΠ΅Π½Ρ‚ всС Ρ€Π°Π²Π½ΠΎ остаСтся. ΠŸΡ€Π΅Π΄ΠΏΠΎΠ»Π°Π³Π°ΡŽ, Ρ‡Ρ‚ΠΎ Π½ΡƒΠΆΠ½ΠΎ всСго лишь ΠΏΠΎΠΏΡ€Π°Π²ΠΈΡ‚ΡŒ Ρ„ΠΈΠ»ΡŒΡ‚Ρ€ запроса Π½Π° ΠΊΠΎΠΌΠΌΠ΅Π½Ρ‚Π°Ρ€ΠΈΠΈ, Π΄ΠΎΠ±Π°Π²ΠΈΠ² ΠΏΡ€ΠΎΠ²Π΅Ρ€ΠΊΡƒ Π½Π° Π²ΠΈΠ΄ΠΈΠΌΠΎΡΡ‚ΡŒ связанного поста. Π§ΡƒΡ‚ΡŒ ΠΏΠΎΠ·ΠΆΠ΅ смогу завСсти ΠΏΡ€ΠΎΠ΅ΠΊΡ‚ Ρƒ сСбя Π½Π° Π»ΠΎΠΊΠ°Π»ΠΊΠ΅ ΠΈ сдСлаю PR :) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `users/views.py` Content: ``` 1 from datetime import datetime, timedelta 2 3 from django.conf import settings 4 from django.contrib.postgres.search import SearchQuery 5 from django.db.models import Count 6 from django.http import Http404 7 from django.shortcuts import render, redirect, get_object_or_404 8 from django_q.tasks import async_task 9 10 from auth.helpers import auth_required, moderator_role_required 11 from common.pagination import paginate 12 from common.request import ajax_request 13 from notifications.telegram.users import notify_profile_needs_review 14 from posts.models import Post 15 from comments.models import Comment 16 from search.models import SearchIndex 17 from users.admin import do_user_admin_actions 18 from users.forms.admin import UserAdminForm 19 from users.forms.intro import UserIntroForm 20 from users.forms.profile import UserEditForm, ExpertiseForm, NotificationsEditForm 21 from users.models.user import User 22 from users.models.expertise import UserExpertise 23 from users.models.badges import UserAchievement 24 from users.models.tags import Tag, UserTag 25 from users.models.geo import Geo 26 from utils.models import top, group_by 27 28 29 @auth_required 30 def intro(request): 31 if request.me.moderation_status == User.MODERATION_STATUS_APPROVED: 32 return redirect("profile", request.me.slug) 33 34 if request.method == "POST": 35 form = UserIntroForm(request.POST, request.FILES, instance=request.me) 36 if form.is_valid(): 37 user = form.save(commit=False) 38 39 # send to moderation 40 user.moderation_status = User.MODERATION_STATUS_ON_REVIEW 41 user.save() 42 43 # create intro post 44 intro_post = Post.upsert_user_intro( 45 user, form.cleaned_data["intro"], is_visible=False 46 ) 47 48 Geo.update_for_user(user) 49 50 # notify moderators to review profile 51 async_task(notify_profile_needs_review, user, intro_post) 52 53 return redirect("on_review") 54 else: 55 existing_intro = Post.get_user_intro(request.me) 56 form = UserIntroForm( 57 instance=request.me, 58 initial={"intro": existing_intro.text if existing_intro else ""}, 59 ) 60 61 return render(request, "users/intro.html", {"form": form}) 62 63 64 @auth_required 65 def profile(request, user_slug): 66 if user_slug == "me": 67 return redirect("profile", request.me.slug, permanent=False) 68 69 user = get_object_or_404(User, slug=user_slug) 70 71 if not request.me.is_moderator: 72 # hide unverified users 73 if user.moderation_status != User.MODERATION_STATUS_APPROVED: 74 raise Http404() 75 76 if user.id == request.me.id: 77 goto = request.GET.get("goto") 78 if goto and goto.startswith(settings.APP_HOST): 79 return redirect(goto) 80 81 tags = 
Tag.objects.filter(is_visible=True).all() 82 83 intro = Post.get_user_intro(user) 84 projects = Post.objects.filter(author=user, type=Post.TYPE_PROJECT).all() 85 active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()} 86 achievements = UserAchievement.objects.filter(user=user).select_related("achievement") 87 expertises = UserExpertise.objects.filter(user=user).all() 88 comments = Comment.visible_objects().filter(author=user).order_by("-created_at")[:3] 89 posts = Post.objects_for_user(request.me)\ 90 .filter(author=user, is_visible=True)\ 91 .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT]) 92 93 return render(request, "users/profile.html", { 94 "user": user, 95 "intro": intro, 96 "projects": projects, 97 "tags": tags, 98 "active_tags": active_tags, 99 "achievements": [ua.achievement for ua in achievements], 100 "expertises": expertises, 101 "comments": comments, 102 "posts": paginate(request, posts), 103 }) 104 105 106 @auth_required 107 def edit_profile(request, user_slug): 108 if user_slug == "me": 109 return redirect("edit_profile", request.me.slug, permanent=False) 110 111 user = get_object_or_404(User, slug=user_slug) 112 if user.id != request.me.id and not request.me.is_moderator: 113 raise Http404() 114 115 if request.method == "POST": 116 form = UserEditForm(request.POST, request.FILES, instance=user) 117 if form.is_valid(): 118 user = form.save(commit=False) 119 user.save() 120 121 SearchIndex.update_user_index(user) 122 Geo.update_for_user(user) 123 124 return redirect("profile", user.slug) 125 else: 126 form = UserEditForm(instance=user) 127 128 return render(request, "users/edit/profile.html", {"form": form, "user": user}) 129 130 131 @auth_required 132 def edit_notifications(request, user_slug): 133 if user_slug == "me": 134 return redirect("edit_notifications", request.me.slug, permanent=False) 135 136 user = get_object_or_404(User, slug=user_slug) 137 if user.id != request.me.id and not request.me.is_moderator: 138 raise Http404() 139 140 if request.method == "POST": 141 form = NotificationsEditForm(request.POST, instance=user) 142 if form.is_valid(): 143 user = form.save(commit=False) 144 user.save() 145 return redirect("profile", user.slug) 146 else: 147 form = NotificationsEditForm(instance=user) 148 149 return render(request, "users/edit/notifications.html", {"form": form, "user": user}) 150 151 152 @auth_required 153 def edit_bot(request, user_slug): 154 if user_slug == "me": 155 return redirect("edit_bot", request.me.slug, permanent=False) 156 157 user = get_object_or_404(User, slug=user_slug) 158 if user.id != request.me.id and not request.me.is_moderator: 159 raise Http404() 160 161 return render(request, "users/edit/bot.html", {"user": user}) 162 163 164 @auth_required 165 @moderator_role_required 166 def admin_profile(request, user_slug): 167 user = get_object_or_404(User, slug=user_slug) 168 169 if request.method == "POST": 170 form = UserAdminForm(request.POST, request.FILES) 171 if form.is_valid(): 172 return do_user_admin_actions(request, user, form.cleaned_data) 173 else: 174 form = UserAdminForm() 175 176 return render(request, "users/admin.html", {"user": user, "form": form}) 177 178 179 @auth_required 180 @ajax_request 181 def toggle_tag(request, tag_code): 182 if request.method != "POST": 183 raise Http404() 184 185 tag = get_object_or_404(Tag, code=tag_code) 186 187 user_tag, is_created = UserTag.objects.get_or_create( 188 user=request.me, tag=tag, defaults=dict(name=tag.name) 189 ) 190 191 if not is_created: 192 
user_tag.delete() 193 194 SearchIndex.update_user_tags(request.me) 195 196 return { 197 "status": "created" if is_created else "deleted", 198 "tag": {"code": tag.code, "name": tag.name, "color": tag.color}, 199 } 200 201 202 @auth_required 203 @ajax_request 204 def add_expertise(request): 205 if request.method == "POST": 206 form = ExpertiseForm(request.POST) 207 if form.is_valid(): 208 user_expertise = form.save(commit=False) 209 user_expertise.user = request.me 210 UserExpertise.objects.filter( 211 user=request.me, expertise=user_expertise.expertise 212 ).delete() 213 user_expertise.save() 214 return { 215 "status": "created", 216 "expertise": { 217 "name": user_expertise.name, 218 "expertise": user_expertise.expertise, 219 "value": user_expertise.value, 220 }, 221 } 222 223 return {"status": "tipidor"} 224 225 226 @auth_required 227 @ajax_request 228 def delete_expertise(request, expertise): 229 if request.method == "POST": 230 UserExpertise.objects.filter(user=request.me, expertise=expertise).delete() 231 return { 232 "status": "deleted", 233 "expertise": { 234 "expertise": expertise, 235 }, 236 } 237 238 return {"status": "tipidor"} 239 240 241 @auth_required 242 def on_review(request): 243 if request.me.moderation_status == User.MODERATION_STATUS_APPROVED: 244 return redirect("profile", request.me.slug) 245 return render(request, "users/messages/on_review.html") 246 247 248 @auth_required 249 def rejected(request): 250 return render(request, "users/messages/rejected.html") 251 252 253 @auth_required 254 def banned(request): 255 return render(request, "users/messages/banned.html") 256 257 258 @auth_required 259 def people(request): 260 users = User.registered_members().order_by("-created_at").select_related("geo") 261 262 query = request.GET.get("query") 263 if query: 264 users = users.filter(index__index=SearchQuery(query, config="russian")) 265 266 tags = request.GET.getlist("tags") 267 if tags: 268 users = users.filter(index__tags__contains=tags) 269 270 country = request.GET.get("country") 271 if country: 272 users = users.filter(country=country) 273 274 filters = request.GET.getlist("filters") 275 if filters: 276 if "faang" in filters: 277 users = users.filter(company__in=[ 278 "Facebook", "Apple", "Google", "Amazon", "Netflix", "Microsoft", 279 "ЀСйсбук", "Π“ΡƒΠ³Π»", "Амазон", "НСтфликс", "ΠœΠ°ΠΉΠΊΡ€ΠΎΡΠΎΡ„Ρ‚", "ΠœΠΈΠΊΡ€ΠΎΡΠΎΡ„Ρ‚" 280 ]) 281 282 if "same_city" in filters: 283 users = users.filter(city=request.me.city) 284 285 if "activity" in filters: 286 users = users.filter(last_activity_at__gte=datetime.utcnow() - timedelta(days=30)) 287 288 tags_with_stats = Tag.tags_with_stats() 289 tag_stat_groups = group_by(tags_with_stats, "group", todict=True) 290 tag_stat_groups.update({ 291 "travel": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in { 292 "can_coffee", "can_city", "can_beer", "can_office", "can_sleep", 293 }], 294 "grow": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in { 295 "can_advice", "can_project", "can_teach", "search_idea", 296 "can_idea", "can_invest", "search_mentor", "can_mentor", "can_hobby" 297 }], 298 "work": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in { 299 "can_refer", "search_employees", "search_job", "search_remote", "search_relocate" 300 }], 301 }) 302 303 active_countries = User.registered_members().filter(country__isnull=False)\ 304 .values("country")\ 305 .annotate(country_count=Count("country"))\ 306 .order_by("-country_count") 307 308 map_stat_groups = { 309 "πŸ’Ό Π’ΠΎΠΏ ΠΊΠΎΠΌΠΏΠ°Π½ΠΈΠΉ": 
top(users, "company")[:5], 310 "🏰 Π“ΠΎΡ€ΠΎΠ΄Π°": top(users, "city")[:5], 311 "🎬 ЭкспСртиза": top(UserExpertise.objects.filter(user_id__in=[u.id for u in users]), "name")[:5], 312 } 313 314 return render(request, "users/people.html", { 315 "people_query": { 316 "query": query, 317 "country": country, 318 "tags": tags, 319 "filters": filters, 320 }, 321 "users": users, 322 "users_paginated": paginate(request, users, page_size=settings.PEOPLE_PAGE_SIZE), 323 "tag_stat_groups": tag_stat_groups, 324 "max_tag_user_count": max(tag.user_count for tag in tags_with_stats), 325 "active_countries": active_countries, 326 "map_stat_groups": map_stat_groups, 327 }) 328 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/users/views.py b/users/views.py --- a/users/views.py +++ b/users/views.py @@ -85,7 +85,7 @@ active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()} achievements = UserAchievement.objects.filter(user=user).select_related("achievement") expertises = UserExpertise.objects.filter(user=user).all() - comments = Comment.visible_objects().filter(author=user).order_by("-created_at")[:3] + comments = Comment.visible_objects().filter(author=user, post__is_visible=True).order_by("-created_at")[:3] posts = Post.objects_for_user(request.me)\ .filter(author=user, is_visible=True)\ .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])
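The fix leans on Django's double-underscore lookup: filtering `Comment` on `post__is_visible` joins through to `Post`, so comments whose post has been hidden drop out of the queryset. A self-contained way to see the effect is sketched below; it assumes Django is installed, and the two models are simplified stand-ins for the club's real ones, not the actual schema:

```python
# Minimal standalone Django setup, just enough to run the ORM against in-memory SQLite.
import django
from django.conf import settings

settings.configure(
    DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
    INSTALLED_APPS=[],
)
django.setup()

from django.db import connection, models


class Post(models.Model):
    is_visible = models.BooleanField(default=True)

    class Meta:
        app_label = "demo"  # lets the model live outside an installed app for this sketch


class Comment(models.Model):
    post = models.ForeignKey(Post, on_delete=models.CASCADE)

    class Meta:
        app_label = "demo"


with connection.schema_editor() as editor:
    editor.create_model(Post)
    editor.create_model(Comment)

visible_post = Post.objects.create(is_visible=True)
hidden_post = Post.objects.create(is_visible=False)
Comment.objects.create(post=visible_post)
Comment.objects.create(post=hidden_post)

print(Comment.objects.count())                                # 2: an unfiltered query keeps both
print(Comment.objects.filter(post__is_visible=True).count())  # 1: the patched filter drops the comment on the hidden post
```

The ordering and slicing used in the real view (`.order_by("-created_at")[:3]`) are orthogonal to the fix and are omitted here.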
{"golden_diff": "diff --git a/users/views.py b/users/views.py\n--- a/users/views.py\n+++ b/users/views.py\n@@ -85,7 +85,7 @@\n active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()}\n achievements = UserAchievement.objects.filter(user=user).select_related(\"achievement\")\n expertises = UserExpertise.objects.filter(user=user).all()\n- comments = Comment.visible_objects().filter(author=user).order_by(\"-created_at\")[:3]\n+ comments = Comment.visible_objects().filter(author=user, post__is_visible=True).order_by(\"-created_at\")[:3]\n posts = Post.objects_for_user(request.me)\\\n .filter(author=user, is_visible=True)\\\n .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])\n", "issue": "\u041d\u0430 \u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0435 \u043f\u0440\u043e\u0444\u0438\u043b\u044f \u0432 \u0431\u043b\u043e\u043a\u0435 \"\u043f\u043e\u0441\u043b\u0435\u0434\u043d\u0438\u0435 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438\" \u0432\u0438\u0434\u043d\u044b \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u044b \u043a \u0443\u0434\u0430\u043b\u0435\u043d\u043d\u044b\u043c(\u0441\u043a\u0440\u044b\u0442\u044b\u043c) \u043f\u043e\u0441\u0442\u0430\u043c\n\u0417\u0430\u043c\u0435\u0447\u0435\u043d\u043e, \u0435\u0441\u043b\u0438 \u043e\u0442\u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0438\u0440\u043e\u0432\u0430\u0442\u044c \u043f\u043e\u0441\u0442, \u043a\u043e\u0442\u043e\u0440\u044b\u0439 \u043f\u043e\u0437\u0436\u0435 \u0441\u043a\u0440\u044b\u043b\u0438, \u0442\u043e \u0432 \u0431\u043b\u043e\u043a\u0435 \"\u043f\u043e\u0441\u043b\u0435\u0434\u043d\u0438\u0435 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438\" \u044d\u0442\u043e\u0442 \u043a\u043e\u043c\u043c\u0435\u043d\u0442 \u0432\u0441\u0435 \u0440\u0430\u0432\u043d\u043e \u043e\u0441\u0442\u0430\u0435\u0442\u0441\u044f. 
\r\n\r\n\u041f\u0440\u0435\u0434\u043f\u043e\u043b\u0430\u0433\u0430\u044e, \u0447\u0442\u043e \u043d\u0443\u0436\u043d\u043e \u0432\u0441\u0435\u0433\u043e \u043b\u0438\u0448\u044c \u043f\u043e\u043f\u0440\u0430\u0432\u0438\u0442\u044c \u0444\u0438\u043b\u044c\u0442\u0440 \u0437\u0430\u043f\u0440\u043e\u0441\u0430 \u043d\u0430 \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0438, \u0434\u043e\u0431\u0430\u0432\u0438\u0432 \u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0443 \u043d\u0430 \u0432\u0438\u0434\u0438\u043c\u043e\u0441\u0442\u044c \u0441\u0432\u044f\u0437\u0430\u043d\u043d\u043e\u0433\u043e \u043f\u043e\u0441\u0442\u0430.\r\n\r\n\u0427\u0443\u0442\u044c \u043f\u043e\u0437\u0436\u0435 \u0441\u043c\u043e\u0433\u0443 \u0437\u0430\u0432\u0435\u0441\u0442\u0438 \u043f\u0440\u043e\u0435\u043a\u0442 \u0443 \u0441\u0435\u0431\u044f \u043d\u0430 \u043b\u043e\u043a\u0430\u043b\u043a\u0435 \u0438 \u0441\u0434\u0435\u043b\u0430\u044e PR :)\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.conf import settings\nfrom django.contrib.postgres.search import SearchQuery\nfrom django.db.models import Count\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django_q.tasks import async_task\n\nfrom auth.helpers import auth_required, moderator_role_required\nfrom common.pagination import paginate\nfrom common.request import ajax_request\nfrom notifications.telegram.users import notify_profile_needs_review\nfrom posts.models import Post\nfrom comments.models import Comment\nfrom search.models import SearchIndex\nfrom users.admin import do_user_admin_actions\nfrom users.forms.admin import UserAdminForm\nfrom users.forms.intro import UserIntroForm\nfrom users.forms.profile import UserEditForm, ExpertiseForm, NotificationsEditForm\nfrom users.models.user import User\nfrom users.models.expertise import UserExpertise\nfrom users.models.badges import UserAchievement\nfrom users.models.tags import Tag, UserTag\nfrom users.models.geo import Geo\nfrom utils.models import top, group_by\n\n\n@auth_required\ndef intro(request):\n if request.me.moderation_status == User.MODERATION_STATUS_APPROVED:\n return redirect(\"profile\", request.me.slug)\n\n if request.method == \"POST\":\n form = UserIntroForm(request.POST, request.FILES, instance=request.me)\n if form.is_valid():\n user = form.save(commit=False)\n\n # send to moderation\n user.moderation_status = User.MODERATION_STATUS_ON_REVIEW\n user.save()\n\n # create intro post\n intro_post = Post.upsert_user_intro(\n user, form.cleaned_data[\"intro\"], is_visible=False\n )\n\n Geo.update_for_user(user)\n\n # notify moderators to review profile\n async_task(notify_profile_needs_review, user, intro_post)\n\n return redirect(\"on_review\")\n else:\n existing_intro = Post.get_user_intro(request.me)\n form = UserIntroForm(\n instance=request.me,\n initial={\"intro\": existing_intro.text if existing_intro else \"\"},\n )\n\n return render(request, \"users/intro.html\", {\"form\": form})\n\n\n@auth_required\ndef profile(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"profile\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n\n if not request.me.is_moderator:\n # hide unverified users\n if user.moderation_status != User.MODERATION_STATUS_APPROVED:\n raise Http404()\n\n if user.id == request.me.id:\n goto = request.GET.get(\"goto\")\n if goto and goto.startswith(settings.APP_HOST):\n return redirect(goto)\n\n tags = 
Tag.objects.filter(is_visible=True).all()\n\n intro = Post.get_user_intro(user)\n projects = Post.objects.filter(author=user, type=Post.TYPE_PROJECT).all()\n active_tags = {t.tag_id for t in UserTag.objects.filter(user=user).all()}\n achievements = UserAchievement.objects.filter(user=user).select_related(\"achievement\")\n expertises = UserExpertise.objects.filter(user=user).all()\n comments = Comment.visible_objects().filter(author=user).order_by(\"-created_at\")[:3]\n posts = Post.objects_for_user(request.me)\\\n .filter(author=user, is_visible=True)\\\n .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])\n\n return render(request, \"users/profile.html\", {\n \"user\": user,\n \"intro\": intro,\n \"projects\": projects,\n \"tags\": tags,\n \"active_tags\": active_tags,\n \"achievements\": [ua.achievement for ua in achievements],\n \"expertises\": expertises,\n \"comments\": comments,\n \"posts\": paginate(request, posts),\n })\n\n\n@auth_required\ndef edit_profile(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"edit_profile\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n if request.method == \"POST\":\n form = UserEditForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.save()\n\n SearchIndex.update_user_index(user)\n Geo.update_for_user(user)\n\n return redirect(\"profile\", user.slug)\n else:\n form = UserEditForm(instance=user)\n\n return render(request, \"users/edit/profile.html\", {\"form\": form, \"user\": user})\n\n\n@auth_required\ndef edit_notifications(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"edit_notifications\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n if request.method == \"POST\":\n form = NotificationsEditForm(request.POST, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.save()\n return redirect(\"profile\", user.slug)\n else:\n form = NotificationsEditForm(instance=user)\n\n return render(request, \"users/edit/notifications.html\", {\"form\": form, \"user\": user})\n\n\n@auth_required\ndef edit_bot(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"edit_bot\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n return render(request, \"users/edit/bot.html\", {\"user\": user})\n\n\n@auth_required\n@moderator_role_required\ndef admin_profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n\n if request.method == \"POST\":\n form = UserAdminForm(request.POST, request.FILES)\n if form.is_valid():\n return do_user_admin_actions(request, user, form.cleaned_data)\n else:\n form = UserAdminForm()\n\n return render(request, \"users/admin.html\", {\"user\": user, \"form\": form})\n\n\n@auth_required\n@ajax_request\ndef toggle_tag(request, tag_code):\n if request.method != \"POST\":\n raise Http404()\n\n tag = get_object_or_404(Tag, code=tag_code)\n\n user_tag, is_created = UserTag.objects.get_or_create(\n user=request.me, tag=tag, defaults=dict(name=tag.name)\n )\n\n if not is_created:\n user_tag.delete()\n\n SearchIndex.update_user_tags(request.me)\n\n return {\n \"status\": \"created\" if is_created else \"deleted\",\n \"tag\": {\"code\": 
tag.code, \"name\": tag.name, \"color\": tag.color},\n }\n\n\n@auth_required\n@ajax_request\ndef add_expertise(request):\n if request.method == \"POST\":\n form = ExpertiseForm(request.POST)\n if form.is_valid():\n user_expertise = form.save(commit=False)\n user_expertise.user = request.me\n UserExpertise.objects.filter(\n user=request.me, expertise=user_expertise.expertise\n ).delete()\n user_expertise.save()\n return {\n \"status\": \"created\",\n \"expertise\": {\n \"name\": user_expertise.name,\n \"expertise\": user_expertise.expertise,\n \"value\": user_expertise.value,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\n@ajax_request\ndef delete_expertise(request, expertise):\n if request.method == \"POST\":\n UserExpertise.objects.filter(user=request.me, expertise=expertise).delete()\n return {\n \"status\": \"deleted\",\n \"expertise\": {\n \"expertise\": expertise,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\ndef on_review(request):\n if request.me.moderation_status == User.MODERATION_STATUS_APPROVED:\n return redirect(\"profile\", request.me.slug)\n return render(request, \"users/messages/on_review.html\")\n\n\n@auth_required\ndef rejected(request):\n return render(request, \"users/messages/rejected.html\")\n\n\n@auth_required\ndef banned(request):\n return render(request, \"users/messages/banned.html\")\n\n\n@auth_required\ndef people(request):\n users = User.registered_members().order_by(\"-created_at\").select_related(\"geo\")\n\n query = request.GET.get(\"query\")\n if query:\n users = users.filter(index__index=SearchQuery(query, config=\"russian\"))\n\n tags = request.GET.getlist(\"tags\")\n if tags:\n users = users.filter(index__tags__contains=tags)\n\n country = request.GET.get(\"country\")\n if country:\n users = users.filter(country=country)\n\n filters = request.GET.getlist(\"filters\")\n if filters:\n if \"faang\" in filters:\n users = users.filter(company__in=[\n \"Facebook\", \"Apple\", \"Google\", \"Amazon\", \"Netflix\", \"Microsoft\",\n \"\u0424\u0435\u0439\u0441\u0431\u0443\u043a\", \"\u0413\u0443\u0433\u043b\", \"\u0410\u043c\u0430\u0437\u043e\u043d\", \"\u041d\u0435\u0442\u0444\u043b\u0438\u043a\u0441\", \"\u041c\u0430\u0439\u043a\u0440\u043e\u0441\u043e\u0444\u0442\", \"\u041c\u0438\u043a\u0440\u043e\u0441\u043e\u0444\u0442\"\n ])\n\n if \"same_city\" in filters:\n users = users.filter(city=request.me.city)\n\n if \"activity\" in filters:\n users = users.filter(last_activity_at__gte=datetime.utcnow() - timedelta(days=30))\n\n tags_with_stats = Tag.tags_with_stats()\n tag_stat_groups = group_by(tags_with_stats, \"group\", todict=True)\n tag_stat_groups.update({\n \"travel\": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in {\n \"can_coffee\", \"can_city\", \"can_beer\", \"can_office\", \"can_sleep\",\n }],\n \"grow\": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in {\n \"can_advice\", \"can_project\", \"can_teach\", \"search_idea\",\n \"can_idea\", \"can_invest\", \"search_mentor\", \"can_mentor\", \"can_hobby\"\n }],\n \"work\": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in {\n \"can_refer\", \"search_employees\", \"search_job\", \"search_remote\", \"search_relocate\"\n }],\n })\n\n active_countries = User.registered_members().filter(country__isnull=False)\\\n .values(\"country\")\\\n .annotate(country_count=Count(\"country\"))\\\n .order_by(\"-country_count\")\n\n map_stat_groups = {\n \"\ud83d\udcbc \u0422\u043e\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0439\": 
top(users, \"company\")[:5],\n \"\ud83c\udff0 \u0413\u043e\u0440\u043e\u0434\u0430\": top(users, \"city\")[:5],\n \"\ud83c\udfac \u042d\u043a\u0441\u043f\u0435\u0440\u0442\u0438\u0437\u0430\": top(UserExpertise.objects.filter(user_id__in=[u.id for u in users]), \"name\")[:5],\n }\n\n return render(request, \"users/people.html\", {\n \"people_query\": {\n \"query\": query,\n \"country\": country,\n \"tags\": tags,\n \"filters\": filters,\n },\n \"users\": users,\n \"users_paginated\": paginate(request, users, page_size=settings.PEOPLE_PAGE_SIZE),\n \"tag_stat_groups\": tag_stat_groups,\n \"max_tag_user_count\": max(tag.user_count for tag in tags_with_stats),\n \"active_countries\": active_countries,\n \"map_stat_groups\": map_stat_groups,\n })\n", "path": "users/views.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.conf import settings\nfrom django.contrib.postgres.search import SearchQuery\nfrom django.db.models import Count\nfrom django.http import Http404\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django_q.tasks import async_task\n\nfrom auth.helpers import auth_required, moderator_role_required\nfrom common.pagination import paginate\nfrom common.request import ajax_request\nfrom notifications.telegram.users import notify_profile_needs_review\nfrom posts.models import Post\nfrom comments.models import Comment\nfrom search.models import SearchIndex\nfrom users.admin import do_user_admin_actions\nfrom users.forms.admin import UserAdminForm\nfrom users.forms.intro import UserIntroForm\nfrom users.forms.profile import UserEditForm, ExpertiseForm, NotificationsEditForm\nfrom users.models.user import User\nfrom users.models.expertise import UserExpertise\nfrom users.models.badges import UserAchievement\nfrom users.models.tags import Tag, UserTag\nfrom users.models.geo import Geo\nfrom utils.models import top, group_by\n\n\n@auth_required\ndef intro(request):\n if request.me.moderation_status == User.MODERATION_STATUS_APPROVED:\n return redirect(\"profile\", request.me.slug)\n\n if request.method == \"POST\":\n form = UserIntroForm(request.POST, request.FILES, instance=request.me)\n if form.is_valid():\n user = form.save(commit=False)\n\n # send to moderation\n user.moderation_status = User.MODERATION_STATUS_ON_REVIEW\n user.save()\n\n # create intro post\n intro_post = Post.upsert_user_intro(\n user, form.cleaned_data[\"intro\"], is_visible=False\n )\n\n Geo.update_for_user(user)\n\n # notify moderators to review profile\n async_task(notify_profile_needs_review, user, intro_post)\n\n return redirect(\"on_review\")\n else:\n existing_intro = Post.get_user_intro(request.me)\n form = UserIntroForm(\n instance=request.me,\n initial={\"intro\": existing_intro.text if existing_intro else \"\"},\n )\n\n return render(request, \"users/intro.html\", {\"form\": form})\n\n\n@auth_required\ndef profile(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"profile\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n\n if not request.me.is_moderator:\n # hide unverified users\n if user.moderation_status != User.MODERATION_STATUS_APPROVED:\n raise Http404()\n\n if user.id == request.me.id:\n goto = request.GET.get(\"goto\")\n if goto and goto.startswith(settings.APP_HOST):\n return redirect(goto)\n\n tags = Tag.objects.filter(is_visible=True).all()\n\n intro = Post.get_user_intro(user)\n projects = Post.objects.filter(author=user, type=Post.TYPE_PROJECT).all()\n active_tags = {t.tag_id 
for t in UserTag.objects.filter(user=user).all()}\n achievements = UserAchievement.objects.filter(user=user).select_related(\"achievement\")\n expertises = UserExpertise.objects.filter(user=user).all()\n comments = Comment.visible_objects().filter(author=user, post__is_visible=True).order_by(\"-created_at\")[:3]\n posts = Post.objects_for_user(request.me)\\\n .filter(author=user, is_visible=True)\\\n .exclude(type__in=[Post.TYPE_INTRO, Post.TYPE_PROJECT])\n\n return render(request, \"users/profile.html\", {\n \"user\": user,\n \"intro\": intro,\n \"projects\": projects,\n \"tags\": tags,\n \"active_tags\": active_tags,\n \"achievements\": [ua.achievement for ua in achievements],\n \"expertises\": expertises,\n \"comments\": comments,\n \"posts\": paginate(request, posts),\n })\n\n\n@auth_required\ndef edit_profile(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"edit_profile\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n if request.method == \"POST\":\n form = UserEditForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.save()\n\n SearchIndex.update_user_index(user)\n Geo.update_for_user(user)\n\n return redirect(\"profile\", user.slug)\n else:\n form = UserEditForm(instance=user)\n\n return render(request, \"users/edit/profile.html\", {\"form\": form, \"user\": user})\n\n\n@auth_required\ndef edit_notifications(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"edit_notifications\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n if request.method == \"POST\":\n form = NotificationsEditForm(request.POST, instance=user)\n if form.is_valid():\n user = form.save(commit=False)\n user.save()\n return redirect(\"profile\", user.slug)\n else:\n form = NotificationsEditForm(instance=user)\n\n return render(request, \"users/edit/notifications.html\", {\"form\": form, \"user\": user})\n\n\n@auth_required\ndef edit_bot(request, user_slug):\n if user_slug == \"me\":\n return redirect(\"edit_bot\", request.me.slug, permanent=False)\n\n user = get_object_or_404(User, slug=user_slug)\n if user.id != request.me.id and not request.me.is_moderator:\n raise Http404()\n\n return render(request, \"users/edit/bot.html\", {\"user\": user})\n\n\n@auth_required\n@moderator_role_required\ndef admin_profile(request, user_slug):\n user = get_object_or_404(User, slug=user_slug)\n\n if request.method == \"POST\":\n form = UserAdminForm(request.POST, request.FILES)\n if form.is_valid():\n return do_user_admin_actions(request, user, form.cleaned_data)\n else:\n form = UserAdminForm()\n\n return render(request, \"users/admin.html\", {\"user\": user, \"form\": form})\n\n\n@auth_required\n@ajax_request\ndef toggle_tag(request, tag_code):\n if request.method != \"POST\":\n raise Http404()\n\n tag = get_object_or_404(Tag, code=tag_code)\n\n user_tag, is_created = UserTag.objects.get_or_create(\n user=request.me, tag=tag, defaults=dict(name=tag.name)\n )\n\n if not is_created:\n user_tag.delete()\n\n SearchIndex.update_user_tags(request.me)\n\n return {\n \"status\": \"created\" if is_created else \"deleted\",\n \"tag\": {\"code\": tag.code, \"name\": tag.name, \"color\": tag.color},\n }\n\n\n@auth_required\n@ajax_request\ndef add_expertise(request):\n if request.method == \"POST\":\n form = 
ExpertiseForm(request.POST)\n if form.is_valid():\n user_expertise = form.save(commit=False)\n user_expertise.user = request.me\n UserExpertise.objects.filter(\n user=request.me, expertise=user_expertise.expertise\n ).delete()\n user_expertise.save()\n return {\n \"status\": \"created\",\n \"expertise\": {\n \"name\": user_expertise.name,\n \"expertise\": user_expertise.expertise,\n \"value\": user_expertise.value,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\n@ajax_request\ndef delete_expertise(request, expertise):\n if request.method == \"POST\":\n UserExpertise.objects.filter(user=request.me, expertise=expertise).delete()\n return {\n \"status\": \"deleted\",\n \"expertise\": {\n \"expertise\": expertise,\n },\n }\n\n return {\"status\": \"tipidor\"}\n\n\n@auth_required\ndef on_review(request):\n if request.me.moderation_status == User.MODERATION_STATUS_APPROVED:\n return redirect(\"profile\", request.me.slug)\n return render(request, \"users/messages/on_review.html\")\n\n\n@auth_required\ndef rejected(request):\n return render(request, \"users/messages/rejected.html\")\n\n\n@auth_required\ndef banned(request):\n return render(request, \"users/messages/banned.html\")\n\n\n@auth_required\ndef people(request):\n users = User.registered_members().order_by(\"-created_at\").select_related(\"geo\")\n\n query = request.GET.get(\"query\")\n if query:\n users = users.filter(index__index=SearchQuery(query, config=\"russian\"))\n\n tags = request.GET.getlist(\"tags\")\n if tags:\n users = users.filter(index__tags__contains=tags)\n\n country = request.GET.get(\"country\")\n if country:\n users = users.filter(country=country)\n\n filters = request.GET.getlist(\"filters\")\n if filters:\n if \"faang\" in filters:\n users = users.filter(company__in=[\n \"Facebook\", \"Apple\", \"Google\", \"Amazon\", \"Netflix\", \"Microsoft\",\n \"\u0424\u0435\u0439\u0441\u0431\u0443\u043a\", \"\u0413\u0443\u0433\u043b\", \"\u0410\u043c\u0430\u0437\u043e\u043d\", \"\u041d\u0435\u0442\u0444\u043b\u0438\u043a\u0441\", \"\u041c\u0430\u0439\u043a\u0440\u043e\u0441\u043e\u0444\u0442\", \"\u041c\u0438\u043a\u0440\u043e\u0441\u043e\u0444\u0442\"\n ])\n\n if \"same_city\" in filters:\n users = users.filter(city=request.me.city)\n\n if \"activity\" in filters:\n users = users.filter(last_activity_at__gte=datetime.utcnow() - timedelta(days=30))\n\n tags_with_stats = Tag.tags_with_stats()\n tag_stat_groups = group_by(tags_with_stats, \"group\", todict=True)\n tag_stat_groups.update({\n \"travel\": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in {\n \"can_coffee\", \"can_city\", \"can_beer\", \"can_office\", \"can_sleep\",\n }],\n \"grow\": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in {\n \"can_advice\", \"can_project\", \"can_teach\", \"search_idea\",\n \"can_idea\", \"can_invest\", \"search_mentor\", \"can_mentor\", \"can_hobby\"\n }],\n \"work\": [tag for tag in tag_stat_groups[Tag.GROUP_CLUB] if tag.code in {\n \"can_refer\", \"search_employees\", \"search_job\", \"search_remote\", \"search_relocate\"\n }],\n })\n\n active_countries = User.registered_members().filter(country__isnull=False)\\\n .values(\"country\")\\\n .annotate(country_count=Count(\"country\"))\\\n .order_by(\"-country_count\")\n\n map_stat_groups = {\n \"\ud83d\udcbc \u0422\u043e\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0439\": top(users, \"company\")[:5],\n \"\ud83c\udff0 \u0413\u043e\u0440\u043e\u0434\u0430\": top(users, \"city\")[:5],\n \"\ud83c\udfac 
\u042d\u043a\u0441\u043f\u0435\u0440\u0442\u0438\u0437\u0430\": top(UserExpertise.objects.filter(user_id__in=[u.id for u in users]), \"name\")[:5],\n }\n\n return render(request, \"users/people.html\", {\n \"people_query\": {\n \"query\": query,\n \"country\": country,\n \"tags\": tags,\n \"filters\": filters,\n },\n \"users\": users,\n \"users_paginated\": paginate(request, users, page_size=settings.PEOPLE_PAGE_SIZE),\n \"tag_stat_groups\": tag_stat_groups,\n \"max_tag_user_count\": max(tag.user_count for tag in tags_with_stats),\n \"active_countries\": active_countries,\n \"map_stat_groups\": map_stat_groups,\n })\n", "path": "users/views.py"}]}
3,803
170
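The `people` view in the record above calls two small helpers imported from `utils.models`, `top(items, "field")` and `group_by(items, "field", todict=True)`, whose implementation is not included in the record. The sketch below is only a plausible reconstruction inferred from how they are called; it is not the actual vas3k.club code.

```python
from collections import Counter, defaultdict
from operator import attrgetter


def top(items, field):
    """Most common non-empty values of `field` across `items`, as (value, count) pairs."""
    getter = attrgetter(field)
    return Counter(v for v in (getter(item) for item in items) if v).most_common()


def group_by(items, field, todict=False):
    """Bucket `items` into lists keyed by the value of `field`."""
    grouped = defaultdict(list)
    for item in items:
        grouped[getattr(item, field)].append(item)
    return dict(grouped) if todict else grouped
```

Both helpers accept any iterable of objects (including Django querysets), which matches the `top(users, "company")[:5]` and `group_by(tags_with_stats, "group", todict=True)` call sites in the view.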
gh_patches_debug_8144
rasdani/github-patches
git_diff
pymodbus-dev__pymodbus-1853
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AsyncModbusSerialClient crashes when slave_id is not present ### Versions - Python: 3.11 - OS: Windows 11 (ver 22H2, 22621.2428) - Pymodbus: 3.6.0dev - Modbus Hardware (if used): USB Serial ### Pymodbus Specific - Client: rtu - async ### Description Using AsyncModbusSerialClient, attempting to read from a slave ID that is not present on the serial bus causes an AttributeError ### Code and Logs ```python import asyncio import sys import pymodbus from pymodbus.client import AsyncModbusSerialClient, ModbusSerialClient # ---------------------------------------------------------------------- async def event_main_loop(): mbparams = dict( timeout=0.5, retries=0, no_resend_on_retry=True, reconnect_delay=0, ) mbdev = AsyncModbusSerialClient("COM6", baudrate=19200, **mbparams) await mbdev.connect() try: res = await mbdev.read_holding_registers(0, 3, slave=1) print("read result: ", res) except pymodbus.exceptions.ModbusException as e: print("Blah: ", e) res = None if res and not res.isError(): print("Read returned:", res.registers) mbdev.close() # - - - - - - - - - - - - - - - - - - - - - - - - - - def event_main_loop_sync(): mbparams = dict( timeout=0.5, retries=0, no_resend_on_retry=True, reconnect_delay=0, ) mbdev = ModbusSerialClient("COM6", baudrate=19200, **mbparams) mbdev.connect() try: res = mbdev.read_holding_registers(0, 3, slave=1) print("read result: ", res) except pymodbus.exceptions.ModbusException as e: print("Blah: ", e) res = None if res and not res.isError(): print("Read returned:", res.registers) mbdev.close() if __name__ == "__main__": print(f"Python version: {sys.version_info}, {pymodbus.__version_full__}") pymodbus.pymodbus_apply_logging_config(log_file_name="t-mbserial.log") #event_main_loop_sync() asyncio.run(event_main_loop()) ``` # Log: ``` Python version: sys.version_info(major=3, minor=11, micro=5, releaselevel='final', serial=0), [pymodbus, version 3.6.0dev] 2023-10-22 23:33:59,440 DEBUG logging:103 Connecting to COM6. 
2023-10-22 23:33:59,440 DEBUG logging:103 Connecting comm 2023-10-22 23:33:59,456 DEBUG logging:103 Connected to comm 2023-10-22 23:33:59,456 DEBUG logging:103 callback_connected called 2023-10-22 23:33:59,456 DEBUG logging:103 send: 0x1 0x3 0x0 0x0 0x0 0x3 0x5 0xcb 2023-10-22 23:33:59,456 DEBUG logging:103 Adding transaction 1 2023-10-22 23:33:59,958 DEBUG logging:103 Connection lost comm due to Server not responding 2023-10-22 23:34:00,068 DEBUG logging:103 Connection lost comm due to None 2023-10-22 23:34:00,068 DEBUG logging:103 Getting transaction 1 Traceback (most recent call last): File "C:\ctw\t-mbserial-async-noslave\t-mbserial-async-noslave.py", line 61, in <module> asyncio.run(event_main_loop()) File "c:\ctw\0-dev\Python311\Lib\asyncio\runners.py", line 190, in run return runner.run(main) ^^^^^^^^^^^^^^^^ File "c:\ctw\0-dev\Python311\Lib\asyncio\runners.py", line 118, in run return self._loop.run_until_complete(task) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "c:\ctw\0-dev\Python311\Lib\asyncio\base_events.py", line 653, in run_until_complete return future.result() ^^^^^^^^^^^^^^^ File "C:\ctw\t-mbserial-async-noslave\t-mbserial-async-noslave.py", line 22, in event_main_loop res = await mbdev.read_holding_registers(0, 3, slave=1) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\ctw\t-mbserial-async-noslave\pymodbus\client\base.py", line 207, in async_execute self.close(reconnect=True) File "C:\ctw\t-mbserial-async-noslave\pymodbus\client\serial.py", line 106, in close super().close(reconnect=reconnect) File "C:\ctw\t-mbserial-async-noslave\pymodbus\client\base.py", line 154, in close self.connection_lost(asyncio.TimeoutError("Server not responding")) File "C:\ctw\t-mbserial-async-noslave\pymodbus\transport\transport.py", line 300, in connection_lost self.transport_close(intern=True) File "C:\ctw\t-mbserial-async-noslave\pymodbus\transport\transport.py", line 419, in transport_close self.transport.close() ^^^^^^^^^^^^^^^^^^^^ AttributeError: 'NoneType' object has no attribute 'close' ``` When using the sync version, the code does not crash: ``` Python version: sys.version_info(major=3, minor=11, micro=5, releaselevel='final', serial=0), [pymodbus, version 3.6.0dev] 2023-10-22 23:35:35,780 DEBUG logging:103 Current transaction state - IDLE 2023-10-22 23:35:35,780 DEBUG logging:103 Running transaction 1 2023-10-22 23:35:35,780 DEBUG logging:103 SEND: 0x1 0x3 0x0 0x0 0x0 0x3 0x5 0xcb 2023-10-22 23:35:35,780 DEBUG logging:103 New Transaction state "SENDING" 2023-10-22 23:35:35,780 DEBUG logging:103 Changing transaction state from "SENDING" to "WAITING FOR REPLY" 2023-10-22 23:35:36,902 DEBUG logging:103 Transaction failed. (Modbus Error: [Invalid Message] No response received, expected at least 4 bytes (0 received)) 2023-10-22 23:35:36,902 DEBUG logging:103 Processing: 2023-10-22 23:35:36,902 DEBUG logging:103 Getting transaction 1 2023-10-22 23:35:36,902 DEBUG logging:103 Changing transaction state from "PROCESSING REPLY" to "TRANSACTION_COMPLETE" read result: Modbus Error: [Input/Output] Modbus Error: [Invalid Message] No response received, expected at least 4 bytes (0 received) ``` Note: the same issue happens with the current latest pymodbus v3.5.4. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pymodbus/transport/transport_serial.py` Content: ``` 1 """asyncio serial support for modbus (based on pyserial).""" 2 from __future__ import annotations 3 4 import asyncio 5 import contextlib 6 import os 7 8 9 with contextlib.suppress(ImportError): 10 import serial 11 12 13 class SerialTransport(asyncio.Transport): 14 """An asyncio serial transport.""" 15 16 force_poll: bool = False 17 18 def __init__(self, loop, protocol, *args, **kwargs) -> None: 19 """Initialize.""" 20 super().__init__() 21 self.async_loop = loop 22 self._protocol: asyncio.BaseProtocol = protocol 23 self.sync_serial = serial.serial_for_url(*args, **kwargs) 24 self._write_buffer: list[bytes] = [] 25 self.poll_task = None 26 self._poll_wait_time = 0.0005 27 self.sync_serial.timeout = 0 28 self.sync_serial.write_timeout = 0 29 30 def setup(self): 31 """Prepare to read/write""" 32 if os.name == "nt" or self.force_poll: 33 self.poll_task = asyncio.create_task(self._polling_task()) 34 else: 35 self.async_loop.add_reader(self.sync_serial.fileno(), self._read_ready) 36 self.async_loop.call_soon(self._protocol.connection_made, self) 37 38 def close(self, exc=None): 39 """Close the transport gracefully.""" 40 if not self.sync_serial: 41 return 42 with contextlib.suppress(Exception): 43 self.sync_serial.flush() 44 45 self.flush() 46 if self.poll_task: 47 self.poll_task.cancel() 48 _ = asyncio.ensure_future(self.poll_task) 49 self.poll_task = None 50 else: 51 self.async_loop.remove_reader(self.sync_serial.fileno()) 52 self.sync_serial.close() 53 self.sync_serial = None 54 with contextlib.suppress(Exception): 55 self._protocol.connection_lost(exc) 56 57 def write(self, data) -> None: 58 """Write some data to the transport.""" 59 self._write_buffer.append(data) 60 if not self.poll_task: 61 self.async_loop.add_writer(self.sync_serial.fileno(), self._write_ready) 62 63 def flush(self) -> None: 64 """Clear output buffer and stops any more data being written""" 65 if not self.poll_task: 66 self.async_loop.remove_writer(self.sync_serial.fileno()) 67 self._write_buffer.clear() 68 69 # ------------------------------------------------ 70 # Dummy methods needed to please asyncio.Transport. 
71 # ------------------------------------------------ 72 @property 73 def loop(self): 74 """Return asyncio event loop.""" 75 return self.async_loop 76 77 def get_protocol(self) -> asyncio.BaseProtocol: 78 """Return protocol""" 79 return self._protocol 80 81 def set_protocol(self, protocol: asyncio.BaseProtocol) -> None: 82 """Set protocol""" 83 self._protocol = protocol 84 85 def get_write_buffer_limits(self) -> tuple[int, int]: 86 """Return buffer sizes""" 87 return (1, 1024) 88 89 def can_write_eof(self): 90 """Return Serial do not support end-of-file.""" 91 return False 92 93 def write_eof(self): 94 """Write end of file marker.""" 95 96 def set_write_buffer_limits(self, high=None, low=None): 97 """Set the high- and low-water limits for write flow control.""" 98 99 def get_write_buffer_size(self): 100 """Return The number of bytes in the write buffer.""" 101 return len(self._write_buffer) 102 103 def is_reading(self) -> bool: 104 """Return true if read is active.""" 105 return True 106 107 def pause_reading(self): 108 """Pause receiver.""" 109 110 def resume_reading(self): 111 """Resume receiver.""" 112 113 def is_closing(self): 114 """Return True if the transport is closing or closed.""" 115 return False 116 117 def abort(self): 118 """Close the transport immediately.""" 119 self.close() 120 121 # ------------------------------------------------ 122 123 def _read_ready(self): 124 """Test if there are data waiting.""" 125 try: 126 if data := self.sync_serial.read(1024): 127 self._protocol.data_received(data) 128 except serial.SerialException as exc: 129 self.close(exc=exc) 130 131 def _write_ready(self): 132 """Asynchronously write buffered data.""" 133 data = b"".join(self._write_buffer) 134 try: 135 if (nlen := self.sync_serial.write(data)) < len(data): 136 self._write_buffer = [data[nlen:]] 137 if not self.poll_task: 138 self.async_loop.add_writer( 139 self.sync_serial.fileno(), self._write_ready 140 ) 141 return 142 self.flush() 143 except (BlockingIOError, InterruptedError): 144 return 145 except serial.SerialException as exc: 146 self.close(exc=exc) 147 148 async def _polling_task(self): 149 """Poll and try to read/write.""" 150 try: 151 while True: 152 await asyncio.sleep(self._poll_wait_time) 153 while self._write_buffer: 154 self._write_ready() 155 if self.sync_serial.in_waiting: 156 self._read_ready() 157 except serial.SerialException as exc: 158 self.close(exc=exc) 159 except asyncio.CancelledError: 160 pass 161 162 163 async def create_serial_connection( 164 loop, protocol_factory, *args, **kwargs 165 ) -> tuple[asyncio.Transport, asyncio.BaseProtocol]: 166 """Create a connection to a new serial port instance.""" 167 protocol = protocol_factory() 168 transport = SerialTransport(loop, protocol, *args, **kwargs) 169 loop.call_soon(transport.setup) 170 return transport, protocol 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pymodbus/transport/transport_serial.py b/pymodbus/transport/transport_serial.py --- a/pymodbus/transport/transport_serial.py +++ b/pymodbus/transport/transport_serial.py @@ -51,8 +51,9 @@ self.async_loop.remove_reader(self.sync_serial.fileno()) self.sync_serial.close() self.sync_serial = None - with contextlib.suppress(Exception): - self._protocol.connection_lost(exc) + if exc: + with contextlib.suppress(Exception): + self._protocol.connection_lost(exc) def write(self, data) -> None: """Write some data to the transport."""
{"golden_diff": "diff --git a/pymodbus/transport/transport_serial.py b/pymodbus/transport/transport_serial.py\n--- a/pymodbus/transport/transport_serial.py\n+++ b/pymodbus/transport/transport_serial.py\n@@ -51,8 +51,9 @@\n self.async_loop.remove_reader(self.sync_serial.fileno())\n self.sync_serial.close()\n self.sync_serial = None\n- with contextlib.suppress(Exception):\n- self._protocol.connection_lost(exc)\n+ if exc:\n+ with contextlib.suppress(Exception):\n+ self._protocol.connection_lost(exc)\n \n def write(self, data) -> None:\n \"\"\"Write some data to the transport.\"\"\"\n", "issue": "AsyncModbusSerialClient crashes when slave_id is not present\n### Versions\r\n\r\n- Python: 3.11\r\n- OS: Windows 11 (ver 22H2, 22621.2428)\r\n- Pymodbus: 3.6.0dev\r\n- Modbus Hardware (if used): USB Serial\r\n\r\n### Pymodbus Specific\r\n\r\n- Client: rtu - async\r\n\r\n### Description\r\n\r\nUsing AsyncModbusSerialClient, attempting to read from a slave ID that is not present on the serial bus causes an AttributeError\r\n\r\n### Code and Logs\r\n\r\n```python\r\nimport asyncio\r\nimport sys\r\n\r\nimport pymodbus\r\nfrom pymodbus.client import AsyncModbusSerialClient, ModbusSerialClient\r\n\r\n# ----------------------------------------------------------------------\r\n\r\nasync def event_main_loop():\r\n mbparams = dict(\r\n timeout=0.5,\r\n retries=0,\r\n no_resend_on_retry=True,\r\n reconnect_delay=0,\r\n )\r\n mbdev = AsyncModbusSerialClient(\"COM6\", baudrate=19200, **mbparams)\r\n await mbdev.connect()\r\n\r\n try:\r\n res = await mbdev.read_holding_registers(0, 3, slave=1)\r\n print(\"read result: \", res)\r\n except pymodbus.exceptions.ModbusException as e:\r\n print(\"Blah: \", e)\r\n res = None\r\n\r\n if res and not res.isError():\r\n print(\"Read returned:\", res.registers)\r\n\r\n mbdev.close()\r\n\r\n# - - - - - - - - - - - - - - - - - - - - - - - - - -\r\n\r\ndef event_main_loop_sync():\r\n mbparams = dict(\r\n timeout=0.5,\r\n retries=0,\r\n no_resend_on_retry=True,\r\n reconnect_delay=0,\r\n )\r\n mbdev = ModbusSerialClient(\"COM6\", baudrate=19200, **mbparams)\r\n mbdev.connect()\r\n\r\n try:\r\n res = mbdev.read_holding_registers(0, 3, slave=1)\r\n print(\"read result: \", res)\r\n except pymodbus.exceptions.ModbusException as e:\r\n print(\"Blah: \", e)\r\n res = None\r\n\r\n if res and not res.isError():\r\n print(\"Read returned:\", res.registers)\r\n\r\n mbdev.close()\r\n\r\nif __name__ == \"__main__\":\r\n print(f\"Python version: {sys.version_info}, {pymodbus.__version_full__}\")\r\n pymodbus.pymodbus_apply_logging_config(log_file_name=\"t-mbserial.log\")\r\n #event_main_loop_sync()\r\n asyncio.run(event_main_loop())\r\n\r\n```\r\n\r\n# Log:\r\n```\r\nPython version: sys.version_info(major=3, minor=11, micro=5, releaselevel='final', serial=0), [pymodbus, version 3.6.0dev]\r\n2023-10-22 23:33:59,440 DEBUG logging:103 Connecting to COM6.\r\n2023-10-22 23:33:59,440 DEBUG logging:103 Connecting comm\r\n2023-10-22 23:33:59,456 DEBUG logging:103 Connected to comm\r\n2023-10-22 23:33:59,456 DEBUG logging:103 callback_connected called\r\n2023-10-22 23:33:59,456 DEBUG logging:103 send: 0x1 0x3 0x0 0x0 0x0 0x3 0x5 0xcb\r\n2023-10-22 23:33:59,456 DEBUG logging:103 Adding transaction 1\r\n2023-10-22 23:33:59,958 DEBUG logging:103 Connection lost comm due to Server not responding\r\n2023-10-22 23:34:00,068 DEBUG logging:103 Connection lost comm due to None\r\n2023-10-22 23:34:00,068 DEBUG logging:103 Getting transaction 1\r\nTraceback (most recent call last):\r\n File 
\"C:\\ctw\\t-mbserial-async-noslave\\t-mbserial-async-noslave.py\", line 61, in <module>\r\n asyncio.run(event_main_loop())\r\n File \"c:\\ctw\\0-dev\\Python311\\Lib\\asyncio\\runners.py\", line 190, in run\r\n return runner.run(main)\r\n ^^^^^^^^^^^^^^^^\r\n File \"c:\\ctw\\0-dev\\Python311\\Lib\\asyncio\\runners.py\", line 118, in run\r\n return self._loop.run_until_complete(task)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"c:\\ctw\\0-dev\\Python311\\Lib\\asyncio\\base_events.py\", line 653, in run_until_complete\r\n return future.result()\r\n ^^^^^^^^^^^^^^^\r\n File \"C:\\ctw\\t-mbserial-async-noslave\\t-mbserial-async-noslave.py\", line 22, in event_main_loop\r\n res = await mbdev.read_holding_registers(0, 3, slave=1)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\ctw\\t-mbserial-async-noslave\\pymodbus\\client\\base.py\", line 207, in async_execute\r\n self.close(reconnect=True)\r\n File \"C:\\ctw\\t-mbserial-async-noslave\\pymodbus\\client\\serial.py\", line 106, in close\r\n super().close(reconnect=reconnect)\r\n File \"C:\\ctw\\t-mbserial-async-noslave\\pymodbus\\client\\base.py\", line 154, in close\r\n self.connection_lost(asyncio.TimeoutError(\"Server not responding\"))\r\n File \"C:\\ctw\\t-mbserial-async-noslave\\pymodbus\\transport\\transport.py\", line 300, in connection_lost\r\n self.transport_close(intern=True)\r\n File \"C:\\ctw\\t-mbserial-async-noslave\\pymodbus\\transport\\transport.py\", line 419, in transport_close\r\n self.transport.close()\r\n ^^^^^^^^^^^^^^^^^^^^\r\nAttributeError: 'NoneType' object has no attribute 'close'\r\n```\r\n\r\nWhen using the sync version, the code does not crash:\r\n```\r\nPython version: sys.version_info(major=3, minor=11, micro=5, releaselevel='final', serial=0), [pymodbus, version 3.6.0dev]\r\n2023-10-22 23:35:35,780 DEBUG logging:103 Current transaction state - IDLE\r\n2023-10-22 23:35:35,780 DEBUG logging:103 Running transaction 1\r\n2023-10-22 23:35:35,780 DEBUG logging:103 SEND: 0x1 0x3 0x0 0x0 0x0 0x3 0x5 0xcb\r\n2023-10-22 23:35:35,780 DEBUG logging:103 New Transaction state \"SENDING\"\r\n2023-10-22 23:35:35,780 DEBUG logging:103 Changing transaction state from \"SENDING\" to \"WAITING FOR REPLY\"\r\n2023-10-22 23:35:36,902 DEBUG logging:103 Transaction failed. 
(Modbus Error: [Invalid Message] No response received, expected at least 4 bytes (0 received))\r\n2023-10-22 23:35:36,902 DEBUG logging:103 Processing:\r\n2023-10-22 23:35:36,902 DEBUG logging:103 Getting transaction 1\r\n2023-10-22 23:35:36,902 DEBUG logging:103 Changing transaction state from \"PROCESSING REPLY\" to \"TRANSACTION_COMPLETE\"\r\nread result: Modbus Error: [Input/Output] Modbus Error: [Invalid Message] No response received, expected at least 4 bytes (0 received)\r\n```\r\n\r\nNote: the same issue happens with the current latest pymodbus v3.5.4.\r\n\n", "before_files": [{"content": "\"\"\"asyncio serial support for modbus (based on pyserial).\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport os\n\n\nwith contextlib.suppress(ImportError):\n import serial\n\n\nclass SerialTransport(asyncio.Transport):\n \"\"\"An asyncio serial transport.\"\"\"\n\n force_poll: bool = False\n\n def __init__(self, loop, protocol, *args, **kwargs) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__()\n self.async_loop = loop\n self._protocol: asyncio.BaseProtocol = protocol\n self.sync_serial = serial.serial_for_url(*args, **kwargs)\n self._write_buffer: list[bytes] = []\n self.poll_task = None\n self._poll_wait_time = 0.0005\n self.sync_serial.timeout = 0\n self.sync_serial.write_timeout = 0\n\n def setup(self):\n \"\"\"Prepare to read/write\"\"\"\n if os.name == \"nt\" or self.force_poll:\n self.poll_task = asyncio.create_task(self._polling_task())\n else:\n self.async_loop.add_reader(self.sync_serial.fileno(), self._read_ready)\n self.async_loop.call_soon(self._protocol.connection_made, self)\n\n def close(self, exc=None):\n \"\"\"Close the transport gracefully.\"\"\"\n if not self.sync_serial:\n return\n with contextlib.suppress(Exception):\n self.sync_serial.flush()\n\n self.flush()\n if self.poll_task:\n self.poll_task.cancel()\n _ = asyncio.ensure_future(self.poll_task)\n self.poll_task = None\n else:\n self.async_loop.remove_reader(self.sync_serial.fileno())\n self.sync_serial.close()\n self.sync_serial = None\n with contextlib.suppress(Exception):\n self._protocol.connection_lost(exc)\n\n def write(self, data) -> None:\n \"\"\"Write some data to the transport.\"\"\"\n self._write_buffer.append(data)\n if not self.poll_task:\n self.async_loop.add_writer(self.sync_serial.fileno(), self._write_ready)\n\n def flush(self) -> None:\n \"\"\"Clear output buffer and stops any more data being written\"\"\"\n if not self.poll_task:\n self.async_loop.remove_writer(self.sync_serial.fileno())\n self._write_buffer.clear()\n\n # ------------------------------------------------\n # Dummy methods needed to please asyncio.Transport.\n # ------------------------------------------------\n @property\n def loop(self):\n \"\"\"Return asyncio event loop.\"\"\"\n return self.async_loop\n\n def get_protocol(self) -> asyncio.BaseProtocol:\n \"\"\"Return protocol\"\"\"\n return self._protocol\n\n def set_protocol(self, protocol: asyncio.BaseProtocol) -> None:\n \"\"\"Set protocol\"\"\"\n self._protocol = protocol\n\n def get_write_buffer_limits(self) -> tuple[int, int]:\n \"\"\"Return buffer sizes\"\"\"\n return (1, 1024)\n\n def can_write_eof(self):\n \"\"\"Return Serial do not support end-of-file.\"\"\"\n return False\n\n def write_eof(self):\n \"\"\"Write end of file marker.\"\"\"\n\n def set_write_buffer_limits(self, high=None, low=None):\n \"\"\"Set the high- and low-water limits for write flow control.\"\"\"\n\n def get_write_buffer_size(self):\n \"\"\"Return The number of 
bytes in the write buffer.\"\"\"\n return len(self._write_buffer)\n\n def is_reading(self) -> bool:\n \"\"\"Return true if read is active.\"\"\"\n return True\n\n def pause_reading(self):\n \"\"\"Pause receiver.\"\"\"\n\n def resume_reading(self):\n \"\"\"Resume receiver.\"\"\"\n\n def is_closing(self):\n \"\"\"Return True if the transport is closing or closed.\"\"\"\n return False\n\n def abort(self):\n \"\"\"Close the transport immediately.\"\"\"\n self.close()\n\n # ------------------------------------------------\n\n def _read_ready(self):\n \"\"\"Test if there are data waiting.\"\"\"\n try:\n if data := self.sync_serial.read(1024):\n self._protocol.data_received(data)\n except serial.SerialException as exc:\n self.close(exc=exc)\n\n def _write_ready(self):\n \"\"\"Asynchronously write buffered data.\"\"\"\n data = b\"\".join(self._write_buffer)\n try:\n if (nlen := self.sync_serial.write(data)) < len(data):\n self._write_buffer = [data[nlen:]]\n if not self.poll_task:\n self.async_loop.add_writer(\n self.sync_serial.fileno(), self._write_ready\n )\n return\n self.flush()\n except (BlockingIOError, InterruptedError):\n return\n except serial.SerialException as exc:\n self.close(exc=exc)\n\n async def _polling_task(self):\n \"\"\"Poll and try to read/write.\"\"\"\n try:\n while True:\n await asyncio.sleep(self._poll_wait_time)\n while self._write_buffer:\n self._write_ready()\n if self.sync_serial.in_waiting:\n self._read_ready()\n except serial.SerialException as exc:\n self.close(exc=exc)\n except asyncio.CancelledError:\n pass\n\n\nasync def create_serial_connection(\n loop, protocol_factory, *args, **kwargs\n) -> tuple[asyncio.Transport, asyncio.BaseProtocol]:\n \"\"\"Create a connection to a new serial port instance.\"\"\"\n protocol = protocol_factory()\n transport = SerialTransport(loop, protocol, *args, **kwargs)\n loop.call_soon(transport.setup)\n return transport, protocol\n", "path": "pymodbus/transport/transport_serial.py"}], "after_files": [{"content": "\"\"\"asyncio serial support for modbus (based on pyserial).\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport contextlib\nimport os\n\n\nwith contextlib.suppress(ImportError):\n import serial\n\n\nclass SerialTransport(asyncio.Transport):\n \"\"\"An asyncio serial transport.\"\"\"\n\n force_poll: bool = False\n\n def __init__(self, loop, protocol, *args, **kwargs) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__()\n self.async_loop = loop\n self._protocol: asyncio.BaseProtocol = protocol\n self.sync_serial = serial.serial_for_url(*args, **kwargs)\n self._write_buffer: list[bytes] = []\n self.poll_task = None\n self._poll_wait_time = 0.0005\n self.sync_serial.timeout = 0\n self.sync_serial.write_timeout = 0\n\n def setup(self):\n \"\"\"Prepare to read/write\"\"\"\n if os.name == \"nt\" or self.force_poll:\n self.poll_task = asyncio.create_task(self._polling_task())\n else:\n self.async_loop.add_reader(self.sync_serial.fileno(), self._read_ready)\n self.async_loop.call_soon(self._protocol.connection_made, self)\n\n def close(self, exc=None):\n \"\"\"Close the transport gracefully.\"\"\"\n if not self.sync_serial:\n return\n with contextlib.suppress(Exception):\n self.sync_serial.flush()\n\n self.flush()\n if self.poll_task:\n self.poll_task.cancel()\n _ = asyncio.ensure_future(self.poll_task)\n self.poll_task = None\n else:\n self.async_loop.remove_reader(self.sync_serial.fileno())\n self.sync_serial.close()\n self.sync_serial = None\n if exc:\n with contextlib.suppress(Exception):\n 
self._protocol.connection_lost(exc)\n\n def write(self, data) -> None:\n \"\"\"Write some data to the transport.\"\"\"\n self._write_buffer.append(data)\n if not self.poll_task:\n self.async_loop.add_writer(self.sync_serial.fileno(), self._write_ready)\n\n def flush(self) -> None:\n \"\"\"Clear output buffer and stops any more data being written\"\"\"\n if not self.poll_task:\n self.async_loop.remove_writer(self.sync_serial.fileno())\n self._write_buffer.clear()\n\n # ------------------------------------------------\n # Dummy methods needed to please asyncio.Transport.\n # ------------------------------------------------\n @property\n def loop(self):\n \"\"\"Return asyncio event loop.\"\"\"\n return self.async_loop\n\n def get_protocol(self) -> asyncio.BaseProtocol:\n \"\"\"Return protocol\"\"\"\n return self._protocol\n\n def set_protocol(self, protocol: asyncio.BaseProtocol) -> None:\n \"\"\"Set protocol\"\"\"\n self._protocol = protocol\n\n def get_write_buffer_limits(self) -> tuple[int, int]:\n \"\"\"Return buffer sizes\"\"\"\n return (1, 1024)\n\n def can_write_eof(self):\n \"\"\"Return Serial do not support end-of-file.\"\"\"\n return False\n\n def write_eof(self):\n \"\"\"Write end of file marker.\"\"\"\n\n def set_write_buffer_limits(self, high=None, low=None):\n \"\"\"Set the high- and low-water limits for write flow control.\"\"\"\n\n def get_write_buffer_size(self):\n \"\"\"Return The number of bytes in the write buffer.\"\"\"\n return len(self._write_buffer)\n\n def is_reading(self) -> bool:\n \"\"\"Return true if read is active.\"\"\"\n return True\n\n def pause_reading(self):\n \"\"\"Pause receiver.\"\"\"\n\n def resume_reading(self):\n \"\"\"Resume receiver.\"\"\"\n\n def is_closing(self):\n \"\"\"Return True if the transport is closing or closed.\"\"\"\n return False\n\n def abort(self):\n \"\"\"Close the transport immediately.\"\"\"\n self.close()\n\n # ------------------------------------------------\n\n def _read_ready(self):\n \"\"\"Test if there are data waiting.\"\"\"\n try:\n if data := self.sync_serial.read(1024):\n self._protocol.data_received(data)\n except serial.SerialException as exc:\n self.close(exc=exc)\n\n def _write_ready(self):\n \"\"\"Asynchronously write buffered data.\"\"\"\n data = b\"\".join(self._write_buffer)\n try:\n if (nlen := self.sync_serial.write(data)) < len(data):\n self._write_buffer = [data[nlen:]]\n if not self.poll_task:\n self.async_loop.add_writer(\n self.sync_serial.fileno(), self._write_ready\n )\n return\n self.flush()\n except (BlockingIOError, InterruptedError):\n return\n except serial.SerialException as exc:\n self.close(exc=exc)\n\n async def _polling_task(self):\n \"\"\"Poll and try to read/write.\"\"\"\n try:\n while True:\n await asyncio.sleep(self._poll_wait_time)\n while self._write_buffer:\n self._write_ready()\n if self.sync_serial.in_waiting:\n self._read_ready()\n except serial.SerialException as exc:\n self.close(exc=exc)\n except asyncio.CancelledError:\n pass\n\n\nasync def create_serial_connection(\n loop, protocol_factory, *args, **kwargs\n) -> tuple[asyncio.Transport, asyncio.BaseProtocol]:\n \"\"\"Create a connection to a new serial port instance.\"\"\"\n protocol = protocol_factory()\n transport = SerialTransport(loop, protocol, *args, **kwargs)\n loop.call_soon(transport.setup)\n return transport, protocol\n", "path": "pymodbus/transport/transport_serial.py"}]}
3,776
150
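The golden diff in the pymodbus record above adds an `if exc:` guard so the protocol's `connection_lost` callback fires only when the serial transport was closed by a real error, not on a normal shutdown. The snippet below is a minimal, self-contained sketch of that pattern; `DummyProtocol` and `MiniTransport` are names invented for the illustration and are not part of the pymodbus API.

```python
import contextlib


class DummyProtocol:
    """Stand-in protocol that records whether connection_lost was called."""

    def __init__(self):
        self.lost_with = "never called"

    def connection_lost(self, exc):
        self.lost_with = exc


class MiniTransport:
    """Toy transport reproducing the guarded-close pattern from the patch."""

    def __init__(self, protocol):
        self._protocol = protocol
        self._closed = False

    def close(self, exc=None):
        if self._closed:
            return
        self._closed = True
        # Notify the protocol only when the close was caused by a real error,
        # mirroring the `if exc:` guard added in transport_serial.py.
        if exc:
            with contextlib.suppress(Exception):
                self._protocol.connection_lost(exc)


if __name__ == "__main__":
    graceful = DummyProtocol()
    MiniTransport(graceful).close()  # user-initiated close, no error
    print(graceful.lost_with)        # -> never called

    failed = DummyProtocol()
    MiniTransport(failed).close(exc=TimeoutError("Server not responding"))
    print(repr(failed.lost_with))    # -> TimeoutError('Server not responding')
```

Run as a script it prints `never called` for the graceful close and `TimeoutError('Server not responding')` for the error close, which is exactly the behavioural difference the patch introduces.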
gh_patches_debug_18077
rasdani/github-patches
git_diff
freedomofpress__securedrop-3258
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [qt-journalist-updater] Add a time estimate to the update progress bar ## Description [See epic #3076 for primary feature, PRs for this ticket should go into `qt-journalist-updater`] We should add a reasonable time estimate to the update progress bar as the upgrade progresses ## User Stories As a journalist user, I want an estimate of how long it will take until updates are finished so that I can plan. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `journalist_gui/journalist_gui/strings.py` Content: ``` 1 window_title = 'SecureDrop Updater' 2 update_in_progress = ("SecureDrop workstation updates are available! " 3 "You should install them now. If you don\'t want to, " 4 "you can install them the next time you reboot.") 5 fetching_update = 'Fetching and verifying latest update...' 6 updating_tails_env = 'Configuring local Tails environment...' 7 finished = 'Update successfully completed!' 8 finished_dialog_message = 'Updates completed successfully. Click OK to close.' 9 finished_dialog_title = 'SecureDrop Workstation is up to date!' 10 update_failed_dialog_title = 'Error Updating SecureDrop Workstation' 11 update_failed_generic_reason = ("Update failed. " 12 "Please contact your SecureDrop " 13 "administrator.") 14 update_failed_sig_failure = ("WARNING: Signature verification failed. " 15 "Contact your SecureDrop administrator " 16 "immediately.") 17 tailsconfig_failed_sudo_password = 'Sudo password incorrect' 18 tailsconfig_failed_generic_reason = ("Tails workstation configuration failed. " 19 "Contact your administrator.") 20 install_update_button = 'Install Now' 21 install_later_button = 'Install Later' 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/journalist_gui/journalist_gui/strings.py b/journalist_gui/journalist_gui/strings.py --- a/journalist_gui/journalist_gui/strings.py +++ b/journalist_gui/journalist_gui/strings.py @@ -2,8 +2,10 @@ update_in_progress = ("SecureDrop workstation updates are available! " "You should install them now. If you don\'t want to, " "you can install them the next time you reboot.") -fetching_update = 'Fetching and verifying latest update...' -updating_tails_env = 'Configuring local Tails environment...' +fetching_update = ('Fetching and verifying latest update...' + ' (4 mins remaining)') +updating_tails_env = ('Configuring local Tails environment...' + ' (1 min remaining)') finished = 'Update successfully completed!' finished_dialog_message = 'Updates completed successfully. Click OK to close.' finished_dialog_title = 'SecureDrop Workstation is up to date!'
{"golden_diff": "diff --git a/journalist_gui/journalist_gui/strings.py b/journalist_gui/journalist_gui/strings.py\n--- a/journalist_gui/journalist_gui/strings.py\n+++ b/journalist_gui/journalist_gui/strings.py\n@@ -2,8 +2,10 @@\n update_in_progress = (\"SecureDrop workstation updates are available! \"\n \"You should install them now. If you don\\'t want to, \"\n \"you can install them the next time you reboot.\")\n-fetching_update = 'Fetching and verifying latest update...'\n-updating_tails_env = 'Configuring local Tails environment...'\n+fetching_update = ('Fetching and verifying latest update...'\n+ ' (4 mins remaining)')\n+updating_tails_env = ('Configuring local Tails environment...'\n+ ' (1 min remaining)')\n finished = 'Update successfully completed!'\n finished_dialog_message = 'Updates completed successfully. Click OK to close.'\n finished_dialog_title = 'SecureDrop Workstation is up to date!'\n", "issue": "[qt-journalist-updater] Add a time estimate to the update progress bar\n## Description\r\n\r\n[See epic #3076 for primary feature, PRs for this ticket should go into `qt-journalist-updater`]\r\n\r\nWe should add a reasonable time estimate to the update progress bar as the upgrade progresses\r\n\r\n## User Stories\r\n\r\nAs a journalist user, I want an estimate of how long it will take until updates are finished so that I can plan. \n", "before_files": [{"content": "window_title = 'SecureDrop Updater'\nupdate_in_progress = (\"SecureDrop workstation updates are available! \"\n \"You should install them now. If you don\\'t want to, \"\n \"you can install them the next time you reboot.\")\nfetching_update = 'Fetching and verifying latest update...'\nupdating_tails_env = 'Configuring local Tails environment...'\nfinished = 'Update successfully completed!'\nfinished_dialog_message = 'Updates completed successfully. Click OK to close.'\nfinished_dialog_title = 'SecureDrop Workstation is up to date!'\nupdate_failed_dialog_title = 'Error Updating SecureDrop Workstation'\nupdate_failed_generic_reason = (\"Update failed. \"\n \"Please contact your SecureDrop \"\n \"administrator.\")\nupdate_failed_sig_failure = (\"WARNING: Signature verification failed. \"\n \"Contact your SecureDrop administrator \"\n \"immediately.\")\ntailsconfig_failed_sudo_password = 'Sudo password incorrect'\ntailsconfig_failed_generic_reason = (\"Tails workstation configuration failed. \"\n \"Contact your administrator.\")\ninstall_update_button = 'Install Now'\ninstall_later_button = 'Install Later'\n", "path": "journalist_gui/journalist_gui/strings.py"}], "after_files": [{"content": "window_title = 'SecureDrop Updater'\nupdate_in_progress = (\"SecureDrop workstation updates are available! \"\n \"You should install them now. If you don\\'t want to, \"\n \"you can install them the next time you reboot.\")\nfetching_update = ('Fetching and verifying latest update...'\n ' (4 mins remaining)')\nupdating_tails_env = ('Configuring local Tails environment...'\n ' (1 min remaining)')\nfinished = 'Update successfully completed!'\nfinished_dialog_message = 'Updates completed successfully. Click OK to close.'\nfinished_dialog_title = 'SecureDrop Workstation is up to date!'\nupdate_failed_dialog_title = 'Error Updating SecureDrop Workstation'\nupdate_failed_generic_reason = (\"Update failed. \"\n \"Please contact your SecureDrop \"\n \"administrator.\")\nupdate_failed_sig_failure = (\"WARNING: Signature verification failed. 
\"\n \"Contact your SecureDrop administrator \"\n \"immediately.\")\ntailsconfig_failed_sudo_password = 'Sudo password incorrect'\ntailsconfig_failed_generic_reason = (\"Tails workstation configuration failed. \"\n \"Contact your administrator.\")\ninstall_update_button = 'Install Now'\ninstall_later_button = 'Install Later'\n", "path": "journalist_gui/journalist_gui/strings.py"}]}
616
211
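The SecureDrop patch above only edits user-facing strings, appending rough time estimates to the two long-running update phases. The snippet below is a hypothetical illustration of feeding such phase strings into a simple progress readout; the `PHASES` list and its percentage checkpoints are assumptions made for this example and are not taken from the SecureDrop updater GUI.

```python
# Phase labels mirror journalist_gui/journalist_gui/strings.py after the patch;
# the percentage checkpoints are invented for this illustration only.
PHASES = [
    ("Fetching and verifying latest update... (4 mins remaining)", 20),
    ("Configuring local Tails environment... (1 min remaining)", 80),
    ("Update successfully completed!", 100),
]


def report_progress(render=print):
    """Emit each updater phase as 'percent + label', e.g. for a status bar."""
    for label, percent in PHASES:
        render(f"[{percent:3d}%] {label}")


if __name__ == "__main__":
    report_progress()
```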
gh_patches_debug_51100
rasdani/github-patches
git_diff
ivy-llc__ivy-17988
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- angle --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/math.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back 5 6 7 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 8 @to_ivy_arrays_and_back 9 def sin(x, name=None): 10 return ivy.sin(x) 11 12 13 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 14 @to_ivy_arrays_and_back 15 def cos(x, name=None): 16 return ivy.cos(x) 17 18 19 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 20 @to_ivy_arrays_and_back 21 def acos(x, name=None): 22 return ivy.acos(x) 23 24 25 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 26 @to_ivy_arrays_and_back 27 def cosh(x, name=None): 28 return ivy.cosh(x) 29 30 31 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 32 @to_ivy_arrays_and_back 33 def tanh(x, name=None): 34 return ivy.tanh(x) 35 36 37 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 38 @to_ivy_arrays_and_back 39 def acosh(x, name=None): 40 return ivy.acosh(x) 41 42 43 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 44 @to_ivy_arrays_and_back 45 def asin(x, name=None): 46 return ivy.asin(x) 47 48 49 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 50 @to_ivy_arrays_and_back 51 def log(x, name=None): 52 return ivy.log(x) 53 54 55 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 56 @to_ivy_arrays_and_back 57 def divide(x, y, name=None): 58 return ivy.divide(x, y) 59 60 61 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 62 @to_ivy_arrays_and_back 63 def abs(x, name=None): 64 return ivy.abs(x) 65 66 67 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 68 @to_ivy_arrays_and_back 69 def multiply(x, y, name=None): 70 return ivy.multiply(x, y) 71 72 73 @with_unsupported_dtypes( 74 {"2.5.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle" 75 ) 76 @to_ivy_arrays_and_back 77 def add(x, y, name=None): 78 return ivy.add(x, y) 79 80 81 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 82 @to_ivy_arrays_and_back 83 def subtract(x, y, name=None): 84 return ivy.subtract(x, y) 85 86 87 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 88 @to_ivy_arrays_and_back 89 def sqrt(x, name=None): 90 return ivy.sqrt(x) 91 92 93 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 94 @to_ivy_arrays_and_back 95 def atanh(x, name=None): 96 return ivy.atanh(x) 97 98 99 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 100 @to_ivy_arrays_and_back 101 def atan(x, name=None): 102 return ivy.atan(x) 103 104 105 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 106 @to_ivy_arrays_and_back 107 def round(x, name=None): 108 return ivy.round(x) 109 110 111 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 112 @to_ivy_arrays_and_back 113 def ceil(x, name=None): 
114 return ivy.ceil(x) 115 116 117 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 118 @to_ivy_arrays_and_back 119 def sinh(x, name=None): 120 return ivy.sinh(x) 121 122 123 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 124 @to_ivy_arrays_and_back 125 def pow(x, y, name=None): 126 return ivy.pow(x, y) 127 128 129 @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle") 130 @to_ivy_arrays_and_back 131 def conj(x, name=None): 132 return ivy.conj(x) 133 134 135 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 136 @to_ivy_arrays_and_back 137 def floor(x, name=None): 138 return ivy.floor(x) 139 140 141 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 142 @to_ivy_arrays_and_back 143 def remainder(x, y, name=None): 144 return ivy.remainder(x, y) 145 146 147 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 148 @to_ivy_arrays_and_back 149 def log2(x, name=None): 150 return ivy.log2(x) 151 152 153 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 154 @to_ivy_arrays_and_back 155 def log1p(x, name=None): 156 return ivy.log1p(x) 157 158 159 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 160 @to_ivy_arrays_and_back 161 def rad2deg(x, name=None): 162 return ivy.rad2deg(x) 163 164 165 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 166 @to_ivy_arrays_and_back 167 def deg2rad(x, name=None): 168 return ivy.deg2rad(x) 169 170 171 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 172 @to_ivy_arrays_and_back 173 def gcd(x, y, name=None): 174 return ivy.gcd(x, y) 175 176 177 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 178 @to_ivy_arrays_and_back 179 def tan(x, name=None): 180 return ivy.tan(x) 181 182 183 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 184 @to_ivy_arrays_and_back 185 def atan2(x, y, name=None): 186 return ivy.atan2(x, y) 187 188 189 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 190 @to_ivy_arrays_and_back 191 def square(x, name=None): 192 return ivy.square(x) 193 194 195 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 196 @to_ivy_arrays_and_back 197 def sign(x, name=None): 198 return ivy.sign(x) 199 200 201 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 202 @to_ivy_arrays_and_back 203 def neg(x, name=None): 204 return ivy.negative(x) 205 206 207 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 208 @to_ivy_arrays_and_back 209 def exp(x, name=None): 210 return ivy.exp(x) 211 212 213 @with_supported_dtypes( 214 { 215 "2.4.2 and below": ( 216 "float32", 217 "float64", 218 "int32", 219 "int64", 220 "complex64", 221 "complex128", 222 ) 223 }, 224 "paddle", 225 ) 226 @to_ivy_arrays_and_back 227 def cumprod(x, dim=None, dtype=None, name=None): 228 return ivy.cumprod(x, axis=dim, dtype=dtype) 229 230 231 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 232 @to_ivy_arrays_and_back 233 def reciprocal(x, name=None): 234 return ivy.reciprocal(x) 235 236 237 @with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle") 238 @to_ivy_arrays_and_back 239 def gcd(x, y, name=None): 240 return ivy.gcd(x, y) 241 242 243 @with_unsupported_dtypes({"2.5.0 and below": 
"bfloat16"}, "paddle") 244 @to_ivy_arrays_and_back 245 def fmin(x, y, name=None): 246 return ivy.fmin(x, y) 247 248 249 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 250 @to_ivy_arrays_and_back 251 def logit(x, eps=None, name=None): 252 return ivy.logit(x, eps=eps) 253 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -234,10 +234,13 @@ return ivy.reciprocal(x) -@with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle") +@with_supported_dtypes( + {"2.5.0 and below": ("complex64", "complex128", "float32", "float64")}, + "paddle", +) @to_ivy_arrays_and_back -def gcd(x, y, name=None): - return ivy.gcd(x, y) +def angle(x, name=None): + return ivy.angle(x) @with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle")
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -234,10 +234,13 @@\n return ivy.reciprocal(x)\n \n \n-@with_supported_dtypes({\"2.5.0 and below\": (\"int32\", \"int64\")}, \"paddle\")\n+@with_supported_dtypes(\n+ {\"2.5.0 and below\": (\"complex64\", \"complex128\", \"float32\", \"float64\")},\n+ \"paddle\",\n+)\n @to_ivy_arrays_and_back\n-def gcd(x, y, name=None):\n- return ivy.gcd(x, y)\n+def angle(x, name=None):\n+ return ivy.angle(x)\n \n \n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n", "issue": "angle\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": 
(\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef 
reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"int32\", \"int64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef logit(x, eps=None, name=None):\n return ivy.logit(x, eps=eps)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return 
ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes(\n {\"2.5.0 and below\": 
(\"complex64\", \"complex128\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef angle(x, name=None):\n return ivy.angle(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef logit(x, eps=None, name=None):\n return ivy.logit(x, eps=eps)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
3,236
233
gh_patches_debug_10610
rasdani/github-patches
git_diff
spacetelescope__jwql-465
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Import environment bokeh version in HTML As described in #425, having the environment Bokeh version NOT match the HTML template Bokeh version can cause problems. I wrote some tests #429 that will test and make sure this is always the case in the future, but we could be even smarter about this if we programmatically set the Bokeh version using a Django variable. We could even include it in `context_processors` so it happens behind the scenes. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `jwql/website/apps/jwql/context_processors.py` Content: ``` 1 """Provides functions that define context inherent to all views. 2 3 The functions within this module define ``context`` that will be 4 included in requests, in addition to any specific ``context`` provided 5 in the view. 6 7 Authors 8 ------- 9 10 - Matthew Bourque 11 12 Use 13 --- 14 15 This module is defined under the ``TEMPLATES.OPTIONS`` setting in 16 ``settings.py``, e.g.: 17 :: 18 19 TEMPLATES = [ 20 {'OPTIONS': {'context_processors': ['jwql.website.apps.jwql.context_processors.base_context'],},} 21 ] 22 23 As such, it will automatically be executed upon each request. 24 """ 25 26 import jwql 27 from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS 28 29 from .oauth import auth_info 30 31 32 @auth_info 33 def base_context(request, user): 34 """Provide the context needed for the ``base.html`` template. 35 36 Parameters 37 ---------- 38 request : HttpRequest object 39 Incoming request from the webpage 40 41 Returns 42 ------- 43 context : dict 44 A dictionary containing data needed to render the ``base.html`` 45 template 46 """ 47 48 context = {} 49 context['inst_list'] = JWST_INSTRUMENT_NAMES 50 context['tools'] = MONITORS 51 context['user'] = user 52 context['version'] = jwql.__version__ 53 54 return context 55 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/jwql/website/apps/jwql/context_processors.py b/jwql/website/apps/jwql/context_processors.py --- a/jwql/website/apps/jwql/context_processors.py +++ b/jwql/website/apps/jwql/context_processors.py @@ -23,6 +23,8 @@ As such, it will automatically be executed upon each request. """ +import bokeh + import jwql from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS @@ -50,5 +52,6 @@ context['tools'] = MONITORS context['user'] = user context['version'] = jwql.__version__ + context['bokeh_version'] = bokeh.__version__ return context
{"golden_diff": "diff --git a/jwql/website/apps/jwql/context_processors.py b/jwql/website/apps/jwql/context_processors.py\n--- a/jwql/website/apps/jwql/context_processors.py\n+++ b/jwql/website/apps/jwql/context_processors.py\n@@ -23,6 +23,8 @@\n As such, it will automatically be executed upon each request.\n \"\"\"\n \n+import bokeh\n+\n import jwql\n from jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS\n \n@@ -50,5 +52,6 @@\n context['tools'] = MONITORS\n context['user'] = user\n context['version'] = jwql.__version__\n+ context['bokeh_version'] = bokeh.__version__\n \n return context\n", "issue": "Import environment bokeh version in HTML\nAs described in #425, having the environment Bokeh version NOT match the HTML template Bokeh version can cause problems.\r\n\r\nI wrote some tests #429 that will test and make sure this is always the case in the future, but we could be even smarter about this if we programmatically set the Bokeh version using a Django variable. We could even include it in `context_processors` so it happens behind the scenes.\n", "before_files": [{"content": "\"\"\"Provides functions that define context inherent to all views.\n\nThe functions within this module define ``context`` that will be\nincluded in requests, in addition to any specific ``context`` provided\nin the view.\n\nAuthors\n-------\n\n - Matthew Bourque\n\nUse\n---\n\n This module is defined under the ``TEMPLATES.OPTIONS`` setting in\n ``settings.py``, e.g.:\n ::\n\n TEMPLATES = [\n {'OPTIONS': {'context_processors': ['jwql.website.apps.jwql.context_processors.base_context'],},}\n ]\n\n As such, it will automatically be executed upon each request.\n\"\"\"\n\nimport jwql\nfrom jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS\n\nfrom .oauth import auth_info\n\n\n@auth_info\ndef base_context(request, user):\n \"\"\"Provide the context needed for the ``base.html`` template.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n context : dict\n A dictionary containing data needed to render the ``base.html``\n template\n \"\"\"\n\n context = {}\n context['inst_list'] = JWST_INSTRUMENT_NAMES\n context['tools'] = MONITORS\n context['user'] = user\n context['version'] = jwql.__version__\n\n return context\n", "path": "jwql/website/apps/jwql/context_processors.py"}], "after_files": [{"content": "\"\"\"Provides functions that define context inherent to all views.\n\nThe functions within this module define ``context`` that will be\nincluded in requests, in addition to any specific ``context`` provided\nin the view.\n\nAuthors\n-------\n\n - Matthew Bourque\n\nUse\n---\n\n This module is defined under the ``TEMPLATES.OPTIONS`` setting in\n ``settings.py``, e.g.:\n ::\n\n TEMPLATES = [\n {'OPTIONS': {'context_processors': ['jwql.website.apps.jwql.context_processors.base_context'],},}\n ]\n\n As such, it will automatically be executed upon each request.\n\"\"\"\n\nimport bokeh\n\nimport jwql\nfrom jwql.utils.constants import JWST_INSTRUMENT_NAMES, MONITORS\n\nfrom .oauth import auth_info\n\n\n@auth_info\ndef base_context(request, user):\n \"\"\"Provide the context needed for the ``base.html`` template.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n context : dict\n A dictionary containing data needed to render the ``base.html``\n template\n \"\"\"\n\n context = {}\n context['inst_list'] = JWST_INSTRUMENT_NAMES\n context['tools'] = MONITORS\n context['user'] = 
user\n context['version'] = jwql.__version__\n context['bokeh_version'] = bokeh.__version__\n\n return context\n", "path": "jwql/website/apps/jwql/context_processors.py"}]}
759
177
gh_patches_debug_5092
rasdani/github-patches
git_diff
fossasia__open-event-server-5383
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improper exception handling in manage.py **Describe the bug** Exceptions are not handled properly in `initialize_db` method of `manage` module. **To Reproduce** Steps to reproduce the behavior: 1. Open 'manage.py' 2. See https://github.com/fossasia/open-event-server/blob/05cbd1abee0d81dd24d701420c3530e3b9bd645f/manage.py#L77 **Expected behavior** It should follow standard exception handling format using `Exception` class. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `manage.py` Content: ``` 1 import os 2 3 from app.api.helpers.db import save_to_db 4 from app.models.event import Event, get_new_event_identifier 5 from app import manager 6 from app import current_app as app 7 from app.models import db 8 from app.models.speaker import Speaker 9 from populate_db import populate 10 from flask_migrate import stamp 11 from sqlalchemy.engine import reflection 12 13 from tests.unittests.auth_helper import create_super_admin 14 15 16 @manager.command 17 def list_routes(): 18 import urllib 19 20 output = [] 21 for rule in app.url_map.iter_rules(): 22 methods = ','.join(rule.methods) 23 line = urllib.unquote("{:50s} {:20s} {}".format( 24 rule.endpoint, methods, rule)) 25 output.append(line) 26 27 for line in sorted(output): 28 print(line) 29 30 31 @manager.command 32 def add_event_identifier(): 33 events = Event.query.all() 34 for event in events: 35 event.identifier = get_new_event_identifier() 36 save_to_db(event) 37 38 39 @manager.option('-e', '--event', help='Event ID. Eg. 1') 40 def fix_speaker_images(event): 41 from app.helpers.sessions_speakers.speakers import speaker_image_sizes 42 from app.helpers.sessions_speakers.speakers import save_resized_photo 43 import urllib 44 from app.helpers.storage import generate_hash 45 event_id = int(event) 46 image_sizes = speaker_image_sizes() 47 speakers = Speaker.query.filter_by(event_id=event_id).all() 48 for speaker in speakers: 49 if speaker.photo and speaker.photo.strip() != '': 50 file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg' 51 file_path = app.config['BASE_DIR'] + '/' + file_relative_path 52 urllib.urlretrieve(speaker.photo, file_path) 53 speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes) 54 speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes) 55 speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes) 56 db.session.add(speaker) 57 os.remove(file_path) 58 print("Downloaded " + speaker.photo + " into " + file_relative_path) 59 print("Processed - " + str(speaker.id)) 60 db.session.commit() 61 62 63 @manager.option('-c', '--credentials', help='Super admin credentials. Eg. username:password') 64 def initialize_db(credentials): 65 with app.app_context(): 66 populate_data = True 67 inspector = reflection.Inspector.from_engine(db.engine) 68 table_name = 'events' 69 table_names = inspector.get_table_names() 70 print("[LOG] Existing tables:") 71 print("[LOG] " + ','.join(table_names)) 72 if table_name not in table_names: 73 print("[LOG] Table not found. Attempting creation") 74 try: 75 db.create_all() 76 stamp() 77 except: 78 populate_data = False 79 print("[LOG] Could not create tables. 
Either database does not exist or tables already created") 80 if populate_data: 81 credentials = credentials.split(":") 82 create_super_admin(credentials[0], credentials[1]) 83 populate() 84 else: 85 print("[LOG] Tables already exist. Skipping data population & creation.") 86 87 88 @manager.command 89 def prepare_kubernetes_db(): 90 with app.app_context(): 91 initialize_db('[email protected]:fossasia') 92 93 94 if __name__ == "__main__": 95 manager.run() 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/manage.py b/manage.py --- a/manage.py +++ b/manage.py @@ -74,7 +74,7 @@ try: db.create_all() stamp() - except: + except Exception: populate_data = False print("[LOG] Could not create tables. Either database does not exist or tables already created") if populate_data:
{"golden_diff": "diff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -74,7 +74,7 @@\n try:\n db.create_all()\n stamp()\n- except:\n+ except Exception:\n populate_data = False\n print(\"[LOG] Could not create tables. Either database does not exist or tables already created\")\n if populate_data:\n", "issue": "Improper exception handling in manage.py\n**Describe the bug**\r\nExceptions are not handled properly in `initialize_db` method of `manage` module.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open 'manage.py'\r\n2. See https://github.com/fossasia/open-event-server/blob/05cbd1abee0d81dd24d701420c3530e3b9bd645f/manage.py#L77\r\n\r\n**Expected behavior**\r\nIt should follow standard exception handling format using `Exception` class.\n", "before_files": [{"content": "import os\n\nfrom app.api.helpers.db import save_to_db\nfrom app.models.event import Event, get_new_event_identifier\nfrom app import manager\nfrom app import current_app as app\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom populate_db import populate\nfrom flask_migrate import stamp\nfrom sqlalchemy.engine import reflection\n\nfrom tests.unittests.auth_helper import create_super_admin\n\n\[email protected]\ndef list_routes():\n import urllib\n\n output = []\n for rule in app.url_map.iter_rules():\n methods = ','.join(rule.methods)\n line = urllib.unquote(\"{:50s} {:20s} {}\".format(\n rule.endpoint, methods, rule))\n output.append(line)\n\n for line in sorted(output):\n print(line)\n\n\[email protected]\ndef add_event_identifier():\n events = Event.query.all()\n for event in events:\n event.identifier = get_new_event_identifier()\n save_to_db(event)\n\n\[email protected]('-e', '--event', help='Event ID. Eg. 1')\ndef fix_speaker_images(event):\n from app.helpers.sessions_speakers.speakers import speaker_image_sizes\n from app.helpers.sessions_speakers.speakers import save_resized_photo\n import urllib\n from app.helpers.storage import generate_hash\n event_id = int(event)\n image_sizes = speaker_image_sizes()\n speakers = Speaker.query.filter_by(event_id=event_id).all()\n for speaker in speakers:\n if speaker.photo and speaker.photo.strip() != '':\n file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'\n file_path = app.config['BASE_DIR'] + '/' + file_relative_path\n urllib.urlretrieve(speaker.photo, file_path)\n speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)\n speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)\n speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)\n db.session.add(speaker)\n os.remove(file_path)\n print(\"Downloaded \" + speaker.photo + \" into \" + file_relative_path)\n print(\"Processed - \" + str(speaker.id))\n db.session.commit()\n\n\[email protected]('-c', '--credentials', help='Super admin credentials. Eg. username:password')\ndef initialize_db(credentials):\n with app.app_context():\n populate_data = True\n inspector = reflection.Inspector.from_engine(db.engine)\n table_name = 'events'\n table_names = inspector.get_table_names()\n print(\"[LOG] Existing tables:\")\n print(\"[LOG] \" + ','.join(table_names))\n if table_name not in table_names:\n print(\"[LOG] Table not found. Attempting creation\")\n try:\n db.create_all()\n stamp()\n except:\n populate_data = False\n print(\"[LOG] Could not create tables. 
Either database does not exist or tables already created\")\n if populate_data:\n credentials = credentials.split(\":\")\n create_super_admin(credentials[0], credentials[1])\n populate()\n else:\n print(\"[LOG] Tables already exist. Skipping data population & creation.\")\n\n\[email protected]\ndef prepare_kubernetes_db():\n with app.app_context():\n initialize_db('[email protected]:fossasia')\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}], "after_files": [{"content": "import os\n\nfrom app.api.helpers.db import save_to_db\nfrom app.models.event import Event, get_new_event_identifier\nfrom app import manager\nfrom app import current_app as app\nfrom app.models import db\nfrom app.models.speaker import Speaker\nfrom populate_db import populate\nfrom flask_migrate import stamp\nfrom sqlalchemy.engine import reflection\n\nfrom tests.unittests.auth_helper import create_super_admin\n\n\[email protected]\ndef list_routes():\n import urllib\n\n output = []\n for rule in app.url_map.iter_rules():\n methods = ','.join(rule.methods)\n line = urllib.unquote(\"{:50s} {:20s} {}\".format(\n rule.endpoint, methods, rule))\n output.append(line)\n\n for line in sorted(output):\n print(line)\n\n\[email protected]\ndef add_event_identifier():\n events = Event.query.all()\n for event in events:\n event.identifier = get_new_event_identifier()\n save_to_db(event)\n\n\[email protected]('-e', '--event', help='Event ID. Eg. 1')\ndef fix_speaker_images(event):\n from app.helpers.sessions_speakers.speakers import speaker_image_sizes\n from app.helpers.sessions_speakers.speakers import save_resized_photo\n import urllib\n from app.helpers.storage import generate_hash\n event_id = int(event)\n image_sizes = speaker_image_sizes()\n speakers = Speaker.query.filter_by(event_id=event_id).all()\n for speaker in speakers:\n if speaker.photo and speaker.photo.strip() != '':\n file_relative_path = 'static/media/temp/' + generate_hash(str(speaker.id)) + '.jpg'\n file_path = app.config['BASE_DIR'] + '/' + file_relative_path\n urllib.urlretrieve(speaker.photo, file_path)\n speaker.small = save_resized_photo(file_path, event_id, speaker.id, 'small', image_sizes)\n speaker.thumbnail = save_resized_photo(file_path, event_id, speaker.id, 'thumbnail', image_sizes)\n speaker.icon = save_resized_photo(file_path, event_id, speaker.id, 'icon', image_sizes)\n db.session.add(speaker)\n os.remove(file_path)\n print(\"Downloaded \" + speaker.photo + \" into \" + file_relative_path)\n print(\"Processed - \" + str(speaker.id))\n db.session.commit()\n\n\[email protected]('-c', '--credentials', help='Super admin credentials. Eg. username:password')\ndef initialize_db(credentials):\n with app.app_context():\n populate_data = True\n inspector = reflection.Inspector.from_engine(db.engine)\n table_name = 'events'\n table_names = inspector.get_table_names()\n print(\"[LOG] Existing tables:\")\n print(\"[LOG] \" + ','.join(table_names))\n if table_name not in table_names:\n print(\"[LOG] Table not found. Attempting creation\")\n try:\n db.create_all()\n stamp()\n except Exception:\n populate_data = False\n print(\"[LOG] Could not create tables. Either database does not exist or tables already created\")\n if populate_data:\n credentials = credentials.split(\":\")\n create_super_admin(credentials[0], credentials[1])\n populate()\n else:\n print(\"[LOG] Tables already exist. 
Skipping data population & creation.\")\n\n\[email protected]\ndef prepare_kubernetes_db():\n with app.app_context():\n initialize_db('[email protected]:fossasia')\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}]}
1,296
83
gh_patches_debug_2676
rasdani/github-patches
git_diff
falconry__falcon-981
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Doc site: On small screen height, sidebar ("Navigation") clips at bottom. Using a laptop with 768 pixels height resolution. ![sidebar_clip](https://cloud.githubusercontent.com/assets/13870149/21928904/8a6fef78-d9b3-11e6-847f-af666a42c113.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Falcon documentation build configuration file, created by 4 # sphinx-quickstart on Wed Mar 12 14:14:02 2014. 5 # 6 # This file is execfile()d with the current directory set to its 7 # containing dir. 8 # 9 # Note that not all possible configuration values are present in this 10 # autogenerated file. 11 # 12 # All configuration values have a default; values that are commented out 13 # serve to show the default. 14 15 import sys 16 import os 17 18 try: 19 import configparser 20 except ImportError: 21 import ConfigParser as configparser 22 23 import falcon 24 25 # on_rtd is whether we are on readthedocs.org 26 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 27 28 # If extensions (or modules to document with autodoc) are in another directory, 29 # add these directories to sys.path here. If the directory is relative to the 30 # documentation root, use os.path.abspath to make it absolute, like shown here. 31 sys.path.insert(0, os.path.abspath('..')) 32 sys.path.insert(0, os.path.abspath('.')) 33 34 # Path to custom themes 35 sys.path.append(os.path.abspath('_themes')) 36 37 # -- General configuration ------------------------------------------------ 38 39 # If your documentation needs a minimal Sphinx version, state it here. 40 #needs_sphinx = '1.0' 41 42 # Add any Sphinx extension module names here, as strings. They can be 43 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 44 # ones. 45 extensions = [ 46 'sphinx.ext.autodoc', 47 'sphinx.ext.intersphinx', 48 'sphinx.ext.viewcode', 49 'sphinx.ext.napoleon', 50 51 # Falcon-specific extensions 52 'ext.rfc', 53 'ext.doorway', 54 ] 55 56 # Add any paths that contain templates here, relative to this directory. 57 templates_path = ['_templates'] 58 59 # The suffix of source filenames. 60 source_suffix = '.rst' 61 62 # The encoding of source files. 63 #source_encoding = 'utf-8-sig' 64 65 # The master toctree document. 66 master_doc = 'index' 67 68 # General information about the project. 69 project = u'Falcon' 70 copyright = u"2016 Falcon Contributors | Logo based on a <a href=https://commons.wikimedia.org/wiki/File:Brown-Falcon,-Vic,-3.1.2008.jpg>photograph by John O'Neill</a>" 71 72 # The version info for the project you're documenting, acts as replacement for 73 # |version| and |release|, also used in various other places throughout the 74 # built documents. 75 76 cfg = configparser.SafeConfigParser() 77 cfg.read('../setup.cfg') 78 tag = cfg.get('egg_info', 'tag_build') 79 80 html_context = { 81 'prerelease': bool(tag), # True if tag is not the empty string 82 } 83 84 # The short X.Y version. 85 version = '.'.join(falcon.__version__.split('.')[0:2]) + tag 86 87 # The full version, including alpha/beta/rc tags. 88 release = falcon.__version__ + tag 89 90 # The language for content autogenerated by Sphinx. Refer to documentation 91 # for a list of supported languages. 
92 #language = None 93 94 # There are two options for replacing |today|: either, you set today to some 95 # non-false value, then it is used: 96 #today = '' 97 # Else, today_fmt is used as the format for a strftime call. 98 #today_fmt = '%B %d, %Y' 99 100 # List of patterns, relative to source directory, that match files and 101 # directories to ignore when looking for source files. 102 exclude_patterns = ['_build'] 103 104 # The reST default role (used for this markup: `text`) to use for all 105 # documents. 106 #default_role = None 107 108 # If true, '()' will be appended to :func: etc. cross-reference text. 109 #add_function_parentheses = True 110 111 # If true, the current module name will be prepended to all description 112 # unit titles (such as .. function::). 113 #add_module_names = True 114 115 # If true, sectionauthor and moduleauthor directives will be shown in the 116 # output. They are ignored by default. 117 #show_authors = False 118 119 # The name of the Pygments (syntax highlighting) style to use. 120 # pygments_style = 'flask_theme_support.FlaskyStyle' 121 122 # A list of ignored prefixes for module index sorting. 123 #modindex_common_prefix = [] 124 125 # If true, keep warnings as "system message" paragraphs in the built documents. 126 #keep_warnings = False 127 128 129 # -- Options for HTML output ---------------------------------------------- 130 131 # Add any paths that contain custom themes here, relative to this directory. 132 # html_theme_path = ['_themes'] 133 # html_theme = '' 134 135 html_theme = 'alabaster' 136 137 # if not on_rtd: 138 # # Use the RTD theme explicitly if it is available 139 # try: 140 # import sphinx_rtd_theme 141 142 # html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 143 # html_theme = "sphinx_rtd_theme" 144 # except ImportError: 145 # pass 146 147 # Theme options are theme-specific and customize the look and feel of a theme 148 # further. For a list of options available for each theme, see the 149 # documentation. 150 html_theme_options = { 151 'github_user': 'falconry', 152 'github_repo': 'falcon', 153 'github_button': False, 154 'github_banner': True, 155 'fixed_sidebar': True, 156 'show_powered_by': False, 157 'extra_nav_links': { 158 'Falcon Home': 'http://falconframework.org/', 159 'Get Help': 'community/help.html', 160 }, 161 } 162 163 # The name for this set of Sphinx documents. If None, it defaults to 164 # "<project> v<release> documentation". 165 #html_title = None 166 167 # A shorter title for the navigation bar. Default is the same as html_title. 168 #html_short_title = None 169 170 # The name of an image file (relative to this directory) to place at the top 171 # of the sidebar. 172 # html_logo = '../falcon.png' 173 174 # The name of an image file (within the static path) to use as favicon of the 175 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 176 # pixels large. 177 html_favicon = '_static/img/favicon.ico' 178 179 # Add any paths that contain custom static files (such as style sheets) here, 180 # relative to this directory. They are copied after the builtin static files, 181 # so a file named "default.css" will overwrite the builtin "default.css". 182 html_static_path = ['_static'] 183 184 # Add any extra paths that contain custom files (such as robots.txt or 185 # .htaccess) here, relative to this directory. These files are copied 186 # directly to the root of the documentation. 
187 #html_extra_path = [] 188 189 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 190 # using the given strftime format. 191 #html_last_updated_fmt = '%b %d, %Y' 192 193 # If true, SmartyPants will be used to convert quotes and dashes to 194 # typographically correct entities. 195 #html_use_smartypants = True 196 197 # Custom sidebar templates, maps document names to template names. 198 # html_sidebars = { 199 # 'index': ['side-primary.html', 'searchbox.html'], 200 # '**': ['side-secondary.html', 'localtoc.html', 201 # 'relations.html', 'searchbox.html'] 202 # } 203 204 html_sidebars = { 205 '**': [ 206 'sidebar-top.html', 207 'about.html', 208 'navigation.html', 209 'relations.html', 210 'searchbox.html', 211 ] 212 } 213 214 # Additional templates that should be rendered to pages, maps page names to 215 # template names. 216 #html_additional_pages = {} 217 218 # If false, no module index is generated. 219 #html_domain_indices = True 220 221 # If false, no index is generated. 222 #html_use_index = True 223 224 # If true, the index is split into individual pages for each letter. 225 #html_split_index = False 226 227 # If true, links to the reST sources are added to the pages. 228 #html_show_sourcelink = True 229 230 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 231 #html_show_sphinx = True 232 233 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 234 html_show_copyright = True 235 236 # If true, an OpenSearch description file will be output, and all pages will 237 # contain a <link> tag referring to it. The value of this option must be the 238 # base URL from which the finished HTML is served. 239 #html_use_opensearch = '' 240 241 # This is the file name suffix for HTML files (e.g. ".xhtml"). 242 #html_file_suffix = None 243 244 # Output file base name for HTML help builder. 245 htmlhelp_basename = 'Falcondoc' 246 247 248 # -- Options for LaTeX output --------------------------------------------- 249 250 latex_elements = { 251 # The paper size ('letterpaper' or 'a4paper'). 252 #'papersize': 'letterpaper', 253 254 # The font size ('10pt', '11pt' or '12pt'). 255 #'pointsize': '10pt', 256 257 # Additional stuff for the LaTeX preamble. 258 #'preamble': '', 259 } 260 261 # Grouping the document tree into LaTeX files. List of tuples 262 # (source start file, target name, title, 263 # author, documentclass [howto, manual, or own class]). 264 latex_documents = [ 265 ('index', 'Falcon.tex', u'Falcon Documentation', 266 u'Kurt Griffiths et al.', 'manual'), 267 ] 268 269 # The name of an image file (relative to this directory) to place at the top of 270 # the title page. 271 #latex_logo = None 272 273 # For "manual" documents, if this is true, then toplevel headings are parts, 274 # not chapters. 275 #latex_use_parts = False 276 277 # If true, show page references after internal links. 278 #latex_show_pagerefs = False 279 280 # If true, show URL addresses after external links. 281 #latex_show_urls = False 282 283 # Documents to append as an appendix to all manuals. 284 #latex_appendices = [] 285 286 # If false, no module index is generated. 287 #latex_domain_indices = True 288 289 290 # -- Options for manual page output --------------------------------------- 291 292 # One entry per manual page. List of tuples 293 # (source start file, name, description, authors, manual section). 
294 man_pages = [ 295 ('index', 'falcon', u'Falcon Documentation', 296 [u'Kurt Griffiths et al.'], 1) 297 ] 298 299 # If true, show URL addresses after external links. 300 #man_show_urls = False 301 302 303 # -- Options for Texinfo output ------------------------------------------- 304 305 # Grouping the document tree into Texinfo files. List of tuples 306 # (source start file, target name, title, author, 307 # dir menu entry, description, category) 308 texinfo_documents = [ 309 ('index', 'Falcon', u'Falcon Documentation', 310 u'Kurt Griffiths et al.', 'Falcon', 'One line description of project.', 311 'Miscellaneous'), 312 ] 313 314 # Documents to append as an appendix to all manuals. 315 #texinfo_appendices = [] 316 317 # If false, no module index is generated. 318 #texinfo_domain_indices = True 319 320 # How to display URL addresses: 'footnote', 'no', or 'inline'. 321 #texinfo_show_urls = 'footnote' 322 323 # If true, do not generate a @detailmenu in the "Top" node's menu. 324 #texinfo_no_detailmenu = False 325 326 327 # Example configuration for intersphinx: refer to the Python standard library. 328 intersphinx_mapping = {'http://docs.python.org/2': None} 329 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -152,7 +152,7 @@ 'github_repo': 'falcon', 'github_button': False, 'github_banner': True, - 'fixed_sidebar': True, + 'fixed_sidebar': False, 'show_powered_by': False, 'extra_nav_links': { 'Falcon Home': 'http://falconframework.org/',
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -152,7 +152,7 @@\n 'github_repo': 'falcon',\n 'github_button': False,\n 'github_banner': True,\n- 'fixed_sidebar': True,\n+ 'fixed_sidebar': False,\n 'show_powered_by': False,\n 'extra_nav_links': {\n 'Falcon Home': 'http://falconframework.org/',\n", "issue": "Doc site: On small screen height, sidebar (\"Navigation\") clips at bottom.\nUsing a laptop with 768 pixels height resolution.\r\n![sidebar_clip](https://cloud.githubusercontent.com/assets/13870149/21928904/8a6fef78-d9b3-11e6-847f-af666a42c113.png)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Falcon documentation build configuration file, created by\n# sphinx-quickstart on Wed Mar 12 14:14:02 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nimport falcon\n\n# on_rtd is whether we are on readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.insert(0, os.path.abspath('.'))\n\n# Path to custom themes\nsys.path.append(os.path.abspath('_themes'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n\n # Falcon-specific extensions\n 'ext.rfc',\n 'ext.doorway',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Falcon'\ncopyright = u\"2016 Falcon Contributors | Logo based on a <a href=https://commons.wikimedia.org/wiki/File:Brown-Falcon,-Vic,-3.1.2008.jpg>photograph by John O'Neill</a>\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\ncfg = configparser.SafeConfigParser()\ncfg.read('../setup.cfg')\ntag = cfg.get('egg_info', 'tag_build')\n\nhtml_context = {\n 'prerelease': bool(tag), # True if tag is not the empty string\n}\n\n# The short X.Y version.\nversion = '.'.join(falcon.__version__.split('.')[0:2]) + tag\n\n# The full version, including alpha/beta/rc tags.\nrelease = falcon.__version__ + tag\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'flask_theme_support.FlaskyStyle'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = ['_themes']\n# html_theme = ''\n\nhtml_theme = 'alabaster'\n\n# if not on_rtd:\n# # Use the RTD theme explicitly if it is available\n# try:\n# import sphinx_rtd_theme\n\n# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# html_theme = \"sphinx_rtd_theme\"\n# except ImportError:\n# pass\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'github_user': 'falconry',\n 'github_repo': 'falcon',\n 'github_button': False,\n 'github_banner': True,\n 'fixed_sidebar': True,\n 'show_powered_by': False,\n 'extra_nav_links': {\n 'Falcon Home': 'http://falconframework.org/',\n 'Get Help': 'community/help.html',\n },\n}\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = '../falcon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/img/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {\n# 'index': ['side-primary.html', 'searchbox.html'],\n# '**': ['side-secondary.html', 'localtoc.html',\n# 'relations.html', 'searchbox.html']\n# }\n\nhtml_sidebars = {\n '**': [\n 'sidebar-top.html',\n 'about.html',\n 'navigation.html',\n 'relations.html',\n 'searchbox.html',\n ]\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\nhtml_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Falcondoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Falcon.tex', u'Falcon Documentation',\n u'Kurt Griffiths et al.', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'falcon', u'Falcon Documentation',\n [u'Kurt Griffiths et al.'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Falcon', u'Falcon Documentation',\n u'Kurt Griffiths et al.', 'Falcon', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'http://docs.python.org/2': None}\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Falcon documentation build configuration file, created by\n# sphinx-quickstart on Wed Mar 12 14:14:02 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nimport falcon\n\n# on_rtd is whether we are on readthedocs.org\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.insert(0, os.path.abspath('.'))\n\n# Path to custom themes\nsys.path.append(os.path.abspath('_themes'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n\n # Falcon-specific extensions\n 'ext.rfc',\n 'ext.doorway',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Falcon'\ncopyright = u\"2016 Falcon Contributors | Logo based on a <a href=https://commons.wikimedia.org/wiki/File:Brown-Falcon,-Vic,-3.1.2008.jpg>photograph by John O'Neill</a>\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\ncfg = configparser.SafeConfigParser()\ncfg.read('../setup.cfg')\ntag = cfg.get('egg_info', 'tag_build')\n\nhtml_context = {\n 'prerelease': bool(tag), # True if tag is not the empty string\n}\n\n# The short X.Y version.\nversion = '.'.join(falcon.__version__.split('.')[0:2]) + tag\n\n# The full version, including alpha/beta/rc tags.\nrelease = falcon.__version__ + tag\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\n# pygments_style = 'flask_theme_support.FlaskyStyle'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = ['_themes']\n# html_theme = ''\n\nhtml_theme = 'alabaster'\n\n# if not on_rtd:\n# # Use the RTD theme explicitly if it is available\n# try:\n# import sphinx_rtd_theme\n\n# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n# html_theme = \"sphinx_rtd_theme\"\n# except ImportError:\n# pass\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'github_user': 'falconry',\n 'github_repo': 'falcon',\n 'github_button': False,\n 'github_banner': True,\n 'fixed_sidebar': False,\n 'show_powered_by': False,\n 'extra_nav_links': {\n 'Falcon Home': 'http://falconframework.org/',\n 'Get Help': 'community/help.html',\n },\n}\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = '../falcon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/img/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {\n# 'index': ['side-primary.html', 'searchbox.html'],\n# '**': ['side-secondary.html', 'localtoc.html',\n# 'relations.html', 'searchbox.html']\n# }\n\nhtml_sidebars = {\n '**': [\n 'sidebar-top.html',\n 'about.html',\n 'navigation.html',\n 'relations.html',\n 'searchbox.html',\n ]\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\nhtml_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Falcondoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Falcon.tex', u'Falcon Documentation',\n u'Kurt Griffiths et al.', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'falcon', u'Falcon Documentation',\n [u'Kurt Griffiths et al.'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Falcon', u'Falcon Documentation',\n u'Kurt Griffiths et al.', 'Falcon', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'http://docs.python.org/2': None}\n", "path": "docs/conf.py"}]}
3,796
107
gh_patches_debug_1978
rasdani/github-patches
git_diff
freedomofpress__securedrop-5369
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- doc-linkcheck needs some appeasement ## Description We have a [CI failure because of a link to a private repo](https://app.circleci.com/jobs/github/freedomofpress/securedrop/42146). ## Steps to Reproduce Run `make docs-linkcheck`. ## Expected Behavior That it would complete with no error. ## Actual Behavior The link to the private repo causes a 404. ## Comments That private URL should be added to `linkcheck_ignore` in `docs/conf.py`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # SecureDrop documentation build configuration file, created by 4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015. 5 # 6 # This file is execfile()d with the current directory set to its 7 # containing dir. 8 # 9 # Note that not all possible configuration values are present in this 10 # autogenerated file. 11 # 12 # All configuration values have a default; values that are commented out 13 # serve to show the default. 14 15 import os 16 17 # Detect if we're being built by Read the Docs 18 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs 19 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 20 21 # If extensions (or modules to document with autodoc) are in another directory, 22 # add these directories to sys.path here. If the directory is relative to the 23 # documentation root, use os.path.abspath to make it absolute, like shown here. 24 # sys.path.insert(0, os.path.abspath('.')) 25 26 # -- General configuration ------------------------------------------------ 27 28 # If your documentation needs a minimal Sphinx version, state it here. 29 # needs_sphinx = '1.0' 30 31 # Add any Sphinx extension module names here, as strings. They can be 32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 # ones. 34 extensions = ['sphinx.ext.todo', ] 35 36 # Add any paths that contain templates here, relative to this directory. 37 templates_path = ['_templates'] 38 39 # The suffix(es) of source filenames. 40 # You can specify multiple suffix as a list of string: 41 # source_suffix = ['.rst', '.md'] 42 source_suffix = '.rst' 43 44 # The encoding of source files. 45 # source_encoding = 'utf-8-sig' 46 47 # The master toctree document. 48 master_doc = 'index' 49 50 # General information about the project. 51 project = u'SecureDrop' 52 copyright = u'2015-2020, Freedom of the Press Foundation' 53 author = u'SecureDrop Team and Contributors' 54 55 # The version info for the project you're documenting, acts as replacement for 56 # |version| and |release|, also used in various other places throughout the 57 # built documents. 58 # 59 # The short X.Y version. 60 version = '1.4.1' 61 # The full version, including alpha/beta/rc tags. 62 release = '1.4.1' 63 64 # The language for content autogenerated by Sphinx. Refer to documentation 65 # for a list of supported languages. 66 # 67 # This is also used if you do content translation via gettext catalogs. 68 # Usually you set "language" from the command line for these cases. 69 language = None 70 71 # There are two options for replacing |today|: either, you set today to some 72 # non-false value, then it is used: 73 # today = '' 74 # Else, today_fmt is used as the format for a strftime call. 
75 # today_fmt = '%B %d, %Y' 76 77 # List of patterns, relative to source directory, that match files and 78 # directories to ignore when looking for source files. 79 exclude_patterns = ['_build'] 80 81 # The reST default role (used for this markup: `text`) to use for all 82 # documents. 83 # default_role = None 84 85 # If true, '()' will be appended to :func: etc. cross-reference text. 86 # add_function_parentheses = True 87 88 # If true, the current module name will be prepended to all description 89 # unit titles (such as .. function::). 90 # add_module_names = True 91 92 # If true, sectionauthor and moduleauthor directives will be shown in the 93 # output. They are ignored by default. 94 # show_authors = False 95 96 # The name of the Pygments (syntax highlighting) style to use. 97 pygments_style = 'sphinx' 98 99 # A list of ignored prefixes for module index sorting. 100 # modindex_common_prefix = [] 101 102 # If true, keep warnings as "system message" paragraphs in the built documents. 103 # keep_warnings = False 104 105 # If true, `todo` and `todoList` produce output, else they produce nothing. 106 todo_include_todos = False 107 108 109 # -- Options for HTML output ---------------------------------------------- 110 111 # The theme to use for HTML and HTML Help pages. See the documentation for 112 # a list of builtin themes. 113 if on_rtd: 114 html_theme = 'default' 115 else: 116 try: 117 # If you want to build the docs locally using the RTD theme, 118 # you may need to install it: ``pip install sphinx_rtd_theme``. 119 # https://github.com/snide/sphinx_rtd_theme#via-package 120 import sphinx_rtd_theme 121 html_theme = "sphinx_rtd_theme" 122 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 123 except ImportError: 124 # This theme is included with Sphinx and is quite nice (based 125 # on the Pocoo themes), but since we're using the RTD theme 126 # for the production docs, it's best to use that to avoid 127 # issues due to discrepancies between the themes. 128 html_theme = 'alabaster' 129 130 # Theme options are theme-specific and customize the look and feel of a theme 131 # further. For a list of options available for each theme, see the 132 # documentation. 133 # html_theme_options = {} 134 135 # Add any paths that contain custom themes here, relative to this directory. 136 # html_theme_path = [] 137 138 # The name for this set of Sphinx documents. If None, it defaults to 139 # "<project> v<release> documentation". 140 # html_title = None 141 142 # A shorter title for the navigation bar. Default is the same as html_title. 143 # html_short_title = None 144 145 # The name of an image file (relative to this directory) to place at the top 146 # of the sidebar. 147 html_logo = '../securedrop/static/i/favicon.png' 148 149 # The name of an image file (within the static path) to use as favicon of the 150 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 151 # pixels large. 152 # html_favicon = None 153 154 # Add any paths that contain custom static files (such as style sheets) here, 155 # relative to this directory. They are copied after the builtin static files, 156 # so a file named "default.css" will overwrite the builtin "default.css". 157 # html_static_path = ['_static'] 158 159 # Add any extra paths that contain custom files (such as robots.txt or 160 # .htaccess) here, relative to this directory. These files are copied 161 # directly to the root of the documentation. 
162 # html_extra_path = [] 163 164 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 165 # using the given strftime format. 166 # html_last_updated_fmt = '%b %d, %Y' 167 168 # If true, SmartyPants will be used to convert quotes and dashes to 169 # typographically correct entities. 170 # html_use_smartypants = True 171 172 # Custom sidebar templates, maps document names to template names. 173 # html_sidebars = {} 174 175 # Additional templates that should be rendered to pages, maps page names to 176 # template names. 177 # html_additional_pages = {} 178 179 # If false, no module index is generated. 180 # html_domain_indices = True 181 182 # If false, no index is generated. 183 # html_use_index = True 184 185 # If true, the index is split into individual pages for each letter. 186 # html_split_index = False 187 188 # If true, links to the reST sources are added to the pages. 189 # html_show_sourcelink = True 190 191 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 192 # html_show_sphinx = True 193 194 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 195 # html_show_copyright = True 196 197 # If true, an OpenSearch description file will be output, and all pages will 198 # contain a <link> tag referring to it. The value of this option must be the 199 # base URL from which the finished HTML is served. 200 # html_use_opensearch = '' 201 202 # This is the file name suffix for HTML files (e.g. ".xhtml"). 203 # html_file_suffix = None 204 205 # Language to be used for generating the HTML full-text search index. 206 # Sphinx supports the following languages: 207 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 208 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 209 # html_search_language = 'en' 210 211 # A dictionary with options for the search language support, empty by default. 212 # Now only 'ja' uses this config value 213 # html_search_options = {'type': 'default'} 214 215 # The name of a javascript file (relative to the configuration directory) that 216 # implements a search results scorer. If empty, the default will be used. 217 # html_search_scorer = 'scorer.js' 218 219 # Output file base name for HTML help builder. 220 htmlhelp_basename = 'SecureDropdoc' 221 222 # -- Options for LaTeX output --------------------------------------------- 223 224 latex_elements = { 225 # The paper size ('letterpaper' or 'a4paper'). 226 # 'papersize': 'letterpaper', 227 228 # The font size ('10pt', '11pt' or '12pt'). 229 # 'pointsize': '10pt', 230 231 # Additional stuff for the LaTeX preamble. 232 # 'preamble': '', 233 234 # Latex figure (float) alignment 235 # 'figure_align': 'htbp', 236 } 237 238 # Grouping the document tree into LaTeX files. List of tuples 239 # (source start file, target name, title, 240 # author, documentclass [howto, manual, or own class]). 241 latex_documents = [ 242 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation', 243 author, 'manual'), 244 ] 245 246 # The name of an image file (relative to this directory) to place at the top of 247 # the title page. 248 # latex_logo = None 249 250 # For "manual" documents, if this is true, then toplevel headings are parts, 251 # not chapters. 252 # latex_use_parts = False 253 254 # If true, show page references after internal links. 255 # latex_show_pagerefs = False 256 257 # If true, show URL addresses after external links. 258 # latex_show_urls = False 259 260 # Documents to append as an appendix to all manuals. 
261 # latex_appendices = [] 262 263 # If false, no module index is generated. 264 # latex_domain_indices = True 265 266 267 # -- Options for manual page output --------------------------------------- 268 269 # One entry per manual page. List of tuples 270 # (source start file, name, description, authors, manual section). 271 man_pages = [ 272 (master_doc, 'securedrop', u'SecureDrop Documentation', 273 [author], 1) 274 ] 275 276 # If true, show URL addresses after external links. 277 # man_show_urls = False 278 279 280 # -- Options for Texinfo output ------------------------------------------- 281 282 # Grouping the document tree into Texinfo files. List of tuples 283 # (source start file, target name, title, author, 284 # dir menu entry, description, category) 285 texinfo_documents = [ 286 (master_doc, 'SecureDrop', u'SecureDrop Documentation', 287 author, 'SecureDrop', 'One line description of project.', 288 'Miscellaneous'), 289 ] 290 291 # Documents to append as an appendix to all manuals. 292 # texinfo_appendices = [] 293 294 # If false, no module index is generated. 295 # texinfo_domain_indices = True 296 297 # How to display URL addresses: 'footnote', 'no', or 'inline'. 298 # texinfo_show_urls = 'footnote' 299 300 # If true, do not generate a @detailmenu in the "Top" node's menu. 301 # texinfo_no_detailmenu = False 302 303 # -- Options for linkcheck -- 304 305 linkcheck_retries = 3 306 307 linkcheck_ignore = [ 308 r'http://127.0.0.1(:\d+)?/?', 309 r'http://localhost(:\d+)?/?', 310 'https://forum.securedrop.org/admin/users/list/active', 311 'https://weblate.securedrop.org/projects/securedrop/securedrop/#repository', 312 ] 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -309,4 +309,5 @@ r'http://localhost(:\d+)?/?', 'https://forum.securedrop.org/admin/users/list/active', 'https://weblate.securedrop.org/projects/securedrop/securedrop/#repository', + 'https://github.com/freedomofpress/securedrop-debian-packages-lfs', ]
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -309,4 +309,5 @@\n r'http://localhost(:\\d+)?/?',\n 'https://forum.securedrop.org/admin/users/list/active',\n 'https://weblate.securedrop.org/projects/securedrop/securedrop/#repository',\n+ 'https://github.com/freedomofpress/securedrop-debian-packages-lfs',\n ]\n", "issue": "doc-linkcheck needs some appeasement\n## Description\r\n\r\nWe have a [CI failure because of a link to a private repo](https://app.circleci.com/jobs/github/freedomofpress/securedrop/42146). \r\n\r\n## Steps to Reproduce\r\n\r\nRun `make docs-linkcheck`.\r\n\r\n## Expected Behavior\r\n\r\nThat it would complete with no error.\r\n\r\n## Actual Behavior\r\n\r\nThe link to the private repo causes a 404. \r\n\r\n## Comments\r\n\r\nThat private URL should be added to `linkcheck_ignore` in `docs/conf.py`. \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2015-2020, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.4.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.4.1'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n# -- Options for linkcheck --\n\nlinkcheck_retries = 3\n\nlinkcheck_ignore = [\n r'http://127.0.0.1(:\\d+)?/?',\n r'http://localhost(:\\d+)?/?',\n 'https://forum.securedrop.org/admin/users/list/active',\n 'https://weblate.securedrop.org/projects/securedrop/securedrop/#repository',\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2015-2020, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.4.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.4.1'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n# -- Options for linkcheck --\n\nlinkcheck_retries = 3\n\nlinkcheck_ignore = [\n r'http://127.0.0.1(:\\d+)?/?',\n r'http://localhost(:\\d+)?/?',\n 'https://forum.securedrop.org/admin/users/list/active',\n 'https://weblate.securedrop.org/projects/securedrop/securedrop/#repository',\n 'https://github.com/freedomofpress/securedrop-debian-packages-lfs',\n]\n", "path": "docs/conf.py"}]}
3,869
107
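
Aside on the record above, for readers scanning the dump: the accepted fix is Sphinx's standard `linkcheck_ignore` option, whose entries are regular expressions that the linkcheck builder skips instead of fetching. Below is a minimal sketch of the resulting `conf.py` fragment; the URLs are the ones from the record, while the retry count and comments are illustrative rather than quoted verbatim from the SecureDrop repository.

```python
# docs/conf.py -- linkcheck settings (illustrative sketch, not the full file).
# Each entry in linkcheck_ignore is a regular expression; URLs matching any
# pattern are skipped by the linkcheck builder instead of being requested.

linkcheck_retries = 3  # retry transient network failures before reporting an error

linkcheck_ignore = [
    r'http://127.0.0.1(:\d+)?/?',   # local addresses are unreachable in CI
    r'http://localhost(:\d+)?/?',
    # Pages behind a login or in a private repository answer 403/404 to the
    # anonymous linkcheck client, so they are listed explicitly:
    'https://forum.securedrop.org/admin/users/list/active',
    'https://weblate.securedrop.org/projects/securedrop/securedrop/#repository',
    'https://github.com/freedomofpress/securedrop-debian-packages-lfs',
]
```

With this in place, `sphinx-build -b linkcheck` (which `make docs-linkcheck` presumably invokes) skips the private GitHub URL rather than failing on its 404.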
gh_patches_debug_16047
rasdani/github-patches
git_diff
spotify__luigi-2564
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Postgres error if view depends on task_parameters.value There is a problem related to #2240. Luigi runs `ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT` every time `luigi.db_task_history.DbTaskHistory` is instantiated. Normally this would be fine because it will not change anything if the type of `value` is already `TEXT`. However, if you have a Postgres view that depends on this column, Postgres will refuse to run this instruction with an error message like `DETAIL: rule _RETURN on view taskparameter_view depends on column "value"`. There doesn't seem a sane way to make Postgres accept this statement, it [seems to be necessary](https://stackoverflow.com/questions/17989355/alter-column-ignoring-dependent-views) to drop the view (and all dependent views), run the `ALTER TABLE` statement and re-create them. It would be much nicer, if Luigi could check if the migration is actually necessary before executing that statement, similar to the way it checks for *sqlite*: ```PYTHON elif 'sqlite' in engine.dialect.name: # SQLite does not support changing column types. A database file will need # to be used to pickup this migration change. for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall(): if i['name'] == 'value' and i['type'] != 'TEXT': logger.warning( 'SQLite can not change column types. Please use a new database ' 'to pickup column type changes.' ) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `luigi/db_task_history.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2012-2015 Spotify AB 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 # 17 18 """ 19 Provides a database backend to the central scheduler. This lets you see historical runs. 20 See :ref:`TaskHistory` for information about how to turn out the task history feature. 21 """ 22 # 23 # Description: Added codes for visualization of how long each task takes 24 # running-time until it reaches the next status (failed or done) 25 # At "{base_url}/tasklist", all completed(failed or done) tasks are shown. 26 # At "{base_url}/tasklist", a user can select one specific task to see 27 # how its running-time has changed over time. 28 # At "{base_url}/tasklist/{task_name}", it visualizes a multi-bar graph 29 # that represents the changes of the running-time for a selected task 30 # up to the next status (failed or done). 31 # This visualization let us know how the running-time of the specific task 32 # has changed over time. 33 # 34 # Copyright 2015 Naver Corp. 
35 # Author Yeseul Park ([email protected]) 36 # 37 38 import datetime 39 import logging 40 from contextlib import contextmanager 41 42 from luigi import six 43 44 from luigi import configuration 45 from luigi import task_history 46 from luigi.task_status import DONE, FAILED, PENDING, RUNNING 47 48 import sqlalchemy 49 import sqlalchemy.ext.declarative 50 import sqlalchemy.orm 51 import sqlalchemy.orm.collections 52 from sqlalchemy.engine import reflection 53 Base = sqlalchemy.ext.declarative.declarative_base() 54 55 logger = logging.getLogger('luigi-interface') 56 57 58 class DbTaskHistory(task_history.TaskHistory): 59 """ 60 Task History that writes to a database using sqlalchemy. 61 Also has methods for useful db queries. 62 """ 63 CURRENT_SOURCE_VERSION = 1 64 65 @contextmanager 66 def _session(self, session=None): 67 if session: 68 yield session 69 else: 70 session = self.session_factory() 71 try: 72 yield session 73 except BaseException: 74 session.rollback() 75 raise 76 else: 77 session.commit() 78 79 def __init__(self): 80 config = configuration.get_config() 81 connection_string = config.get('task_history', 'db_connection') 82 self.engine = sqlalchemy.create_engine(connection_string) 83 self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False) 84 Base.metadata.create_all(self.engine) 85 self.tasks = {} # task_id -> TaskRecord 86 87 _upgrade_schema(self.engine) 88 89 def task_scheduled(self, task): 90 htask = self._get_task(task, status=PENDING) 91 self._add_task_event(htask, TaskEvent(event_name=PENDING, ts=datetime.datetime.now())) 92 93 def task_finished(self, task, successful): 94 event_name = DONE if successful else FAILED 95 htask = self._get_task(task, status=event_name) 96 self._add_task_event(htask, TaskEvent(event_name=event_name, ts=datetime.datetime.now())) 97 98 def task_started(self, task, worker_host): 99 htask = self._get_task(task, status=RUNNING, host=worker_host) 100 self._add_task_event(htask, TaskEvent(event_name=RUNNING, ts=datetime.datetime.now())) 101 102 def _get_task(self, task, status, host=None): 103 if task.id in self.tasks: 104 htask = self.tasks[task.id] 105 htask.status = status 106 if host: 107 htask.host = host 108 else: 109 htask = self.tasks[task.id] = task_history.StoredTask(task, status, host) 110 return htask 111 112 def _add_task_event(self, task, event): 113 for (task_record, session) in self._find_or_create_task(task): 114 task_record.events.append(event) 115 116 def _find_or_create_task(self, task): 117 with self._session() as session: 118 if task.record_id is not None: 119 logger.debug("Finding task with record_id [%d]", task.record_id) 120 task_record = session.query(TaskRecord).get(task.record_id) 121 if not task_record: 122 raise Exception("Task with record_id, but no matching Task record!") 123 yield (task_record, session) 124 else: 125 task_record = TaskRecord(task_id=task._task.id, name=task.task_family, host=task.host) 126 for (k, v) in six.iteritems(task.parameters): 127 task_record.parameters[k] = TaskParameter(name=k, value=v) 128 session.add(task_record) 129 yield (task_record, session) 130 if task.host: 131 task_record.host = task.host 132 task.record_id = task_record.id 133 134 def find_all_by_parameters(self, task_name, session=None, **task_params): 135 """ 136 Find tasks with the given task_name and the same parameters as the kwargs. 
137 """ 138 with self._session(session) as session: 139 query = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name) 140 for (k, v) in six.iteritems(task_params): 141 alias = sqlalchemy.orm.aliased(TaskParameter) 142 query = query.join(alias).filter(alias.name == k, alias.value == v) 143 144 tasks = query.order_by(TaskEvent.ts) 145 for task in tasks: 146 # Sanity check 147 assert all(k in task.parameters and v == str(task.parameters[k].value) for (k, v) in six.iteritems(task_params)) 148 149 yield task 150 151 def find_all_by_name(self, task_name, session=None): 152 """ 153 Find all tasks with the given task_name. 154 """ 155 return self.find_all_by_parameters(task_name, session) 156 157 def find_latest_runs(self, session=None): 158 """ 159 Return tasks that have been updated in the past 24 hours. 160 """ 161 with self._session(session) as session: 162 yesterday = datetime.datetime.now() - datetime.timedelta(days=1) 163 return session.query(TaskRecord).\ 164 join(TaskEvent).\ 165 filter(TaskEvent.ts >= yesterday).\ 166 group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\ 167 order_by(TaskEvent.ts.desc()).\ 168 all() 169 170 def find_all_runs(self, session=None): 171 """ 172 Return all tasks that have been updated. 173 """ 174 with self._session(session) as session: 175 return session.query(TaskRecord).all() 176 177 def find_all_events(self, session=None): 178 """ 179 Return all running/failed/done events. 180 """ 181 with self._session(session) as session: 182 return session.query(TaskEvent).all() 183 184 def find_task_by_id(self, id, session=None): 185 """ 186 Find task with the given record ID. 187 """ 188 with self._session(session) as session: 189 return session.query(TaskRecord).get(id) 190 191 192 class TaskParameter(Base): 193 """ 194 Table to track luigi.Parameter()s of a Task. 195 """ 196 __tablename__ = 'task_parameters' 197 task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), primary_key=True) 198 name = sqlalchemy.Column(sqlalchemy.String(128), primary_key=True) 199 value = sqlalchemy.Column(sqlalchemy.Text()) 200 201 def __repr__(self): 202 return "TaskParameter(task_id=%d, name=%s, value=%s)" % (self.task_id, self.name, self.value) 203 204 205 class TaskEvent(Base): 206 """ 207 Table to track when a task is scheduled, starts, finishes, and fails. 208 """ 209 __tablename__ = 'task_events' 210 id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 211 task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), index=True) 212 event_name = sqlalchemy.Column(sqlalchemy.String(20)) 213 ts = sqlalchemy.Column(sqlalchemy.TIMESTAMP, index=True, nullable=False) 214 215 def __repr__(self): 216 return "TaskEvent(task_id=%s, event_name=%s, ts=%s" % (self.task_id, self.event_name, self.ts) 217 218 219 class TaskRecord(Base): 220 """ 221 Base table to track information about a luigi.Task. 222 223 References to other tables are available through task.events, task.parameters, etc. 
224 """ 225 __tablename__ = 'tasks' 226 id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True) 227 task_id = sqlalchemy.Column(sqlalchemy.String(200), index=True) 228 name = sqlalchemy.Column(sqlalchemy.String(128), index=True) 229 host = sqlalchemy.Column(sqlalchemy.String(128)) 230 parameters = sqlalchemy.orm.relationship( 231 'TaskParameter', 232 collection_class=sqlalchemy.orm.collections.attribute_mapped_collection('name'), 233 cascade="all, delete-orphan") 234 events = sqlalchemy.orm.relationship( 235 'TaskEvent', 236 order_by=(sqlalchemy.desc(TaskEvent.ts), sqlalchemy.desc(TaskEvent.id)), 237 backref='task') 238 239 def __repr__(self): 240 return "TaskRecord(name=%s, host=%s)" % (self.name, self.host) 241 242 243 def _upgrade_schema(engine): 244 """ 245 Ensure the database schema is up to date with the codebase. 246 247 :param engine: SQLAlchemy engine of the underlying database. 248 """ 249 inspector = reflection.Inspector.from_engine(engine) 250 with engine.connect() as conn: 251 252 # Upgrade 1. Add task_id column and index to tasks 253 if 'task_id' not in [x['name'] for x in inspector.get_columns('tasks')]: 254 logger.warning('Upgrading DbTaskHistory schema: Adding tasks.task_id') 255 conn.execute('ALTER TABLE tasks ADD COLUMN task_id VARCHAR(200)') 256 conn.execute('CREATE INDEX ix_task_id ON tasks (task_id)') 257 258 # Upgrade 2. Alter value column to be TEXT, note that this is idempotent so no if-guard 259 if 'mysql' in engine.dialect.name: 260 conn.execute('ALTER TABLE task_parameters MODIFY COLUMN value TEXT') 261 elif 'oracle' in engine.dialect.name: 262 conn.execute('ALTER TABLE task_parameters MODIFY value TEXT') 263 elif 'mssql' in engine.dialect.name: 264 conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT') 265 elif 'postgresql' in engine.dialect.name: 266 conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT') 267 elif 'sqlite' in engine.dialect.name: 268 # SQLite does not support changing column types. A database file will need 269 # to be used to pickup this migration change. 270 for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall(): 271 if i['name'] == 'value' and i['type'] != 'TEXT': 272 logger.warning( 273 'SQLite can not change column types. Please use a new database ' 274 'to pickup column type changes.' 275 ) 276 else: 277 logger.warning( 278 'SQLAlcheny dialect {} could not be migrated to the TEXT type'.format( 279 engine.dialect 280 ) 281 ) 282 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/luigi/db_task_history.py b/luigi/db_task_history.py --- a/luigi/db_task_history.py +++ b/luigi/db_task_history.py @@ -263,7 +263,9 @@ elif 'mssql' in engine.dialect.name: conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT') elif 'postgresql' in engine.dialect.name: - conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT') + if str([x for x in inspector.get_columns('task_parameters') + if x['name'] == 'value'][0]['type']) != 'TEXT': + conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT') elif 'sqlite' in engine.dialect.name: # SQLite does not support changing column types. A database file will need # to be used to pickup this migration change.
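
The diff above makes the PostgreSQL branch conditional: the `ALTER TABLE` only runs when `task_parameters.value` is not already `TEXT`, so PostgreSQL never has to modify a column that dependent views pin in place. Here is a self-contained sketch of the same guard written against SQLAlchemy's reflection API; the table and column names follow the record, while the function name and logging are illustrative and not part of Luigi.

```python
import logging

from sqlalchemy.engine import reflection

logger = logging.getLogger(__name__)


def widen_value_column_if_needed(engine):
    """Change task_parameters.value to TEXT only when it is not TEXT already.

    Skipping the no-op ALTER avoids PostgreSQL rejecting the statement when a
    view depends on the column. Illustrative sketch, not Luigi's actual code.
    """
    inspector = reflection.Inspector.from_engine(engine)
    value_column = next(
        c for c in inspector.get_columns('task_parameters') if c['name'] == 'value'
    )

    if str(value_column['type']) == 'TEXT':
        logger.info('task_parameters.value is already TEXT; skipping ALTER TABLE')
        return

    # Raw-string execute in the SQLAlchemy 1.x style used by the code in the record.
    with engine.connect() as conn:
        conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')
```

On databases where the column was created as TEXT from the start (the common case), the function reduces to a cheap metadata read and the ALTER is never issued.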
{"golden_diff": "diff --git a/luigi/db_task_history.py b/luigi/db_task_history.py\n--- a/luigi/db_task_history.py\n+++ b/luigi/db_task_history.py\n@@ -263,7 +263,9 @@\n elif 'mssql' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT')\n elif 'postgresql' in engine.dialect.name:\n- conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')\n+ if str([x for x in inspector.get_columns('task_parameters')\n+ if x['name'] == 'value'][0]['type']) != 'TEXT':\n+ conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')\n elif 'sqlite' in engine.dialect.name:\n # SQLite does not support changing column types. A database file will need\n # to be used to pickup this migration change.\n", "issue": "Postgres error if view depends on task_parameters.value\nThere is a problem related to #2240. Luigi runs `ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT` every time `luigi.db_task_history.DbTaskHistory` is instantiated.\r\nNormally this would be fine because it will not change anything if the type of `value` is already `TEXT`.\r\nHowever, if you have a Postgres view that depends on this column, Postgres will refuse to run this instruction with an error message like `DETAIL: rule _RETURN on view taskparameter_view depends on column \"value\"`.\r\n\r\nThere doesn't seem a sane way to make Postgres accept this statement, it [seems to be necessary](https://stackoverflow.com/questions/17989355/alter-column-ignoring-dependent-views) to drop the view (and all dependent views), run the `ALTER TABLE` statement and re-create them.\r\n\r\nIt would be much nicer, if Luigi could check if the migration is actually necessary before executing that statement, similar to the way it checks for *sqlite*:\r\n\r\n```PYTHON\r\nelif 'sqlite' in engine.dialect.name:\r\n # SQLite does not support changing column types. A database file will need\r\n # to be used to pickup this migration change.\r\n for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall():\r\n if i['name'] == 'value' and i['type'] != 'TEXT':\r\n logger.warning(\r\n 'SQLite can not change column types. Please use a new database '\r\n 'to pickup column type changes.'\r\n )\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nProvides a database backend to the central scheduler. 
This lets you see historical runs.\nSee :ref:`TaskHistory` for information about how to turn out the task history feature.\n\"\"\"\n#\n# Description: Added codes for visualization of how long each task takes\n# running-time until it reaches the next status (failed or done)\n# At \"{base_url}/tasklist\", all completed(failed or done) tasks are shown.\n# At \"{base_url}/tasklist\", a user can select one specific task to see\n# how its running-time has changed over time.\n# At \"{base_url}/tasklist/{task_name}\", it visualizes a multi-bar graph\n# that represents the changes of the running-time for a selected task\n# up to the next status (failed or done).\n# This visualization let us know how the running-time of the specific task\n# has changed over time.\n#\n# Copyright 2015 Naver Corp.\n# Author Yeseul Park ([email protected])\n#\n\nimport datetime\nimport logging\nfrom contextlib import contextmanager\n\nfrom luigi import six\n\nfrom luigi import configuration\nfrom luigi import task_history\nfrom luigi.task_status import DONE, FAILED, PENDING, RUNNING\n\nimport sqlalchemy\nimport sqlalchemy.ext.declarative\nimport sqlalchemy.orm\nimport sqlalchemy.orm.collections\nfrom sqlalchemy.engine import reflection\nBase = sqlalchemy.ext.declarative.declarative_base()\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass DbTaskHistory(task_history.TaskHistory):\n \"\"\"\n Task History that writes to a database using sqlalchemy.\n Also has methods for useful db queries.\n \"\"\"\n CURRENT_SOURCE_VERSION = 1\n\n @contextmanager\n def _session(self, session=None):\n if session:\n yield session\n else:\n session = self.session_factory()\n try:\n yield session\n except BaseException:\n session.rollback()\n raise\n else:\n session.commit()\n\n def __init__(self):\n config = configuration.get_config()\n connection_string = config.get('task_history', 'db_connection')\n self.engine = sqlalchemy.create_engine(connection_string)\n self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False)\n Base.metadata.create_all(self.engine)\n self.tasks = {} # task_id -> TaskRecord\n\n _upgrade_schema(self.engine)\n\n def task_scheduled(self, task):\n htask = self._get_task(task, status=PENDING)\n self._add_task_event(htask, TaskEvent(event_name=PENDING, ts=datetime.datetime.now()))\n\n def task_finished(self, task, successful):\n event_name = DONE if successful else FAILED\n htask = self._get_task(task, status=event_name)\n self._add_task_event(htask, TaskEvent(event_name=event_name, ts=datetime.datetime.now()))\n\n def task_started(self, task, worker_host):\n htask = self._get_task(task, status=RUNNING, host=worker_host)\n self._add_task_event(htask, TaskEvent(event_name=RUNNING, ts=datetime.datetime.now()))\n\n def _get_task(self, task, status, host=None):\n if task.id in self.tasks:\n htask = self.tasks[task.id]\n htask.status = status\n if host:\n htask.host = host\n else:\n htask = self.tasks[task.id] = task_history.StoredTask(task, status, host)\n return htask\n\n def _add_task_event(self, task, event):\n for (task_record, session) in self._find_or_create_task(task):\n task_record.events.append(event)\n\n def _find_or_create_task(self, task):\n with self._session() as session:\n if task.record_id is not None:\n logger.debug(\"Finding task with record_id [%d]\", task.record_id)\n task_record = session.query(TaskRecord).get(task.record_id)\n if not task_record:\n raise Exception(\"Task with record_id, but no matching Task record!\")\n yield (task_record, session)\n else:\n 
task_record = TaskRecord(task_id=task._task.id, name=task.task_family, host=task.host)\n for (k, v) in six.iteritems(task.parameters):\n task_record.parameters[k] = TaskParameter(name=k, value=v)\n session.add(task_record)\n yield (task_record, session)\n if task.host:\n task_record.host = task.host\n task.record_id = task_record.id\n\n def find_all_by_parameters(self, task_name, session=None, **task_params):\n \"\"\"\n Find tasks with the given task_name and the same parameters as the kwargs.\n \"\"\"\n with self._session(session) as session:\n query = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name)\n for (k, v) in six.iteritems(task_params):\n alias = sqlalchemy.orm.aliased(TaskParameter)\n query = query.join(alias).filter(alias.name == k, alias.value == v)\n\n tasks = query.order_by(TaskEvent.ts)\n for task in tasks:\n # Sanity check\n assert all(k in task.parameters and v == str(task.parameters[k].value) for (k, v) in six.iteritems(task_params))\n\n yield task\n\n def find_all_by_name(self, task_name, session=None):\n \"\"\"\n Find all tasks with the given task_name.\n \"\"\"\n return self.find_all_by_parameters(task_name, session)\n\n def find_latest_runs(self, session=None):\n \"\"\"\n Return tasks that have been updated in the past 24 hours.\n \"\"\"\n with self._session(session) as session:\n yesterday = datetime.datetime.now() - datetime.timedelta(days=1)\n return session.query(TaskRecord).\\\n join(TaskEvent).\\\n filter(TaskEvent.ts >= yesterday).\\\n group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\\\n order_by(TaskEvent.ts.desc()).\\\n all()\n\n def find_all_runs(self, session=None):\n \"\"\"\n Return all tasks that have been updated.\n \"\"\"\n with self._session(session) as session:\n return session.query(TaskRecord).all()\n\n def find_all_events(self, session=None):\n \"\"\"\n Return all running/failed/done events.\n \"\"\"\n with self._session(session) as session:\n return session.query(TaskEvent).all()\n\n def find_task_by_id(self, id, session=None):\n \"\"\"\n Find task with the given record ID.\n \"\"\"\n with self._session(session) as session:\n return session.query(TaskRecord).get(id)\n\n\nclass TaskParameter(Base):\n \"\"\"\n Table to track luigi.Parameter()s of a Task.\n \"\"\"\n __tablename__ = 'task_parameters'\n task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), primary_key=True)\n name = sqlalchemy.Column(sqlalchemy.String(128), primary_key=True)\n value = sqlalchemy.Column(sqlalchemy.Text())\n\n def __repr__(self):\n return \"TaskParameter(task_id=%d, name=%s, value=%s)\" % (self.task_id, self.name, self.value)\n\n\nclass TaskEvent(Base):\n \"\"\"\n Table to track when a task is scheduled, starts, finishes, and fails.\n \"\"\"\n __tablename__ = 'task_events'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), index=True)\n event_name = sqlalchemy.Column(sqlalchemy.String(20))\n ts = sqlalchemy.Column(sqlalchemy.TIMESTAMP, index=True, nullable=False)\n\n def __repr__(self):\n return \"TaskEvent(task_id=%s, event_name=%s, ts=%s\" % (self.task_id, self.event_name, self.ts)\n\n\nclass TaskRecord(Base):\n \"\"\"\n Base table to track information about a luigi.Task.\n\n References to other tables are available through task.events, task.parameters, etc.\n \"\"\"\n __tablename__ = 'tasks'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n task_id = 
sqlalchemy.Column(sqlalchemy.String(200), index=True)\n name = sqlalchemy.Column(sqlalchemy.String(128), index=True)\n host = sqlalchemy.Column(sqlalchemy.String(128))\n parameters = sqlalchemy.orm.relationship(\n 'TaskParameter',\n collection_class=sqlalchemy.orm.collections.attribute_mapped_collection('name'),\n cascade=\"all, delete-orphan\")\n events = sqlalchemy.orm.relationship(\n 'TaskEvent',\n order_by=(sqlalchemy.desc(TaskEvent.ts), sqlalchemy.desc(TaskEvent.id)),\n backref='task')\n\n def __repr__(self):\n return \"TaskRecord(name=%s, host=%s)\" % (self.name, self.host)\n\n\ndef _upgrade_schema(engine):\n \"\"\"\n Ensure the database schema is up to date with the codebase.\n\n :param engine: SQLAlchemy engine of the underlying database.\n \"\"\"\n inspector = reflection.Inspector.from_engine(engine)\n with engine.connect() as conn:\n\n # Upgrade 1. Add task_id column and index to tasks\n if 'task_id' not in [x['name'] for x in inspector.get_columns('tasks')]:\n logger.warning('Upgrading DbTaskHistory schema: Adding tasks.task_id')\n conn.execute('ALTER TABLE tasks ADD COLUMN task_id VARCHAR(200)')\n conn.execute('CREATE INDEX ix_task_id ON tasks (task_id)')\n\n # Upgrade 2. Alter value column to be TEXT, note that this is idempotent so no if-guard\n if 'mysql' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters MODIFY COLUMN value TEXT')\n elif 'oracle' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters MODIFY value TEXT')\n elif 'mssql' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT')\n elif 'postgresql' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')\n elif 'sqlite' in engine.dialect.name:\n # SQLite does not support changing column types. A database file will need\n # to be used to pickup this migration change.\n for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall():\n if i['name'] == 'value' and i['type'] != 'TEXT':\n logger.warning(\n 'SQLite can not change column types. Please use a new database '\n 'to pickup column type changes.'\n )\n else:\n logger.warning(\n 'SQLAlcheny dialect {} could not be migrated to the TEXT type'.format(\n engine.dialect\n )\n )\n", "path": "luigi/db_task_history.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nProvides a database backend to the central scheduler. 
This lets you see historical runs.\nSee :ref:`TaskHistory` for information about how to turn out the task history feature.\n\"\"\"\n#\n# Description: Added codes for visualization of how long each task takes\n# running-time until it reaches the next status (failed or done)\n# At \"{base_url}/tasklist\", all completed(failed or done) tasks are shown.\n# At \"{base_url}/tasklist\", a user can select one specific task to see\n# how its running-time has changed over time.\n# At \"{base_url}/tasklist/{task_name}\", it visualizes a multi-bar graph\n# that represents the changes of the running-time for a selected task\n# up to the next status (failed or done).\n# This visualization let us know how the running-time of the specific task\n# has changed over time.\n#\n# Copyright 2015 Naver Corp.\n# Author Yeseul Park ([email protected])\n#\n\nimport datetime\nimport logging\nfrom contextlib import contextmanager\n\nfrom luigi import six\n\nfrom luigi import configuration\nfrom luigi import task_history\nfrom luigi.task_status import DONE, FAILED, PENDING, RUNNING\n\nimport sqlalchemy\nimport sqlalchemy.ext.declarative\nimport sqlalchemy.orm\nimport sqlalchemy.orm.collections\nfrom sqlalchemy.engine import reflection\nBase = sqlalchemy.ext.declarative.declarative_base()\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass DbTaskHistory(task_history.TaskHistory):\n \"\"\"\n Task History that writes to a database using sqlalchemy.\n Also has methods for useful db queries.\n \"\"\"\n CURRENT_SOURCE_VERSION = 1\n\n @contextmanager\n def _session(self, session=None):\n if session:\n yield session\n else:\n session = self.session_factory()\n try:\n yield session\n except BaseException:\n session.rollback()\n raise\n else:\n session.commit()\n\n def __init__(self):\n config = configuration.get_config()\n connection_string = config.get('task_history', 'db_connection')\n self.engine = sqlalchemy.create_engine(connection_string)\n self.session_factory = sqlalchemy.orm.sessionmaker(bind=self.engine, expire_on_commit=False)\n Base.metadata.create_all(self.engine)\n self.tasks = {} # task_id -> TaskRecord\n\n _upgrade_schema(self.engine)\n\n def task_scheduled(self, task):\n htask = self._get_task(task, status=PENDING)\n self._add_task_event(htask, TaskEvent(event_name=PENDING, ts=datetime.datetime.now()))\n\n def task_finished(self, task, successful):\n event_name = DONE if successful else FAILED\n htask = self._get_task(task, status=event_name)\n self._add_task_event(htask, TaskEvent(event_name=event_name, ts=datetime.datetime.now()))\n\n def task_started(self, task, worker_host):\n htask = self._get_task(task, status=RUNNING, host=worker_host)\n self._add_task_event(htask, TaskEvent(event_name=RUNNING, ts=datetime.datetime.now()))\n\n def _get_task(self, task, status, host=None):\n if task.id in self.tasks:\n htask = self.tasks[task.id]\n htask.status = status\n if host:\n htask.host = host\n else:\n htask = self.tasks[task.id] = task_history.StoredTask(task, status, host)\n return htask\n\n def _add_task_event(self, task, event):\n for (task_record, session) in self._find_or_create_task(task):\n task_record.events.append(event)\n\n def _find_or_create_task(self, task):\n with self._session() as session:\n if task.record_id is not None:\n logger.debug(\"Finding task with record_id [%d]\", task.record_id)\n task_record = session.query(TaskRecord).get(task.record_id)\n if not task_record:\n raise Exception(\"Task with record_id, but no matching Task record!\")\n yield (task_record, session)\n else:\n 
task_record = TaskRecord(task_id=task._task.id, name=task.task_family, host=task.host)\n for (k, v) in six.iteritems(task.parameters):\n task_record.parameters[k] = TaskParameter(name=k, value=v)\n session.add(task_record)\n yield (task_record, session)\n if task.host:\n task_record.host = task.host\n task.record_id = task_record.id\n\n def find_all_by_parameters(self, task_name, session=None, **task_params):\n \"\"\"\n Find tasks with the given task_name and the same parameters as the kwargs.\n \"\"\"\n with self._session(session) as session:\n query = session.query(TaskRecord).join(TaskEvent).filter(TaskRecord.name == task_name)\n for (k, v) in six.iteritems(task_params):\n alias = sqlalchemy.orm.aliased(TaskParameter)\n query = query.join(alias).filter(alias.name == k, alias.value == v)\n\n tasks = query.order_by(TaskEvent.ts)\n for task in tasks:\n # Sanity check\n assert all(k in task.parameters and v == str(task.parameters[k].value) for (k, v) in six.iteritems(task_params))\n\n yield task\n\n def find_all_by_name(self, task_name, session=None):\n \"\"\"\n Find all tasks with the given task_name.\n \"\"\"\n return self.find_all_by_parameters(task_name, session)\n\n def find_latest_runs(self, session=None):\n \"\"\"\n Return tasks that have been updated in the past 24 hours.\n \"\"\"\n with self._session(session) as session:\n yesterday = datetime.datetime.now() - datetime.timedelta(days=1)\n return session.query(TaskRecord).\\\n join(TaskEvent).\\\n filter(TaskEvent.ts >= yesterday).\\\n group_by(TaskRecord.id, TaskEvent.event_name, TaskEvent.ts).\\\n order_by(TaskEvent.ts.desc()).\\\n all()\n\n def find_all_runs(self, session=None):\n \"\"\"\n Return all tasks that have been updated.\n \"\"\"\n with self._session(session) as session:\n return session.query(TaskRecord).all()\n\n def find_all_events(self, session=None):\n \"\"\"\n Return all running/failed/done events.\n \"\"\"\n with self._session(session) as session:\n return session.query(TaskEvent).all()\n\n def find_task_by_id(self, id, session=None):\n \"\"\"\n Find task with the given record ID.\n \"\"\"\n with self._session(session) as session:\n return session.query(TaskRecord).get(id)\n\n\nclass TaskParameter(Base):\n \"\"\"\n Table to track luigi.Parameter()s of a Task.\n \"\"\"\n __tablename__ = 'task_parameters'\n task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), primary_key=True)\n name = sqlalchemy.Column(sqlalchemy.String(128), primary_key=True)\n value = sqlalchemy.Column(sqlalchemy.Text())\n\n def __repr__(self):\n return \"TaskParameter(task_id=%d, name=%s, value=%s)\" % (self.task_id, self.name, self.value)\n\n\nclass TaskEvent(Base):\n \"\"\"\n Table to track when a task is scheduled, starts, finishes, and fails.\n \"\"\"\n __tablename__ = 'task_events'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('tasks.id'), index=True)\n event_name = sqlalchemy.Column(sqlalchemy.String(20))\n ts = sqlalchemy.Column(sqlalchemy.TIMESTAMP, index=True, nullable=False)\n\n def __repr__(self):\n return \"TaskEvent(task_id=%s, event_name=%s, ts=%s\" % (self.task_id, self.event_name, self.ts)\n\n\nclass TaskRecord(Base):\n \"\"\"\n Base table to track information about a luigi.Task.\n\n References to other tables are available through task.events, task.parameters, etc.\n \"\"\"\n __tablename__ = 'tasks'\n id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)\n task_id = 
sqlalchemy.Column(sqlalchemy.String(200), index=True)\n name = sqlalchemy.Column(sqlalchemy.String(128), index=True)\n host = sqlalchemy.Column(sqlalchemy.String(128))\n parameters = sqlalchemy.orm.relationship(\n 'TaskParameter',\n collection_class=sqlalchemy.orm.collections.attribute_mapped_collection('name'),\n cascade=\"all, delete-orphan\")\n events = sqlalchemy.orm.relationship(\n 'TaskEvent',\n order_by=(sqlalchemy.desc(TaskEvent.ts), sqlalchemy.desc(TaskEvent.id)),\n backref='task')\n\n def __repr__(self):\n return \"TaskRecord(name=%s, host=%s)\" % (self.name, self.host)\n\n\ndef _upgrade_schema(engine):\n \"\"\"\n Ensure the database schema is up to date with the codebase.\n\n :param engine: SQLAlchemy engine of the underlying database.\n \"\"\"\n inspector = reflection.Inspector.from_engine(engine)\n with engine.connect() as conn:\n\n # Upgrade 1. Add task_id column and index to tasks\n if 'task_id' not in [x['name'] for x in inspector.get_columns('tasks')]:\n logger.warning('Upgrading DbTaskHistory schema: Adding tasks.task_id')\n conn.execute('ALTER TABLE tasks ADD COLUMN task_id VARCHAR(200)')\n conn.execute('CREATE INDEX ix_task_id ON tasks (task_id)')\n\n # Upgrade 2. Alter value column to be TEXT, note that this is idempotent so no if-guard\n if 'mysql' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters MODIFY COLUMN value TEXT')\n elif 'oracle' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters MODIFY value TEXT')\n elif 'mssql' in engine.dialect.name:\n conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT')\n elif 'postgresql' in engine.dialect.name:\n if str([x for x in inspector.get_columns('task_parameters')\n if x['name'] == 'value'][0]['type']) != 'TEXT':\n conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')\n elif 'sqlite' in engine.dialect.name:\n # SQLite does not support changing column types. A database file will need\n # to be used to pickup this migration change.\n for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall():\n if i['name'] == 'value' and i['type'] != 'TEXT':\n logger.warning(\n 'SQLite can not change column types. Please use a new database '\n 'to pickup column type changes.'\n )\n else:\n logger.warning(\n 'SQLAlcheny dialect {} could not be migrated to the TEXT type'.format(\n engine.dialect\n )\n )\n", "path": "luigi/db_task_history.py"}]}
3,743
193
gh_patches_debug_1067
rasdani/github-patches
git_diff
encode__httpx-1799
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update h2 pin? ### Discussed in https://github.com/encode/httpx/discussions/1485 <div type='discussions-op-text'> <sup>Originally posted by **HarrySky** February 24, 2021</sup> Hi, some time ago `h2` pin was updated in `httpcore`: https://github.com/encode/httpcore/pull/208 But it is still pinned to `3.*` in this package's `http2` extra: https://github.com/encode/httpx/blob/0f280af8b170ed5cc48c12a894f71a8b5762f748/setup.py#L65 This is not an issue, as I can just add `h2>=3,<5` to my setup.py instead of using `httpx[http2]`, but maybe you want dependencies to be in sync with `httpcore`. EDIT: Using git blame we can see that before `http2` extra - `httpcore[http2]` was used instead of `h2` dependency directly.</div> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import re 5 from pathlib import Path 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 version = Path(package, "__version__.py").read_text() 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1) 16 17 18 def get_long_description(): 19 """ 20 Return the README. 21 """ 22 long_description = "" 23 with open("README.md", encoding="utf8") as f: 24 long_description += f.read() 25 long_description += "\n\n" 26 with open("CHANGELOG.md", encoding="utf8") as f: 27 long_description += f.read() 28 return long_description 29 30 31 def get_packages(package): 32 """ 33 Return root package and all sub-packages. 
34 """ 35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")] 36 37 38 setup( 39 name="httpx", 40 python_requires=">=3.6", 41 version=get_version("httpx"), 42 url="https://github.com/encode/httpx", 43 project_urls={ 44 "Changelog": "https://github.com/encode/httpx/blob/master/CHANGELOG.md", 45 "Documentation": "https://www.python-httpx.org", 46 "Source": "https://github.com/encode/httpx", 47 }, 48 license="BSD", 49 description="The next generation HTTP client.", 50 long_description=get_long_description(), 51 long_description_content_type="text/markdown", 52 author="Tom Christie", 53 author_email="[email protected]", 54 package_data={"httpx": ["py.typed"]}, 55 packages=get_packages("httpx"), 56 include_package_data=True, 57 zip_safe=False, 58 install_requires=[ 59 "certifi", 60 "sniffio", 61 "rfc3986[idna2008]>=1.3,<2", 62 "httpcore>=0.13.3,<0.14.0", 63 "async_generator; python_version < '3.7'" 64 ], 65 extras_require={ 66 "http2": "h2==3.*", 67 "brotli": "brotlicffi==1.*", 68 }, 69 classifiers=[ 70 "Development Status :: 4 - Beta", 71 "Environment :: Web Environment", 72 "Intended Audience :: Developers", 73 "License :: OSI Approved :: BSD License", 74 "Operating System :: OS Independent", 75 "Topic :: Internet :: WWW/HTTP", 76 "Framework :: AsyncIO", 77 "Framework :: Trio", 78 "Programming Language :: Python :: 3", 79 "Programming Language :: Python :: 3.6", 80 "Programming Language :: Python :: 3.7", 81 "Programming Language :: Python :: 3.8", 82 "Programming Language :: Python :: 3.9", 83 "Programming Language :: Python :: 3.10", 84 "Programming Language :: Python :: 3 :: Only", 85 ], 86 ) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ "async_generator; python_version < '3.7'" ], extras_require={ - "http2": "h2==3.*", + "http2": "h2>=3,<5", "brotli": "brotlicffi==1.*", }, classifiers=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n \"async_generator; python_version < '3.7'\"\n ],\n extras_require={\n- \"http2\": \"h2==3.*\",\n+ \"http2\": \"h2>=3,<5\",\n \"brotli\": \"brotlicffi==1.*\",\n },\n classifiers=[\n", "issue": "Update h2 pin?\n### Discussed in https://github.com/encode/httpx/discussions/1485\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **HarrySky** February 24, 2021</sup>\r\nHi, some time ago `h2` pin was updated in `httpcore`: https://github.com/encode/httpcore/pull/208\r\n\r\nBut it is still pinned to `3.*` in this package's `http2` extra:\r\nhttps://github.com/encode/httpx/blob/0f280af8b170ed5cc48c12a894f71a8b5762f748/setup.py#L65\r\n\r\nThis is not an issue, as I can just add `h2>=3,<5` to my setup.py instead of using `httpx[http2]`, but maybe you want dependencies to be in sync with `httpcore`.\r\n\r\nEDIT: Using git blame we can see that before `http2` extra - `httpcore[http2]` was used instead of `h2` dependency directly.</div>\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n project_urls={\n \"Changelog\": \"https://github.com/encode/httpx/blob/master/CHANGELOG.md\",\n \"Documentation\": \"https://www.python-httpx.org\",\n \"Source\": \"https://github.com/encode/httpx\",\n },\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.13.3,<0.14.0\",\n \"async_generator; python_version < '3.7'\"\n ],\n extras_require={\n \"http2\": \"h2==3.*\",\n \"brotli\": \"brotlicffi==1.*\",\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n 
],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n project_urls={\n \"Changelog\": \"https://github.com/encode/httpx/blob/master/CHANGELOG.md\",\n \"Documentation\": \"https://www.python-httpx.org\",\n \"Source\": \"https://github.com/encode/httpx\",\n },\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"sniffio\",\n \"rfc3986[idna2008]>=1.3,<2\",\n \"httpcore>=0.13.3,<0.14.0\",\n \"async_generator; python_version < '3.7'\"\n ],\n extras_require={\n \"http2\": \"h2>=3,<5\",\n \"brotli\": \"brotlicffi==1.*\",\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n ],\n)\n", "path": "setup.py"}]}
1,315
98
gh_patches_debug_15412
rasdani/github-patches
git_diff
bridgecrewio__checkov-1928
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CKV_AZURE_53 is a duplicate of CKV_AZURE_90 **Is your feature request related to a problem? Please describe.** CKV_AZURE_53 is a duplicate of CKV_AZURE_90. We should use only one. **Describe the solution you'd like** N/A **Describe alternatives you've considered** N/A **Additional context** N/A --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py` Content: ``` 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure that MySQL server disables public network access" 8 id = "CKV_AZURE_90" 9 supported_resources = ['azurerm_mysql_server'] 10 categories = [CheckCategories.NETWORKING] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return 'public_network_access_enabled' 15 16 def scan_resource_conf(self, conf): 17 public_access = conf.get('public_network_access_enabled', [True]) 18 if public_access[0]: 19 return CheckResult.FAILED 20 return CheckResult.PASSED 21 22 23 check = MySQLServerHasPublicAccessDisabled() 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py b/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py deleted file mode 100644 --- a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py +++ /dev/null @@ -1,23 +0,0 @@ -from checkov.common.models.enums import CheckCategories, CheckResult -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck - - -class MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck): - def __init__(self): - name = "Ensure that MySQL server disables public network access" - id = "CKV_AZURE_90" - supported_resources = ['azurerm_mysql_server'] - categories = [CheckCategories.NETWORKING] - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - - def get_inspected_key(self): - return 'public_network_access_enabled' - - def scan_resource_conf(self, conf): - public_access = conf.get('public_network_access_enabled', [True]) - if public_access[0]: - return CheckResult.FAILED - return CheckResult.PASSED - - -check = MySQLServerHasPublicAccessDisabled()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py b/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-from checkov.common.models.enums import CheckCategories, CheckResult\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-\n-\n-class MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that MySQL server disables public network access\"\n- id = \"CKV_AZURE_90\"\n- supported_resources = ['azurerm_mysql_server']\n- categories = [CheckCategories.NETWORKING]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return 'public_network_access_enabled'\n-\n- def scan_resource_conf(self, conf):\n- public_access = conf.get('public_network_access_enabled', [True])\n- if public_access[0]:\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n-\n-\n-check = MySQLServerHasPublicAccessDisabled()\n", "issue": "CKV_AZURE_53 is a duplicate of CKV_AZURE_90\n**Is your feature request related to a problem? Please describe.**\r\nCKV_AZURE_53 is a duplicate of CKV_AZURE_90. We should use only one.\r\n\r\n**Describe the solution you'd like**\r\nN/A\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass MySQLServerHasPublicAccessDisabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that MySQL server disables public network access\"\n id = \"CKV_AZURE_90\"\n supported_resources = ['azurerm_mysql_server']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'public_network_access_enabled'\n\n def scan_resource_conf(self, conf):\n public_access = conf.get('public_network_access_enabled', [True])\n if public_access[0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = MySQLServerHasPublicAccessDisabled()\n", "path": "checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py"}], "after_files": [{"content": null, "path": "checkov/terraform/checks/resource/azure/MySQLServerPublicAccessDisabled.py"}]}
595
293
gh_patches_debug_9349
rasdani/github-patches
git_diff
nerfstudio-project__nerfstudio-1919
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pytorch 2.0 nerfacto optimizer inf check error **Describe the bug** Nerfacto fails under PyTorch 2.0 with the `no inf checks error`. **Error** ``` Traceback (most recent call last): File "/home/kulhajon/.conda/envs/nerfstudio-torch20/bin/ns-train", line 8, in <module> sys.exit(entrypoint()) File "/home/kulhajon/repos/nerfstudio/scripts/train.py", line 247, in entrypoint main( File "/home/kulhajon/repos/nerfstudio/scripts/train.py", line 233, in main launch( File "/home/kulhajon/repos/nerfstudio/scripts/train.py", line 172, in launch main_func(local_rank=0, world_size=world_size, config=config) File "/home/kulhajon/repos/nerfstudio/scripts/train.py", line 87, in train_loop trainer.train() File "/home/kulhajon/repos/nerfstudio/nerfstudio/engine/trainer.py", line 236, in train loss, loss_dict, metrics_dict = self.train_iteration(step) File "/home/kulhajon/repos/nerfstudio/nerfstudio/utils/profiler.py", line 93, in inner out = func(*args, **kwargs) File "/home/kulhajon/repos/nerfstudio/nerfstudio/engine/trainer.py", line 425, in train_iteration self.optimizers.optimizer_scaler_step_all(self.grad_scaler) File "/home/kulhajon/repos/nerfstudio/nerfstudio/engine/optimizers.py", line 130, in optimizer_scaler_step_all grad_scaler.step(optimizer) File "/home/kulhajon/.conda/envs/nerfstudio-torch20/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py", line 368, in step assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer." AssertionError: No inf checks were recorded for this optimizer. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nerfstudio/engine/optimizers.py` Content: ``` 1 # Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 Optimizers class. 17 """ 18 from __future__ import annotations 19 20 from dataclasses import dataclass 21 from typing import Any, Dict, List, Optional, Type 22 23 import torch 24 from torch.cuda.amp.grad_scaler import GradScaler 25 from torch.nn.parameter import Parameter 26 27 from nerfstudio.configs import base_config 28 from nerfstudio.utils import writer 29 30 31 # Optimizer related configs 32 @dataclass 33 class OptimizerConfig(base_config.PrintableConfig): 34 """Basic optimizer config with RAdam""" 35 36 _target: Type = torch.optim.Adam 37 """The optimizer class to use.""" 38 lr: float = 0.0005 39 """The learning rate to use.""" 40 eps: float = 1e-08 41 """The epsilon value to use.""" 42 max_norm: Optional[float] = None 43 """The max norm to use for gradient clipping.""" 44 45 # TODO: somehow make this more generic. i dont like the idea of overriding the setup function 46 # but also not sure how to go about passing things into predefined torch objects. 
47 def setup(self, params) -> torch.optim.Optimizer: 48 """Returns the instantiated object using the config.""" 49 kwargs = vars(self).copy() 50 kwargs.pop("_target") 51 kwargs.pop("max_norm") 52 return self._target(params, **kwargs) 53 54 55 @dataclass 56 class AdamOptimizerConfig(OptimizerConfig): 57 """Basic optimizer config with Adam""" 58 59 _target: Type = torch.optim.Adam 60 weight_decay: float = 0 61 """The weight decay to use.""" 62 63 64 @dataclass 65 class RAdamOptimizerConfig(OptimizerConfig): 66 """Basic optimizer config with RAdam""" 67 68 _target: Type = torch.optim.RAdam 69 weight_decay: float = 0 70 """The weight decay to use.""" 71 72 73 class Optimizers: 74 """A set of optimizers. 75 76 Args: 77 config: The optimizer configuration object. 78 param_groups: A dictionary of parameter groups to optimize. 79 """ 80 81 def __init__(self, config: Dict[str, Any], param_groups: Dict[str, List[Parameter]]) -> None: 82 self.config = config 83 self.optimizers = {} 84 self.schedulers = {} 85 self.parameters = {} 86 for param_group_name, params in param_groups.items(): 87 lr_init = config[param_group_name]["optimizer"].lr 88 self.optimizers[param_group_name] = config[param_group_name]["optimizer"].setup(params=params) 89 self.parameters[param_group_name] = params 90 if config[param_group_name]["scheduler"]: 91 self.schedulers[param_group_name] = ( 92 config[param_group_name]["scheduler"] 93 .setup() 94 .get_scheduler(optimizer=self.optimizers[param_group_name], lr_init=lr_init) 95 ) 96 97 def optimizer_step(self, param_group_name: str) -> None: 98 """Fetch and step corresponding optimizer. 99 100 Args: 101 param_group_name: name of optimizer to step forward 102 """ 103 self.optimizers[param_group_name].step() 104 105 def scheduler_step(self, param_group_name: str) -> None: 106 """Fetch and step corresponding scheduler. 107 108 Args: 109 param_group_name: name of scheduler to step forward 110 """ 111 if self.config.param_group_name.scheduler: # type: ignore 112 self.schedulers[param_group_name].step() 113 114 def zero_grad_all(self) -> None: 115 """Zero the gradients for all optimizer parameters.""" 116 for _, optimizer in self.optimizers.items(): 117 optimizer.zero_grad() 118 119 def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None: 120 """Take an optimizer step using a grad scaler. 121 122 Args: 123 grad_scaler: GradScaler to use 124 """ 125 for param_group, optimizer in self.optimizers.items(): 126 max_norm = self.config[param_group]["optimizer"].max_norm 127 if max_norm is not None: 128 grad_scaler.unscale_(optimizer) 129 torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm) 130 grad_scaler.step(optimizer) 131 132 def optimizer_step_all(self) -> None: 133 """Run step for all optimizers.""" 134 for param_group, optimizer in self.optimizers.items(): 135 # note that they key is the parameter name 136 max_norm = self.config[param_group]["optimizer"].max_norm 137 if max_norm is not None: 138 torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm) 139 optimizer.step() 140 141 def scheduler_step_all(self, step: int) -> None: 142 """Run step for all schedulers. 143 144 Args: 145 step: the current step 146 """ 147 for param_group_name, scheduler in self.schedulers.items(): 148 scheduler.step() 149 # TODO(ethan): clean this up. why is there indexing into a list? 
150 lr = scheduler.get_last_lr()[0] 151 writer.put_scalar(name=f"learning_rate/{param_group_name}", scalar=lr, step=step) 152 153 def load_optimizers(self, loaded_state: Dict[str, Any]) -> None: 154 """Helper to load the optimizer state from previous checkpoint 155 156 Args: 157 loaded_state: the state from the previous checkpoint 158 """ 159 for k, v in loaded_state.items(): 160 self.optimizers[k].load_state_dict(v) 161 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nerfstudio/engine/optimizers.py b/nerfstudio/engine/optimizers.py --- a/nerfstudio/engine/optimizers.py +++ b/nerfstudio/engine/optimizers.py @@ -127,7 +127,8 @@ if max_norm is not None: grad_scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm) - grad_scaler.step(optimizer) + if any(any(p.grad is not None for p in g["params"]) for g in optimizer.param_groups): + grad_scaler.step(optimizer) def optimizer_step_all(self) -> None: """Run step for all optimizers."""
{"golden_diff": "diff --git a/nerfstudio/engine/optimizers.py b/nerfstudio/engine/optimizers.py\n--- a/nerfstudio/engine/optimizers.py\n+++ b/nerfstudio/engine/optimizers.py\n@@ -127,7 +127,8 @@\n if max_norm is not None:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n- grad_scaler.step(optimizer)\n+ if any(any(p.grad is not None for p in g[\"params\"]) for g in optimizer.param_groups):\n+ grad_scaler.step(optimizer)\n \n def optimizer_step_all(self) -> None:\n \"\"\"Run step for all optimizers.\"\"\"\n", "issue": "Pytorch 2.0 nerfacto optimizer inf check error\n**Describe the bug**\r\nNerfacto fails under PyTorch 2.0 with the `no inf checks error`.\r\n\r\n**Error**\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/kulhajon/.conda/envs/nerfstudio-torch20/bin/ns-train\", line 8, in <module>\r\n sys.exit(entrypoint())\r\n File \"/home/kulhajon/repos/nerfstudio/scripts/train.py\", line 247, in entrypoint\r\n main(\r\n File \"/home/kulhajon/repos/nerfstudio/scripts/train.py\", line 233, in main\r\n launch(\r\n File \"/home/kulhajon/repos/nerfstudio/scripts/train.py\", line 172, in launch\r\n main_func(local_rank=0, world_size=world_size, config=config)\r\n File \"/home/kulhajon/repos/nerfstudio/scripts/train.py\", line 87, in train_loop\r\n trainer.train()\r\n File \"/home/kulhajon/repos/nerfstudio/nerfstudio/engine/trainer.py\", line 236, in train\r\n loss, loss_dict, metrics_dict = self.train_iteration(step)\r\n File \"/home/kulhajon/repos/nerfstudio/nerfstudio/utils/profiler.py\", line 93, in inner\r\n out = func(*args, **kwargs)\r\n File \"/home/kulhajon/repos/nerfstudio/nerfstudio/engine/trainer.py\", line 425, in train_iteration\r\n self.optimizers.optimizer_scaler_step_all(self.grad_scaler)\r\n File \"/home/kulhajon/repos/nerfstudio/nerfstudio/engine/optimizers.py\", line 130, in optimizer_scaler_step_all\r\n grad_scaler.step(optimizer)\r\n File \"/home/kulhajon/.conda/envs/nerfstudio-torch20/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py\", line 368, in step\r\n assert len(optimizer_state[\"found_inf_per_device\"]) > 0, \"No inf checks were recorded for this optimizer.\"\r\nAssertionError: No inf checks were recorded for this optimizer.\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nOptimizers class.\n\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Type\n\nimport torch\nfrom torch.cuda.amp.grad_scaler import GradScaler\nfrom torch.nn.parameter import Parameter\n\nfrom nerfstudio.configs import base_config\nfrom nerfstudio.utils import writer\n\n\n# Optimizer related configs\n@dataclass\nclass OptimizerConfig(base_config.PrintableConfig):\n \"\"\"Basic optimizer config with RAdam\"\"\"\n\n _target: Type = torch.optim.Adam\n \"\"\"The optimizer class to use.\"\"\"\n lr: float = 0.0005\n \"\"\"The learning rate to use.\"\"\"\n eps: float = 1e-08\n \"\"\"The epsilon value to use.\"\"\"\n max_norm: Optional[float] = None\n \"\"\"The max norm to use for gradient clipping.\"\"\"\n\n # TODO: somehow make this more generic. i dont like the idea of overriding the setup function\n # but also not sure how to go about passing things into predefined torch objects.\n def setup(self, params) -> torch.optim.Optimizer:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n kwargs = vars(self).copy()\n kwargs.pop(\"_target\")\n kwargs.pop(\"max_norm\")\n return self._target(params, **kwargs)\n\n\n@dataclass\nclass AdamOptimizerConfig(OptimizerConfig):\n \"\"\"Basic optimizer config with Adam\"\"\"\n\n _target: Type = torch.optim.Adam\n weight_decay: float = 0\n \"\"\"The weight decay to use.\"\"\"\n\n\n@dataclass\nclass RAdamOptimizerConfig(OptimizerConfig):\n \"\"\"Basic optimizer config with RAdam\"\"\"\n\n _target: Type = torch.optim.RAdam\n weight_decay: float = 0\n \"\"\"The weight decay to use.\"\"\"\n\n\nclass Optimizers:\n \"\"\"A set of optimizers.\n\n Args:\n config: The optimizer configuration object.\n param_groups: A dictionary of parameter groups to optimize.\n \"\"\"\n\n def __init__(self, config: Dict[str, Any], param_groups: Dict[str, List[Parameter]]) -> None:\n self.config = config\n self.optimizers = {}\n self.schedulers = {}\n self.parameters = {}\n for param_group_name, params in param_groups.items():\n lr_init = config[param_group_name][\"optimizer\"].lr\n self.optimizers[param_group_name] = config[param_group_name][\"optimizer\"].setup(params=params)\n self.parameters[param_group_name] = params\n if config[param_group_name][\"scheduler\"]:\n self.schedulers[param_group_name] = (\n config[param_group_name][\"scheduler\"]\n .setup()\n .get_scheduler(optimizer=self.optimizers[param_group_name], lr_init=lr_init)\n )\n\n def optimizer_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding optimizer.\n\n Args:\n param_group_name: name of optimizer to step forward\n \"\"\"\n self.optimizers[param_group_name].step()\n\n def scheduler_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding scheduler.\n\n Args:\n param_group_name: name of scheduler to step forward\n \"\"\"\n if self.config.param_group_name.scheduler: # type: ignore\n 
self.schedulers[param_group_name].step()\n\n def zero_grad_all(self) -> None:\n \"\"\"Zero the gradients for all optimizer parameters.\"\"\"\n for _, optimizer in self.optimizers.items():\n optimizer.zero_grad()\n\n def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:\n \"\"\"Take an optimizer step using a grad scaler.\n\n Args:\n grad_scaler: GradScaler to use\n \"\"\"\n for param_group, optimizer in self.optimizers.items():\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n grad_scaler.step(optimizer)\n\n def optimizer_step_all(self) -> None:\n \"\"\"Run step for all optimizers.\"\"\"\n for param_group, optimizer in self.optimizers.items():\n # note that they key is the parameter name\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n optimizer.step()\n\n def scheduler_step_all(self, step: int) -> None:\n \"\"\"Run step for all schedulers.\n\n Args:\n step: the current step\n \"\"\"\n for param_group_name, scheduler in self.schedulers.items():\n scheduler.step()\n # TODO(ethan): clean this up. why is there indexing into a list?\n lr = scheduler.get_last_lr()[0]\n writer.put_scalar(name=f\"learning_rate/{param_group_name}\", scalar=lr, step=step)\n\n def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Helper to load the optimizer state from previous checkpoint\n\n Args:\n loaded_state: the state from the previous checkpoint\n \"\"\"\n for k, v in loaded_state.items():\n self.optimizers[k].load_state_dict(v)\n", "path": "nerfstudio/engine/optimizers.py"}], "after_files": [{"content": "# Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nOptimizers class.\n\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Type\n\nimport torch\nfrom torch.cuda.amp.grad_scaler import GradScaler\nfrom torch.nn.parameter import Parameter\n\nfrom nerfstudio.configs import base_config\nfrom nerfstudio.utils import writer\n\n\n# Optimizer related configs\n@dataclass\nclass OptimizerConfig(base_config.PrintableConfig):\n \"\"\"Basic optimizer config with RAdam\"\"\"\n\n _target: Type = torch.optim.Adam\n \"\"\"The optimizer class to use.\"\"\"\n lr: float = 0.0005\n \"\"\"The learning rate to use.\"\"\"\n eps: float = 1e-08\n \"\"\"The epsilon value to use.\"\"\"\n max_norm: Optional[float] = None\n \"\"\"The max norm to use for gradient clipping.\"\"\"\n\n # TODO: somehow make this more generic. 
i dont like the idea of overriding the setup function\n # but also not sure how to go about passing things into predefined torch objects.\n def setup(self, params) -> torch.optim.Optimizer:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n kwargs = vars(self).copy()\n kwargs.pop(\"_target\")\n kwargs.pop(\"max_norm\")\n return self._target(params, **kwargs)\n\n\n@dataclass\nclass AdamOptimizerConfig(OptimizerConfig):\n \"\"\"Basic optimizer config with Adam\"\"\"\n\n _target: Type = torch.optim.Adam\n weight_decay: float = 0\n \"\"\"The weight decay to use.\"\"\"\n\n\n@dataclass\nclass RAdamOptimizerConfig(OptimizerConfig):\n \"\"\"Basic optimizer config with RAdam\"\"\"\n\n _target: Type = torch.optim.RAdam\n weight_decay: float = 0\n \"\"\"The weight decay to use.\"\"\"\n\n\nclass Optimizers:\n \"\"\"A set of optimizers.\n\n Args:\n config: The optimizer configuration object.\n param_groups: A dictionary of parameter groups to optimize.\n \"\"\"\n\n def __init__(self, config: Dict[str, Any], param_groups: Dict[str, List[Parameter]]) -> None:\n self.config = config\n self.optimizers = {}\n self.schedulers = {}\n self.parameters = {}\n for param_group_name, params in param_groups.items():\n lr_init = config[param_group_name][\"optimizer\"].lr\n self.optimizers[param_group_name] = config[param_group_name][\"optimizer\"].setup(params=params)\n self.parameters[param_group_name] = params\n if config[param_group_name][\"scheduler\"]:\n self.schedulers[param_group_name] = (\n config[param_group_name][\"scheduler\"]\n .setup()\n .get_scheduler(optimizer=self.optimizers[param_group_name], lr_init=lr_init)\n )\n\n def optimizer_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding optimizer.\n\n Args:\n param_group_name: name of optimizer to step forward\n \"\"\"\n self.optimizers[param_group_name].step()\n\n def scheduler_step(self, param_group_name: str) -> None:\n \"\"\"Fetch and step corresponding scheduler.\n\n Args:\n param_group_name: name of scheduler to step forward\n \"\"\"\n if self.config.param_group_name.scheduler: # type: ignore\n self.schedulers[param_group_name].step()\n\n def zero_grad_all(self) -> None:\n \"\"\"Zero the gradients for all optimizer parameters.\"\"\"\n for _, optimizer in self.optimizers.items():\n optimizer.zero_grad()\n\n def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:\n \"\"\"Take an optimizer step using a grad scaler.\n\n Args:\n grad_scaler: GradScaler to use\n \"\"\"\n for param_group, optimizer in self.optimizers.items():\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n grad_scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n if any(any(p.grad is not None for p in g[\"params\"]) for g in optimizer.param_groups):\n grad_scaler.step(optimizer)\n\n def optimizer_step_all(self) -> None:\n \"\"\"Run step for all optimizers.\"\"\"\n for param_group, optimizer in self.optimizers.items():\n # note that they key is the parameter name\n max_norm = self.config[param_group][\"optimizer\"].max_norm\n if max_norm is not None:\n torch.nn.utils.clip_grad_norm_(self.parameters[param_group], max_norm)\n optimizer.step()\n\n def scheduler_step_all(self, step: int) -> None:\n \"\"\"Run step for all schedulers.\n\n Args:\n step: the current step\n \"\"\"\n for param_group_name, scheduler in self.schedulers.items():\n scheduler.step()\n # TODO(ethan): clean this up. 
why is there indexing into a list?\n lr = scheduler.get_last_lr()[0]\n writer.put_scalar(name=f\"learning_rate/{param_group_name}\", scalar=lr, step=step)\n\n def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Helper to load the optimizer state from previous checkpoint\n\n Args:\n loaded_state: the state from the previous checkpoint\n \"\"\"\n for k, v in loaded_state.items():\n self.optimizers[k].load_state_dict(v)\n", "path": "nerfstudio/engine/optimizers.py"}]}
2,391
155
gh_patches_debug_26543
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- typo not being caught in Stack Parameters ### CloudFormation Lint Version 0.67.0 ### What operating system are you using? WSL2 ### Describe the bug Type is not getting caught inside stack parameters: ```yaml AWSTemplateFormatVersion: '2010-09-09' Description: 'Typo not caught by cfn-lint' Parameters: WakandaBucket: Type: String Description: Something. Resources: WakandaStack: Type: AWS::CloudFormation::Stack Properties: TemplateURL: "https://somewhere/foo.yaml" Parameters: Foobar: !Red WakandaBucket Wakanda4EverBucket: Type: AWS::S3::Bucket Properties: BucketName: !Ref WakandaBucket ``` >Note that after Foobar comes `!Red` insead of `!Ref`. cfn-lint output for above template is empty, exitcode 0. When I change the BucketName !Ref to !Red, that one gives me a proper error. But not for the Stack Parameters. ### Expected behavior I expect something like `Property X/Y/Z has an illegal function Fn::Red` ### Reproduction template ```yaml AWSTemplateFormatVersion: '2010-09-09' Description: 'Typo not caught by cfn-lint' Parameters: WakandaBucket: Type: String Description: Something. Resources: WakandaStack: Type: AWS::CloudFormation::Stack Properties: TemplateURL: "https://somewhere/foo.yaml" Parameters: Foobar: !Red WakandaBucket Wakanda4EverBucket: Type: AWS::S3::Bucket Properties: BucketName: !Ref WakandaBucket ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/resources/properties/ValuePrimitiveType.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 from cfnlint.rules import CloudFormationLintRule 6 from cfnlint.rules import RuleMatch 7 import cfnlint.helpers 8 9 10 class ValuePrimitiveType(CloudFormationLintRule): 11 """Check if Resource PrimitiveTypes are correct""" 12 13 id = 'E3012' 14 shortdesc = 'Check resource properties values' 15 description = 'Checks resource property values with Primitive Types for values that match those types.' 
16 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#valueprimitivetype' 17 tags = ['resources'] 18 19 strict_exceptions = { 20 'AWS::CloudFormation::Stack': ['Parameters'], 21 'AWS::Lambda::Function.Environment': ['Variables'], 22 } 23 24 def __init__(self): 25 """Init""" 26 super().__init__() 27 self.resource_specs = [] 28 self.property_specs = [] 29 self.config_definition = {'strict': {'default': False, 'type': 'boolean'}} 30 self.configure() 31 32 def initialize(self, cfn): 33 """Initialize the rule""" 34 specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0]) 35 self.property_specs = specs.get('PropertyTypes') 36 self.resource_specs = specs.get('ResourceTypes') 37 for resource_spec in self.resource_specs: 38 self.resource_property_types.append(resource_spec) 39 for property_spec in self.property_specs: 40 self.resource_sub_property_types.append(property_spec) 41 42 def _value_check(self, value, path, item_type, strict_check, extra_args): 43 """Checks non strict""" 44 matches = [] 45 if not strict_check: 46 try: 47 if item_type in ['String']: 48 str(value) 49 elif item_type in ['Boolean']: 50 if value not in ['True', 'true', 'False', 'false']: 51 message = f'Property {"/".join(map(str, path))} should be of type {item_type}' 52 matches.append(RuleMatch(path, message, **extra_args)) 53 elif item_type in ['Integer', 'Long', 'Double']: 54 if isinstance(value, bool): 55 message = f'Property {"/".join(map(str, path))} should be of type {item_type}' 56 matches.append(RuleMatch(path, message, **extra_args)) 57 elif item_type in ['Integer']: 58 int(value) 59 elif item_type in ['Long']: 60 # Some times python will strip the decimals when doing a conversion 61 if isinstance(value, float): 62 message = f'Property {"/".join(map(str, path))} should be of type {item_type}' 63 matches.append(RuleMatch(path, message, **extra_args)) 64 int(value) 65 else: # has to be a Double 66 float(value) 67 except Exception: # pylint: disable=W0703 68 message = ( 69 f'Property {"/".join(map(str, path))} should be of type {item_type}' 70 ) 71 matches.append(RuleMatch(path, message, **extra_args)) 72 else: 73 message = ( 74 f'Property {"/".join(map(str, path))} should be of type {item_type}' 75 ) 76 matches.append(RuleMatch(path, message, **extra_args)) 77 78 return matches 79 80 def check_primitive_type(self, value, item_type, path, strict_check): 81 """Chec item type""" 82 matches = [] 83 if isinstance(value, (dict, list)) and item_type == 'Json': 84 return matches 85 if item_type in ['String']: 86 if not isinstance(value, (str)): 87 extra_args = { 88 'actual_type': type(value).__name__, 89 'expected_type': str.__name__, 90 } 91 matches.extend( 92 self._value_check(value, path, item_type, strict_check, extra_args) 93 ) 94 elif item_type in ['Boolean']: 95 if not isinstance(value, (bool)): 96 extra_args = { 97 'actual_type': type(value).__name__, 98 'expected_type': bool.__name__, 99 } 100 matches.extend( 101 self._value_check(value, path, item_type, strict_check, extra_args) 102 ) 103 elif item_type in ['Double']: 104 if not isinstance(value, (float, int)): 105 extra_args = { 106 'actual_type': type(value).__name__, 107 'expected_type': [float.__name__, int.__name__], 108 } 109 matches.extend( 110 self._value_check(value, path, item_type, strict_check, extra_args) 111 ) 112 elif item_type in ['Integer']: 113 if not isinstance(value, (int)): 114 extra_args = { 115 'actual_type': type(value).__name__, 116 'expected_type': int.__name__, 117 } 118 
matches.extend( 119 self._value_check(value, path, item_type, strict_check, extra_args) 120 ) 121 elif item_type in ['Long']: 122 integer_types = (int,) 123 if not isinstance(value, integer_types): 124 extra_args = { 125 'actual_type': type(value).__name__, 126 'expected_type': ' or '.join([x.__name__ for x in integer_types]), 127 } 128 matches.extend( 129 self._value_check(value, path, item_type, strict_check, extra_args) 130 ) 131 elif isinstance(value, list): 132 message = ( 133 f'Property should be of type {item_type} at {"/".join(map(str, path))}' 134 ) 135 extra_args = { 136 'actual_type': type(value).__name__, 137 'expected_type': list.__name__, 138 } 139 matches.append(RuleMatch(path, message, **extra_args)) 140 141 return matches 142 143 def check_value(self, value, path, **kwargs): 144 """Check Value""" 145 matches = [] 146 primitive_type = kwargs.get('primitive_type', {}) 147 item_type = kwargs.get('item_type', {}) 148 strict_check = kwargs.get('non_strict', self.config['strict']) 149 150 if value is None: 151 message = f'Property value cannot be null {"/".join(map(str, path))}' 152 matches.append(RuleMatch(path, message)) 153 elif item_type in ['Map']: 154 if isinstance(value, dict): 155 for map_key, map_value in value.items(): 156 if not isinstance(map_value, dict): 157 matches.extend( 158 self.check_primitive_type( 159 map_value, 160 primitive_type, 161 path + [map_key], 162 strict_check, 163 ) 164 ) 165 else: 166 # some properties support primitive types and objects 167 # skip in the case it could be an object and the value is a object 168 if (item_type or primitive_type) and isinstance(value, dict): 169 return matches 170 matches.extend( 171 self.check_primitive_type(value, primitive_type, path, strict_check) 172 ) 173 174 return matches 175 176 def check(self, cfn, properties, specs, spec_type, path): 177 """Check itself""" 178 matches = [] 179 180 for prop in properties: 181 if prop in specs: 182 primitive_type = specs.get(prop).get('PrimitiveType') 183 if not primitive_type: 184 primitive_type = specs.get(prop).get('PrimitiveItemType') 185 if specs.get(prop).get('Type') in ['List', 'Map']: 186 item_type = specs.get(prop).get('Type') 187 else: 188 item_type = None 189 if primitive_type: 190 strict_check = self.config['strict'] 191 if spec_type in self.strict_exceptions: 192 if prop in self.strict_exceptions[spec_type]: 193 strict_check = False 194 matches.extend( 195 cfn.check_value( 196 properties, 197 prop, 198 path, 199 check_value=self.check_value, 200 primitive_type=primitive_type, 201 item_type=item_type, 202 non_strict=strict_check, 203 pass_if_null=True, 204 ) 205 ) 206 207 return matches 208 209 def match_resource_sub_properties(self, properties, property_type, path, cfn): 210 """Match for sub properties""" 211 matches = [] 212 213 if self.property_specs.get(property_type, {}).get('Properties'): 214 property_specs = self.property_specs.get(property_type, {}).get( 215 'Properties', {} 216 ) 217 matches.extend( 218 self.check(cfn, properties, property_specs, property_type, path) 219 ) 220 221 return matches 222 223 def match_resource_properties(self, properties, resource_type, path, cfn): 224 """Check CloudFormation Properties""" 225 matches = [] 226 227 resource_specs = self.resource_specs.get(resource_type, {}).get( 228 'Properties', {} 229 ) 230 matches.extend(self.check(cfn, properties, resource_specs, resource_type, path)) 231 232 return matches 233 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch 
according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py --- a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py +++ b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py @@ -162,6 +162,26 @@ strict_check, ) ) + else: + # types that represent a singular value (not json) + cfnlint.helpers.FUNCTIONS_SINGLE.sort() + if primitive_type in ['String', 'Boolean', 'Integer', 'Double']: + if len(map_value) != 1: + matches.append( + RuleMatch( + path, + f'Use a valid function [{", ".join(cfnlint.helpers.FUNCTIONS_SINGLE)}] when providing a value of type [{primitive_type}]', + ) + ) + else: + for k in map_value.keys(): + if k not in cfnlint.helpers.FUNCTIONS_SINGLE: + matches.append( + RuleMatch( + path, + f'Use a valid function [{", ".join(cfnlint.helpers.FUNCTIONS_SINGLE)}] when providing a value of type [{primitive_type}]', + ) + ) else: # some properties support primitive types and objects # skip in the case it could be an object and the value is a object
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n--- a/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n+++ b/src/cfnlint/rules/resources/properties/ValuePrimitiveType.py\n@@ -162,6 +162,26 @@\n strict_check,\n )\n )\n+ else:\n+ # types that represent a singular value (not json)\n+ cfnlint.helpers.FUNCTIONS_SINGLE.sort()\n+ if primitive_type in ['String', 'Boolean', 'Integer', 'Double']:\n+ if len(map_value) != 1:\n+ matches.append(\n+ RuleMatch(\n+ path,\n+ f'Use a valid function [{\", \".join(cfnlint.helpers.FUNCTIONS_SINGLE)}] when providing a value of type [{primitive_type}]',\n+ )\n+ )\n+ else:\n+ for k in map_value.keys():\n+ if k not in cfnlint.helpers.FUNCTIONS_SINGLE:\n+ matches.append(\n+ RuleMatch(\n+ path,\n+ f'Use a valid function [{\", \".join(cfnlint.helpers.FUNCTIONS_SINGLE)}] when providing a value of type [{primitive_type}]',\n+ )\n+ )\n else:\n # some properties support primitive types and objects\n # skip in the case it could be an object and the value is a object\n", "issue": "typo not being caught in Stack Parameters\n### CloudFormation Lint Version\r\n\r\n0.67.0\r\n\r\n### What operating system are you using?\r\n\r\nWSL2\r\n\r\n### Describe the bug\r\n\r\nType is not getting caught inside stack parameters:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: '2010-09-09'\r\nDescription: 'Typo not caught by cfn-lint'\r\n\r\nParameters:\r\n WakandaBucket:\r\n Type: String\r\n Description: Something.\r\n\r\nResources:\r\n WakandaStack:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n TemplateURL: \"https://somewhere/foo.yaml\"\r\n Parameters:\r\n Foobar: !Red WakandaBucket\r\n\r\n Wakanda4EverBucket:\r\n Type: AWS::S3::Bucket\r\n Properties:\r\n BucketName: !Ref WakandaBucket\r\n```\r\n\r\n>Note that after Foobar comes `!Red` insead of `!Ref`.\r\n\r\ncfn-lint output for above template is empty, exitcode 0.\r\n\r\nWhen I change the BucketName !Ref to !Red, that one gives me a proper error. But not for the Stack Parameters.\r\n\r\n### Expected behavior\r\n\r\nI expect something like `Property X/Y/Z has an illegal function Fn::Red`\r\n\r\n### Reproduction template\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: '2010-09-09'\r\nDescription: 'Typo not caught by cfn-lint'\r\n\r\nParameters:\r\n WakandaBucket:\r\n Type: String\r\n Description: Something.\r\n\r\nResources:\r\n WakandaStack:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n TemplateURL: \"https://somewhere/foo.yaml\"\r\n Parameters:\r\n Foobar: !Red WakandaBucket\r\n\r\n Wakanda4EverBucket:\r\n Type: AWS::S3::Bucket\r\n Properties:\r\n BucketName: !Ref WakandaBucket\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nimport cfnlint.helpers\n\n\nclass ValuePrimitiveType(CloudFormationLintRule):\n \"\"\"Check if Resource PrimitiveTypes are correct\"\"\"\n\n id = 'E3012'\n shortdesc = 'Check resource properties values'\n description = 'Checks resource property values with Primitive Types for values that match those types.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#valueprimitivetype'\n tags = ['resources']\n\n strict_exceptions = {\n 'AWS::CloudFormation::Stack': ['Parameters'],\n 'AWS::Lambda::Function.Environment': ['Variables'],\n }\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n self.resource_specs = []\n self.property_specs = []\n self.config_definition = {'strict': {'default': False, 'type': 'boolean'}}\n self.configure()\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])\n self.property_specs = specs.get('PropertyTypes')\n self.resource_specs = specs.get('ResourceTypes')\n for resource_spec in self.resource_specs:\n self.resource_property_types.append(resource_spec)\n for property_spec in self.property_specs:\n self.resource_sub_property_types.append(property_spec)\n\n def _value_check(self, value, path, item_type, strict_check, extra_args):\n \"\"\"Checks non strict\"\"\"\n matches = []\n if not strict_check:\n try:\n if item_type in ['String']:\n str(value)\n elif item_type in ['Boolean']:\n if value not in ['True', 'true', 'False', 'false']:\n message = f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer', 'Long', 'Double']:\n if isinstance(value, bool):\n message = f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer']:\n int(value)\n elif item_type in ['Long']:\n # Some times python will strip the decimals when doing a conversion\n if isinstance(value, float):\n message = f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n matches.append(RuleMatch(path, message, **extra_args))\n int(value)\n else: # has to be a Double\n float(value)\n except Exception: # pylint: disable=W0703\n message = (\n f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n )\n matches.append(RuleMatch(path, message, **extra_args))\n else:\n message = (\n f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n )\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_primitive_type(self, value, item_type, path, strict_check):\n \"\"\"Chec item type\"\"\"\n matches = []\n if isinstance(value, (dict, list)) and item_type == 'Json':\n return matches\n if item_type in ['String']:\n if not isinstance(value, (str)):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': str.__name__,\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Boolean']:\n if not isinstance(value, (bool)):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': bool.__name__,\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Double']:\n if not isinstance(value, (float, int)):\n 
extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': [float.__name__, int.__name__],\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Integer']:\n if not isinstance(value, (int)):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': int.__name__,\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Long']:\n integer_types = (int,)\n if not isinstance(value, integer_types):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': ' or '.join([x.__name__ for x in integer_types]),\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif isinstance(value, list):\n message = (\n f'Property should be of type {item_type} at {\"/\".join(map(str, path))}'\n )\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': list.__name__,\n }\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_value(self, value, path, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n primitive_type = kwargs.get('primitive_type', {})\n item_type = kwargs.get('item_type', {})\n strict_check = kwargs.get('non_strict', self.config['strict'])\n\n if value is None:\n message = f'Property value cannot be null {\"/\".join(map(str, path))}'\n matches.append(RuleMatch(path, message))\n elif item_type in ['Map']:\n if isinstance(value, dict):\n for map_key, map_value in value.items():\n if not isinstance(map_value, dict):\n matches.extend(\n self.check_primitive_type(\n map_value,\n primitive_type,\n path + [map_key],\n strict_check,\n )\n )\n else:\n # some properties support primitive types and objects\n # skip in the case it could be an object and the value is a object\n if (item_type or primitive_type) and isinstance(value, dict):\n return matches\n matches.extend(\n self.check_primitive_type(value, primitive_type, path, strict_check)\n )\n\n return matches\n\n def check(self, cfn, properties, specs, spec_type, path):\n \"\"\"Check itself\"\"\"\n matches = []\n\n for prop in properties:\n if prop in specs:\n primitive_type = specs.get(prop).get('PrimitiveType')\n if not primitive_type:\n primitive_type = specs.get(prop).get('PrimitiveItemType')\n if specs.get(prop).get('Type') in ['List', 'Map']:\n item_type = specs.get(prop).get('Type')\n else:\n item_type = None\n if primitive_type:\n strict_check = self.config['strict']\n if spec_type in self.strict_exceptions:\n if prop in self.strict_exceptions[spec_type]:\n strict_check = False\n matches.extend(\n cfn.check_value(\n properties,\n prop,\n path,\n check_value=self.check_value,\n primitive_type=primitive_type,\n item_type=item_type,\n non_strict=strict_check,\n pass_if_null=True,\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n if self.property_specs.get(property_type, {}).get('Properties'):\n property_specs = self.property_specs.get(property_type, {}).get(\n 'Properties', {}\n )\n matches.extend(\n self.check(cfn, properties, property_specs, property_type, path)\n )\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n resource_specs = self.resource_specs.get(resource_type, {}).get(\n 'Properties', {}\n )\n matches.extend(self.check(cfn, properties, 
resource_specs, resource_type, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValuePrimitiveType.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nimport cfnlint.helpers\n\n\nclass ValuePrimitiveType(CloudFormationLintRule):\n \"\"\"Check if Resource PrimitiveTypes are correct\"\"\"\n\n id = 'E3012'\n shortdesc = 'Check resource properties values'\n description = 'Checks resource property values with Primitive Types for values that match those types.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#valueprimitivetype'\n tags = ['resources']\n\n strict_exceptions = {\n 'AWS::CloudFormation::Stack': ['Parameters'],\n 'AWS::Lambda::Function.Environment': ['Variables'],\n }\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n self.resource_specs = []\n self.property_specs = []\n self.config_definition = {'strict': {'default': False, 'type': 'boolean'}}\n self.configure()\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n specs = cfnlint.helpers.RESOURCE_SPECS.get(cfn.regions[0])\n self.property_specs = specs.get('PropertyTypes')\n self.resource_specs = specs.get('ResourceTypes')\n for resource_spec in self.resource_specs:\n self.resource_property_types.append(resource_spec)\n for property_spec in self.property_specs:\n self.resource_sub_property_types.append(property_spec)\n\n def _value_check(self, value, path, item_type, strict_check, extra_args):\n \"\"\"Checks non strict\"\"\"\n matches = []\n if not strict_check:\n try:\n if item_type in ['String']:\n str(value)\n elif item_type in ['Boolean']:\n if value not in ['True', 'true', 'False', 'false']:\n message = f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer', 'Long', 'Double']:\n if isinstance(value, bool):\n message = f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n matches.append(RuleMatch(path, message, **extra_args))\n elif item_type in ['Integer']:\n int(value)\n elif item_type in ['Long']:\n # Some times python will strip the decimals when doing a conversion\n if isinstance(value, float):\n message = f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n matches.append(RuleMatch(path, message, **extra_args))\n int(value)\n else: # has to be a Double\n float(value)\n except Exception: # pylint: disable=W0703\n message = (\n f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n )\n matches.append(RuleMatch(path, message, **extra_args))\n else:\n message = (\n f'Property {\"/\".join(map(str, path))} should be of type {item_type}'\n )\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_primitive_type(self, value, item_type, path, strict_check):\n \"\"\"Chec item type\"\"\"\n matches = []\n if isinstance(value, (dict, list)) and item_type == 'Json':\n return matches\n if item_type in ['String']:\n if not isinstance(value, (str)):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': str.__name__,\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Boolean']:\n if not isinstance(value, (bool)):\n extra_args = {\n 'actual_type': 
type(value).__name__,\n 'expected_type': bool.__name__,\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Double']:\n if not isinstance(value, (float, int)):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': [float.__name__, int.__name__],\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Integer']:\n if not isinstance(value, (int)):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': int.__name__,\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif item_type in ['Long']:\n integer_types = (int,)\n if not isinstance(value, integer_types):\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': ' or '.join([x.__name__ for x in integer_types]),\n }\n matches.extend(\n self._value_check(value, path, item_type, strict_check, extra_args)\n )\n elif isinstance(value, list):\n message = (\n f'Property should be of type {item_type} at {\"/\".join(map(str, path))}'\n )\n extra_args = {\n 'actual_type': type(value).__name__,\n 'expected_type': list.__name__,\n }\n matches.append(RuleMatch(path, message, **extra_args))\n\n return matches\n\n def check_value(self, value, path, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n primitive_type = kwargs.get('primitive_type', {})\n item_type = kwargs.get('item_type', {})\n strict_check = kwargs.get('non_strict', self.config['strict'])\n\n if value is None:\n message = f'Property value cannot be null {\"/\".join(map(str, path))}'\n matches.append(RuleMatch(path, message))\n elif item_type in ['Map']:\n if isinstance(value, dict):\n for map_key, map_value in value.items():\n if not isinstance(map_value, dict):\n matches.extend(\n self.check_primitive_type(\n map_value,\n primitive_type,\n path + [map_key],\n strict_check,\n )\n )\n else:\n # types that represent a singular value (not json)\n cfnlint.helpers.FUNCTIONS_SINGLE.sort()\n if primitive_type in ['String', 'Boolean', 'Integer', 'Double']:\n if len(map_value) != 1:\n matches.append(\n RuleMatch(\n path,\n f'Use a valid function [{\", \".join(cfnlint.helpers.FUNCTIONS_SINGLE)}] when providing a value of type [{primitive_type}]',\n )\n )\n else:\n for k in map_value.keys():\n if k not in cfnlint.helpers.FUNCTIONS_SINGLE:\n matches.append(\n RuleMatch(\n path,\n f'Use a valid function [{\", \".join(cfnlint.helpers.FUNCTIONS_SINGLE)}] when providing a value of type [{primitive_type}]',\n )\n )\n else:\n # some properties support primitive types and objects\n # skip in the case it could be an object and the value is a object\n if (item_type or primitive_type) and isinstance(value, dict):\n return matches\n matches.extend(\n self.check_primitive_type(value, primitive_type, path, strict_check)\n )\n\n return matches\n\n def check(self, cfn, properties, specs, spec_type, path):\n \"\"\"Check itself\"\"\"\n matches = []\n\n for prop in properties:\n if prop in specs:\n primitive_type = specs.get(prop).get('PrimitiveType')\n if not primitive_type:\n primitive_type = specs.get(prop).get('PrimitiveItemType')\n if specs.get(prop).get('Type') in ['List', 'Map']:\n item_type = specs.get(prop).get('Type')\n else:\n item_type = None\n if primitive_type:\n strict_check = self.config['strict']\n if spec_type in self.strict_exceptions:\n if prop in self.strict_exceptions[spec_type]:\n strict_check = False\n matches.extend(\n cfn.check_value(\n properties,\n prop,\n 
path,\n check_value=self.check_value,\n primitive_type=primitive_type,\n item_type=item_type,\n non_strict=strict_check,\n pass_if_null=True,\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n if self.property_specs.get(property_type, {}).get('Properties'):\n property_specs = self.property_specs.get(property_type, {}).get(\n 'Properties', {}\n )\n matches.extend(\n self.check(cfn, properties, property_specs, property_type, path)\n )\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n resource_specs = self.resource_specs.get(resource_type, {}).get(\n 'Properties', {}\n )\n matches.extend(self.check(cfn, properties, resource_specs, resource_type, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ValuePrimitiveType.py"}]}
3,071
310
gh_patches_debug_196
rasdani/github-patches
git_diff
twisted__twisted-11622
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 22.8.0 This is the ticket to track the release of 22.8.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/twisted/_version.py` Content: ``` 1 """ 2 Provides Twisted version information. 3 """ 4 5 # This file is auto-generated! Do not edit! 6 # Use `python -m incremental.update Twisted` to change this file. 7 8 from incremental import Version 9 10 __version__ = Version("Twisted", 22, 4, 0, post=0) 11 __all__ = ["__version__"] 12 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/twisted/_version.py b/src/twisted/_version.py --- a/src/twisted/_version.py +++ b/src/twisted/_version.py @@ -7,5 +7,5 @@ from incremental import Version -__version__ = Version("Twisted", 22, 4, 0, post=0) +__version__ = Version("Twisted", 22, 8, 0, post=0) __all__ = ["__version__"]
{"golden_diff": "diff --git a/src/twisted/_version.py b/src/twisted/_version.py\n--- a/src/twisted/_version.py\n+++ b/src/twisted/_version.py\n@@ -7,5 +7,5 @@\n \n from incremental import Version\n \n-__version__ = Version(\"Twisted\", 22, 4, 0, post=0)\n+__version__ = Version(\"Twisted\", 22, 8, 0, post=0)\n __all__ = [\"__version__\"]\n", "issue": "Release 22.8.0\nThis is the ticket to track the release of 22.8.0\n", "before_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 22, 4, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}], "after_files": [{"content": "\"\"\"\nProvides Twisted version information.\n\"\"\"\n\n# This file is auto-generated! Do not edit!\n# Use `python -m incremental.update Twisted` to change this file.\n\nfrom incremental import Version\n\n__version__ = Version(\"Twisted\", 22, 8, 0, post=0)\n__all__ = [\"__version__\"]\n", "path": "src/twisted/_version.py"}]}
377
113
gh_patches_debug_3835
rasdani/github-patches
git_diff
pantsbuild__pants-18687
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong version of Python used to build `pex_binary` targets in `2.16.0rc0` **Describe the bug** * Our CI image contains both Python 3.8 and 3.9. * We set `[python].interpreter_constraints = ["==3.8.*"]` in `pants.toml`. * At least one `pex_binary` depends on a version of `numpy` that (for reasons we haven't dug into) only works with Python 3.8, not Python 3.9 * We haven't investigated the build failures because we expect everything to run against Python 3.8 as configured by `[python].interpreter_constraints` After upgrading to Pants 2.16.0rc0 we see failures building the `pex_binary` in CI, with errors that indicate the build process is trying to build a dependency (`numpy`) against Python 3.9 instead of the expected/configured Python 3.8 This is very concerning. We still run Python 3.8 everywhere in production, so I don't want Pexes to be building against Python 3.9. I've downgraded us back to 2.16.0a1 for now and confirmed this fixes the problem. **Pants version** 2.16.0rc0 **OS** Linux **Additional info** I am suspicious of https://github.com/pantsbuild/pants/commit/d3d325777952435186be42443fb28fde6771fae7 and https://github.com/pantsbuild/pants/commit/e8d387ba6b4d4502e3b6db5ae68ffe7beeeb10a7 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/python/pants/backend/python/util_rules/pex_cli.py` Content: ``` 1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import annotations 5 6 import dataclasses 7 from dataclasses import dataclass 8 from typing import Iterable, List, Mapping, Optional, Tuple 9 10 from pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem 11 from pants.backend.python.subsystems.setup import PythonSetup 12 from pants.backend.python.util_rules import pex_environment 13 from pants.backend.python.util_rules.pex_environment import PexEnvironment, PexSubsystem 14 from pants.core.util_rules import adhoc_binaries, external_tool 15 from pants.core.util_rules.adhoc_binaries import PythonBuildStandaloneBinary 16 from pants.core.util_rules.external_tool import ( 17 DownloadedExternalTool, 18 ExternalToolRequest, 19 TemplatedExternalTool, 20 ) 21 from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests 22 from pants.engine.internals.selectors import MultiGet 23 from pants.engine.platform import Platform 24 from pants.engine.process import Process, ProcessCacheScope 25 from pants.engine.rules import Get, collect_rules, rule 26 from pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content 27 from pants.util.frozendict import FrozenDict 28 from pants.util.logging import LogLevel 29 from pants.util.meta import classproperty 30 from pants.util.strutil import create_path_env_var 31 32 33 class PexCli(TemplatedExternalTool): 34 options_scope = "pex-cli" 35 name = "pex" 36 help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)." 
37 38 default_version = "v2.1.130" 39 default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex" 40 version_constraints = ">=2.1.124,<3.0" 41 42 @classproperty 43 def default_known_versions(cls): 44 return [ 45 "|".join( 46 ( 47 cls.default_version, 48 plat, 49 "0ffb9fe8146945031d596f4559bd1803d5d9f70fe318adb385ed8c4a44cb7dec", 50 "4082176", 51 ) 52 ) 53 for plat in ["macos_arm64", "macos_x86_64", "linux_x86_64", "linux_arm64"] 54 ] 55 56 57 @dataclass(frozen=True) 58 class PexCliProcess: 59 subcommand: tuple[str, ...] 60 extra_args: tuple[str, ...] 61 description: str = dataclasses.field(compare=False) 62 additional_input_digest: Optional[Digest] 63 extra_env: Optional[FrozenDict[str, str]] 64 output_files: Optional[Tuple[str, ...]] 65 output_directories: Optional[Tuple[str, ...]] 66 level: LogLevel 67 concurrency_available: int 68 cache_scope: ProcessCacheScope 69 70 def __init__( 71 self, 72 *, 73 subcommand: Iterable[str], 74 extra_args: Iterable[str], 75 description: str, 76 additional_input_digest: Optional[Digest] = None, 77 extra_env: Optional[Mapping[str, str]] = None, 78 output_files: Optional[Iterable[str]] = None, 79 output_directories: Optional[Iterable[str]] = None, 80 level: LogLevel = LogLevel.INFO, 81 concurrency_available: int = 0, 82 cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL, 83 ) -> None: 84 object.__setattr__(self, "subcommand", tuple(subcommand)) 85 object.__setattr__(self, "extra_args", tuple(extra_args)) 86 object.__setattr__(self, "description", description) 87 object.__setattr__(self, "additional_input_digest", additional_input_digest) 88 object.__setattr__(self, "extra_env", FrozenDict(extra_env) if extra_env else None) 89 object.__setattr__(self, "output_files", tuple(output_files) if output_files else None) 90 object.__setattr__( 91 self, "output_directories", tuple(output_directories) if output_directories else None 92 ) 93 object.__setattr__(self, "level", level) 94 object.__setattr__(self, "concurrency_available", concurrency_available) 95 object.__setattr__(self, "cache_scope", cache_scope) 96 97 self.__post_init__() 98 99 def __post_init__(self) -> None: 100 if "--pex-root-path" in self.extra_args: 101 raise ValueError("`--pex-root` flag not allowed. 
We set its value for you.") 102 103 104 class PexPEX(DownloadedExternalTool): 105 """The Pex PEX binary.""" 106 107 108 @rule 109 async def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX: 110 pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform)) 111 return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe) 112 113 114 @rule 115 async def setup_pex_cli_process( 116 request: PexCliProcess, 117 pex_pex: PexPEX, 118 pex_env: PexEnvironment, 119 bootstrap_python: PythonBuildStandaloneBinary, 120 python_native_code: PythonNativeCodeSubsystem.EnvironmentAware, 121 global_options: GlobalOptions, 122 pex_subsystem: PexSubsystem, 123 python_setup: PythonSetup, 124 ) -> Process: 125 tmpdir = ".tmp" 126 gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))] 127 128 cert_args = [] 129 if global_options.ca_certs_path: 130 ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path) 131 gets.append(Get(Digest, CreateDigest((ca_certs_fc,)))) 132 cert_args = ["--cert", ca_certs_fc.path] 133 134 digests_to_merge = [pex_pex.digest] 135 digests_to_merge.extend(await MultiGet(gets)) 136 if request.additional_input_digest: 137 digests_to_merge.append(request.additional_input_digest) 138 input_digest = await Get(Digest, MergeDigests(digests_to_merge)) 139 140 global_args = [ 141 # Ensure Pex and its subprocesses create temporary files in the the process execution 142 # sandbox. It may make sense to do this generally for Processes, but in the short term we 143 # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to 144 # perform. Making the TMPDIR local to the sandbox allows control via 145 # --local-execution-root-dir for the local case and should work well with remote cases where 146 # a remoting implementation has to allow for processes producing large binaries in a 147 # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via 148 # environment variable allows Pex to absolutize the path ensuring subprocesses that change 149 # CWD can find the TMPDIR. 150 "--tmpdir", 151 tmpdir, 152 ] 153 154 if request.concurrency_available > 0: 155 global_args.extend(["--jobs", "{pants_concurrency}"]) 156 157 verbosity_args = [f"-{'v' * pex_subsystem.verbosity}"] if pex_subsystem.verbosity > 0 else [] 158 159 # NB: We should always pass `--python-path`, as that tells Pex where to look for interpreters 160 # when `--python` isn't an absolute path. 161 resolve_args = [ 162 *cert_args, 163 "--python-path", 164 create_path_env_var(pex_env.interpreter_search_paths), 165 ] 166 # All old-style pex runs take the --pip-version flag, but only certain subcommands of the 167 # `pex3` console script do. So if invoked with a subcommand, the caller must selectively 168 # set --pip-version only on subcommands that take it. 169 pip_version_args = ( 170 [] if request.subcommand else ["--pip-version", python_setup.pip_version.value] 171 ) 172 args = [ 173 *request.subcommand, 174 *global_args, 175 *verbosity_args, 176 *pip_version_args, 177 *resolve_args, 178 # NB: This comes at the end because it may use `--` passthrough args, # which must come at 179 # the end. 
180 *request.extra_args, 181 ] 182 183 complete_pex_env = pex_env.in_sandbox(working_directory=None) 184 normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args) 185 env = { 186 **complete_pex_env.environment_dict(python=bootstrap_python), 187 **python_native_code.subprocess_env_vars, 188 **(request.extra_env or {}), 189 # If a subcommand is used, we need to use the `pex3` console script. 190 **({"PEX_SCRIPT": "pex3"} if request.subcommand else {}), 191 } 192 193 return Process( 194 normalized_argv, 195 description=request.description, 196 input_digest=input_digest, 197 env=env, 198 output_files=request.output_files, 199 output_directories=request.output_directories, 200 append_only_caches=complete_pex_env.append_only_caches, 201 immutable_input_digests=bootstrap_python.immutable_input_digests, 202 level=request.level, 203 concurrency_available=request.concurrency_available, 204 cache_scope=request.cache_scope, 205 ) 206 207 208 def rules(): 209 return [ 210 *collect_rules(), 211 *external_tool.rules(), 212 *pex_environment.rules(), 213 *adhoc_binaries.rules(), 214 ] 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py --- a/src/python/pants/backend/python/util_rules/pex_cli.py +++ b/src/python/pants/backend/python/util_rules/pex_cli.py @@ -37,7 +37,7 @@ default_version = "v2.1.130" default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex" - version_constraints = ">=2.1.124,<3.0" + version_constraints = ">=2.1.129,<3.0" @classproperty def default_known_versions(cls):
{"golden_diff": "diff --git a/src/python/pants/backend/python/util_rules/pex_cli.py b/src/python/pants/backend/python/util_rules/pex_cli.py\n--- a/src/python/pants/backend/python/util_rules/pex_cli.py\n+++ b/src/python/pants/backend/python/util_rules/pex_cli.py\n@@ -37,7 +37,7 @@\n \n default_version = \"v2.1.130\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n- version_constraints = \">=2.1.124,<3.0\"\n+ version_constraints = \">=2.1.129,<3.0\"\n \n @classproperty\n def default_known_versions(cls):\n", "issue": "Wrong version of Python used to build `pex_binary` targets in `2.16.0rc0`\n**Describe the bug**\r\n\r\n* Our CI image contains both Python 3.8 and 3.9.\r\n* We set `[python].interpreter_constraints = [\"==3.8.*\"]` in `pants.toml`.\r\n* At least one `pex_binary` depends on a version of `numpy` that (for reasons we haven't dug into) only works with Python 3.8, not Python 3.9\r\n * We haven't investigated the build failures because we expect everything to run against Python 3.8 as configured by `[python].interpreter_constraints`\r\n\r\nAfter upgrading to Pants 2.16.0rc0 we see failures building the `pex_binary` in CI, with errors that indicate the build process is trying to build a dependency (`numpy`) against Python 3.9 instead of the expected/configured Python 3.8\r\n\r\nThis is very concerning. We still run Python 3.8 everywhere in production, so I don't want Pexes to be building against Python 3.9. I've downgraded us back to 2.16.0a1 for now and confirmed this fixes the problem.\r\n\r\n**Pants version**\r\n\r\n2.16.0rc0\r\n\r\n**OS**\r\n\r\nLinux\r\n\r\n**Additional info**\r\n\r\nI am suspicious of https://github.com/pantsbuild/pants/commit/d3d325777952435186be42443fb28fde6771fae7 and https://github.com/pantsbuild/pants/commit/e8d387ba6b4d4502e3b6db5ae68ffe7beeeb10a7\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment, PexSubsystem\nfrom pants.core.util_rules import adhoc_binaries, external_tool\nfrom pants.core.util_rules.adhoc_binaries import PythonBuildStandaloneBinary\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexCli(TemplatedExternalTool):\n options_scope = \"pex-cli\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.130\"\n default_url_template = 
\"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.124,<3.0\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"0ffb9fe8146945031d596f4559bd1803d5d9f70fe318adb385ed8c4a44cb7dec\",\n \"4082176\",\n )\n )\n for plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n ]\n\n\n@dataclass(frozen=True)\nclass PexCliProcess:\n subcommand: tuple[str, ...]\n extra_args: tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n level: LogLevel\n concurrency_available: int\n cache_scope: ProcessCacheScope\n\n def __init__(\n self,\n *,\n subcommand: Iterable[str],\n extra_args: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n level: LogLevel = LogLevel.INFO,\n concurrency_available: int = 0,\n cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,\n ) -> None:\n object.__setattr__(self, \"subcommand\", tuple(subcommand))\n object.__setattr__(self, \"extra_args\", tuple(extra_args))\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"additional_input_digest\", additional_input_digest)\n object.__setattr__(self, \"extra_env\", FrozenDict(extra_env) if extra_env else None)\n object.__setattr__(self, \"output_files\", tuple(output_files) if output_files else None)\n object.__setattr__(\n self, \"output_directories\", tuple(output_directories) if output_directories else None\n )\n object.__setattr__(self, \"level\", level)\n object.__setattr__(self, \"concurrency_available\", concurrency_available)\n object.__setattr__(self, \"cache_scope\", cache_scope)\n\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.extra_args:\n raise ValueError(\"`--pex-root` flag not allowed. 
We set its value for you.\")\n\n\nclass PexPEX(DownloadedExternalTool):\n \"\"\"The Pex PEX binary.\"\"\"\n\n\n@rule\nasync def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:\n pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))\n return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_pex: PexPEX,\n pex_env: PexEnvironment,\n bootstrap_python: PythonBuildStandaloneBinary,\n python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,\n global_options: GlobalOptions,\n pex_subsystem: PexSubsystem,\n python_setup: PythonSetup,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]\n\n cert_args = []\n if global_options.ca_certs_path:\n ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)\n gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))\n cert_args = [\"--cert\", ca_certs_fc.path]\n\n digests_to_merge = [pex_pex.digest]\n digests_to_merge.extend(await MultiGet(gets))\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n global_args = [\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n\n if request.concurrency_available > 0:\n global_args.extend([\"--jobs\", \"{pants_concurrency}\"])\n\n verbosity_args = [f\"-{'v' * pex_subsystem.verbosity}\"] if pex_subsystem.verbosity > 0 else []\n\n # NB: We should always pass `--python-path`, as that tells Pex where to look for interpreters\n # when `--python` isn't an absolute path.\n resolve_args = [\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n ]\n # All old-style pex runs take the --pip-version flag, but only certain subcommands of the\n # `pex3` console script do. 
So if invoked with a subcommand, the caller must selectively\n # set --pip-version only on subcommands that take it.\n pip_version_args = (\n [] if request.subcommand else [\"--pip-version\", python_setup.pip_version.value]\n )\n args = [\n *request.subcommand,\n *global_args,\n *verbosity_args,\n *pip_version_args,\n *resolve_args,\n # NB: This comes at the end because it may use `--` passthrough args, # which must come at\n # the end.\n *request.extra_args,\n ]\n\n complete_pex_env = pex_env.in_sandbox(working_directory=None)\n normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args)\n env = {\n **complete_pex_env.environment_dict(python=bootstrap_python),\n **python_native_code.subprocess_env_vars,\n **(request.extra_env or {}),\n # If a subcommand is used, we need to use the `pex3` console script.\n **({\"PEX_SCRIPT\": \"pex3\"} if request.subcommand else {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches=complete_pex_env.append_only_caches,\n immutable_input_digests=bootstrap_python.immutable_input_digests,\n level=request.level,\n concurrency_available=request.concurrency_available,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n *pex_environment.rules(),\n *adhoc_binaries.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import dataclass\nfrom typing import Iterable, List, Mapping, Optional, Tuple\n\nfrom pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_environment\nfrom pants.backend.python.util_rules.pex_environment import PexEnvironment, PexSubsystem\nfrom pants.core.util_rules import adhoc_binaries, external_tool\nfrom pants.core.util_rules.adhoc_binaries import PythonBuildStandaloneBinary\nfrom pants.core.util_rules.external_tool import (\n DownloadedExternalTool,\n ExternalToolRequest,\n TemplatedExternalTool,\n)\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests\nfrom pants.engine.internals.selectors import MultiGet\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import Process, ProcessCacheScope\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content\nfrom pants.util.frozendict import FrozenDict\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import classproperty\nfrom pants.util.strutil import create_path_env_var\n\n\nclass PexCli(TemplatedExternalTool):\n options_scope = \"pex-cli\"\n name = \"pex\"\n help = \"The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex).\"\n\n default_version = \"v2.1.130\"\n default_url_template = \"https://github.com/pantsbuild/pex/releases/download/{version}/pex\"\n version_constraints = \">=2.1.129,<3.0\"\n\n @classproperty\n def default_known_versions(cls):\n return [\n \"|\".join(\n (\n cls.default_version,\n plat,\n \"0ffb9fe8146945031d596f4559bd1803d5d9f70fe318adb385ed8c4a44cb7dec\",\n \"4082176\",\n )\n )\n for 
plat in [\"macos_arm64\", \"macos_x86_64\", \"linux_x86_64\", \"linux_arm64\"]\n ]\n\n\n@dataclass(frozen=True)\nclass PexCliProcess:\n subcommand: tuple[str, ...]\n extra_args: tuple[str, ...]\n description: str = dataclasses.field(compare=False)\n additional_input_digest: Optional[Digest]\n extra_env: Optional[FrozenDict[str, str]]\n output_files: Optional[Tuple[str, ...]]\n output_directories: Optional[Tuple[str, ...]]\n level: LogLevel\n concurrency_available: int\n cache_scope: ProcessCacheScope\n\n def __init__(\n self,\n *,\n subcommand: Iterable[str],\n extra_args: Iterable[str],\n description: str,\n additional_input_digest: Optional[Digest] = None,\n extra_env: Optional[Mapping[str, str]] = None,\n output_files: Optional[Iterable[str]] = None,\n output_directories: Optional[Iterable[str]] = None,\n level: LogLevel = LogLevel.INFO,\n concurrency_available: int = 0,\n cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,\n ) -> None:\n object.__setattr__(self, \"subcommand\", tuple(subcommand))\n object.__setattr__(self, \"extra_args\", tuple(extra_args))\n object.__setattr__(self, \"description\", description)\n object.__setattr__(self, \"additional_input_digest\", additional_input_digest)\n object.__setattr__(self, \"extra_env\", FrozenDict(extra_env) if extra_env else None)\n object.__setattr__(self, \"output_files\", tuple(output_files) if output_files else None)\n object.__setattr__(\n self, \"output_directories\", tuple(output_directories) if output_directories else None\n )\n object.__setattr__(self, \"level\", level)\n object.__setattr__(self, \"concurrency_available\", concurrency_available)\n object.__setattr__(self, \"cache_scope\", cache_scope)\n\n self.__post_init__()\n\n def __post_init__(self) -> None:\n if \"--pex-root-path\" in self.extra_args:\n raise ValueError(\"`--pex-root` flag not allowed. We set its value for you.\")\n\n\nclass PexPEX(DownloadedExternalTool):\n \"\"\"The Pex PEX binary.\"\"\"\n\n\n@rule\nasync def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:\n pex_pex = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))\n return PexPEX(digest=pex_pex.digest, exe=pex_pex.exe)\n\n\n@rule\nasync def setup_pex_cli_process(\n request: PexCliProcess,\n pex_pex: PexPEX,\n pex_env: PexEnvironment,\n bootstrap_python: PythonBuildStandaloneBinary,\n python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,\n global_options: GlobalOptions,\n pex_subsystem: PexSubsystem,\n python_setup: PythonSetup,\n) -> Process:\n tmpdir = \".tmp\"\n gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]\n\n cert_args = []\n if global_options.ca_certs_path:\n ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)\n gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))\n cert_args = [\"--cert\", ca_certs_fc.path]\n\n digests_to_merge = [pex_pex.digest]\n digests_to_merge.extend(await MultiGet(gets))\n if request.additional_input_digest:\n digests_to_merge.append(request.additional_input_digest)\n input_digest = await Get(Digest, MergeDigests(digests_to_merge))\n\n global_args = [\n # Ensure Pex and its subprocesses create temporary files in the the process execution\n # sandbox. It may make sense to do this generally for Processes, but in the short term we\n # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to\n # perform. 
Making the TMPDIR local to the sandbox allows control via\n # --local-execution-root-dir for the local case and should work well with remote cases where\n # a remoting implementation has to allow for processes producing large binaries in a\n # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via\n # environment variable allows Pex to absolutize the path ensuring subprocesses that change\n # CWD can find the TMPDIR.\n \"--tmpdir\",\n tmpdir,\n ]\n\n if request.concurrency_available > 0:\n global_args.extend([\"--jobs\", \"{pants_concurrency}\"])\n\n verbosity_args = [f\"-{'v' * pex_subsystem.verbosity}\"] if pex_subsystem.verbosity > 0 else []\n\n # NB: We should always pass `--python-path`, as that tells Pex where to look for interpreters\n # when `--python` isn't an absolute path.\n resolve_args = [\n *cert_args,\n \"--python-path\",\n create_path_env_var(pex_env.interpreter_search_paths),\n ]\n # All old-style pex runs take the --pip-version flag, but only certain subcommands of the\n # `pex3` console script do. So if invoked with a subcommand, the caller must selectively\n # set --pip-version only on subcommands that take it.\n pip_version_args = (\n [] if request.subcommand else [\"--pip-version\", python_setup.pip_version.value]\n )\n args = [\n *request.subcommand,\n *global_args,\n *verbosity_args,\n *pip_version_args,\n *resolve_args,\n # NB: This comes at the end because it may use `--` passthrough args, # which must come at\n # the end.\n *request.extra_args,\n ]\n\n complete_pex_env = pex_env.in_sandbox(working_directory=None)\n normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args)\n env = {\n **complete_pex_env.environment_dict(python=bootstrap_python),\n **python_native_code.subprocess_env_vars,\n **(request.extra_env or {}),\n # If a subcommand is used, we need to use the `pex3` console script.\n **({\"PEX_SCRIPT\": \"pex3\"} if request.subcommand else {}),\n }\n\n return Process(\n normalized_argv,\n description=request.description,\n input_digest=input_digest,\n env=env,\n output_files=request.output_files,\n output_directories=request.output_directories,\n append_only_caches=complete_pex_env.append_only_caches,\n immutable_input_digests=bootstrap_python.immutable_input_digests,\n level=request.level,\n concurrency_available=request.concurrency_available,\n cache_scope=request.cache_scope,\n )\n\n\ndef rules():\n return [\n *collect_rules(),\n *external_tool.rules(),\n *pex_environment.rules(),\n *adhoc_binaries.rules(),\n ]\n", "path": "src/python/pants/backend/python/util_rules/pex_cli.py"}]}
3,227
161
gh_patches_debug_3858
rasdani/github-patches
git_diff
carpentries__amy-337
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Some workshops at the database are missing the Organizer field --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `workshops/forms.py` Content: ``` 1 from django import forms 2 from django.forms import HiddenInput 3 from django.forms.models import modelform_factory 4 5 from crispy_forms.helper import FormHelper 6 from crispy_forms.layout import Submit 7 from selectable import forms as selectable 8 9 from workshops.models import Skill, Airport, Event, Task, Award, Person 10 from workshops import lookups 11 12 INSTRUCTOR_SEARCH_LEN = 10 # how many instrutors to return from a search by default 13 14 AUTOCOMPLETE_HELP_TEXT = ( 15 "Autocomplete field; type characters to view available options, " 16 "then select desired item from list." 17 ) 18 19 DATE_HELP_TEXT = "Select date using widget, or enter in YYYY-MM-DD format." 20 21 22 class BootstrapHelper(FormHelper): 23 form_class = 'form-horizontal' 24 label_class = 'col-lg-2' 25 field_class = 'col-lg-8' 26 27 def __init__(self, form=None): 28 super().__init__(form) 29 30 self.attrs['role'] = 'form' 31 self.inputs.append(Submit('submit', 'Submit')) 32 33 34 class BootstrapHelperWithoutForm(BootstrapHelper): 35 form_tag = False 36 37 bootstrap_helper = BootstrapHelper() 38 bootstrap_helper_without_form = BootstrapHelperWithoutForm() 39 40 41 class InstructorsForm(forms.Form): 42 '''Represent instructor matching form.''' 43 44 wanted = forms.IntegerField(label='Number Wanted', 45 initial=INSTRUCTOR_SEARCH_LEN, 46 min_value=1) 47 latitude = forms.FloatField(label='Latitude', 48 min_value=-90.0, 49 max_value=90.0, 50 required=False) 51 longitude = forms.FloatField(label='Longitude', 52 min_value=-180.0, 53 max_value=180.0, 54 required=False) 55 airport = selectable.AutoCompleteSelectField( 56 lookup_class=lookups.AirportLookup, 57 label='Airport', 58 required=False, 59 help_text=AUTOCOMPLETE_HELP_TEXT, 60 ) 61 62 def __init__(self, *args, **kwargs): 63 '''Build checkboxes for skills dynamically.''' 64 super(InstructorsForm, self).__init__(*args, **kwargs) 65 skills = Skill.objects.all() 66 for s in skills: 67 self.fields[s.name] = forms.BooleanField(label=s.name, required=False) 68 69 def clean(self): 70 cleaned_data = super(InstructorsForm, self).clean() 71 airport = cleaned_data.get('airport') 72 lat = cleaned_data.get('latitude') 73 long = cleaned_data.get('longitude') 74 75 if airport is None: 76 if lat is None or long is None: 77 raise forms.ValidationError( 78 'Must specify either an airport code or latitude/longitude') 79 else: 80 if lat is not None or long is not None: 81 raise forms.ValidationError( 82 'Cannot specify both an airport code and a ' 83 'latitude/longitude. 
Pick one or the other') 84 cleaned_data['latitude'] = airport.latitude 85 cleaned_data['longitude'] = airport.longitude 86 return cleaned_data 87 88 89 class PersonBulkAddForm(forms.Form): 90 '''Represent CSV upload form for bulk adding people.''' 91 92 file = forms.FileField() 93 94 95 class SearchForm(forms.Form): 96 '''Represent general searching form.''' 97 98 term = forms.CharField(label='term', 99 max_length=100) 100 in_sites = forms.BooleanField(label='in sites', 101 required=False, 102 initial=True) 103 in_events = forms.BooleanField(label='in events', 104 required=False, 105 initial=True) 106 in_persons = forms.BooleanField(label='in persons', 107 required=False, 108 initial=True) 109 110 111 class DebriefForm(forms.Form): 112 '''Represent general debrief form.''' 113 begin_date = forms.DateField( 114 label='Begin date as YYYY-MM-DD', 115 input_formats=['%Y-%m-%d', ] 116 ) 117 end_date = forms.DateField( 118 label='End date as YYYY-MD-DD', 119 input_formats=['%Y-%m-%d', ] 120 ) 121 122 123 class EventForm(forms.ModelForm): 124 125 site = selectable.AutoCompleteSelectField( 126 lookup_class=lookups.SiteLookup, 127 label='Site', 128 required=True, 129 help_text=AUTOCOMPLETE_HELP_TEXT, 130 widget=selectable.AutoComboboxSelectWidget, 131 ) 132 133 organizer = selectable.AutoCompleteSelectField( 134 lookup_class=lookups.SiteLookup, 135 label='Organizer', 136 required=True, 137 help_text=AUTOCOMPLETE_HELP_TEXT, 138 widget=selectable.AutoComboboxSelectWidget, 139 ) 140 141 def __init__(self, *args, **kwargs): 142 super().__init__(*args, **kwargs) 143 self.fields['start'].help_text = DATE_HELP_TEXT 144 self.fields['end'].help_text = DATE_HELP_TEXT 145 146 def clean_slug(self): 147 # Ensure slug is not an integer value for Event.get_by_ident 148 data = self.cleaned_data['slug'] 149 150 try: 151 int(data) 152 except ValueError: 153 pass 154 else: 155 raise forms.ValidationError("Slug must not be an integer-value.") 156 157 return data 158 159 class Meta: 160 model = Event 161 exclude = ('deleted', ) 162 163 164 class TaskForm(forms.ModelForm): 165 166 person = selectable.AutoCompleteSelectField( 167 lookup_class=lookups.PersonLookup, 168 label='Person', 169 required=True, 170 help_text=AUTOCOMPLETE_HELP_TEXT, 171 widget=selectable.AutoComboboxSelectWidget, 172 ) 173 174 def __init__(self, *args, **kwargs): 175 event = kwargs.pop('event', None) 176 super().__init__(*args, **kwargs) 177 if event: 178 self.instance.event = event 179 180 class Meta: 181 model = Task 182 exclude = ('event', 'deleted') 183 184 185 class TaskFullForm(TaskForm): 186 187 event = selectable.AutoCompleteSelectField( 188 lookup_class=lookups.EventLookup, 189 label='Event', 190 required=True, 191 help_text=AUTOCOMPLETE_HELP_TEXT, 192 widget=selectable.AutoComboboxSelectWidget, 193 ) 194 195 class Meta: 196 model = Task 197 exclude = ('deleted', ) 198 199 200 class PersonForm(forms.ModelForm): 201 202 airport = selectable.AutoCompleteSelectField( 203 lookup_class=lookups.AirportLookup, 204 label='Airport', 205 required=False, 206 help_text=AUTOCOMPLETE_HELP_TEXT, 207 widget=selectable.AutoComboboxSelectWidget, 208 ) 209 210 class Meta: 211 model = Person 212 # don't display the 'password', 'user_permissions', 'group_permissions' 213 # fields 214 # + reorder fields 215 fields = ['personal', 'middle', 'family', 'username', 'may_contact', 216 'email', 'gender', 'airport', 'github', 'twitter', 'url', 217 'notes', 'is_superuser'] 218 219 220 class BadgeAwardForm(forms.ModelForm): 221 222 person = 
selectable.AutoCompleteSelectField( 223 lookup_class=lookups.PersonLookup, 224 label='Person', 225 required=True, 226 help_text=AUTOCOMPLETE_HELP_TEXT, 227 widget=selectable.AutoComboboxSelectWidget, 228 ) 229 230 event = selectable.AutoCompleteSelectField( 231 lookup_class=lookups.EventLookup, 232 label='Event', 233 required=False, 234 help_text=AUTOCOMPLETE_HELP_TEXT, 235 widget=selectable.AutoComboboxSelectWidget, 236 ) 237 238 class Meta: 239 model = Award 240 fields = '__all__' 241 widgets = {'badge': HiddenInput} 242 243 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/workshops/forms.py b/workshops/forms.py --- a/workshops/forms.py +++ b/workshops/forms.py @@ -133,7 +133,7 @@ organizer = selectable.AutoCompleteSelectField( lookup_class=lookups.SiteLookup, label='Organizer', - required=True, + required=False, help_text=AUTOCOMPLETE_HELP_TEXT, widget=selectable.AutoComboboxSelectWidget, )
{"golden_diff": "diff --git a/workshops/forms.py b/workshops/forms.py\n--- a/workshops/forms.py\n+++ b/workshops/forms.py\n@@ -133,7 +133,7 @@\n organizer = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Organizer',\n- required=True,\n+ required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n", "issue": "Some workshops at the database are missing the Organizer field\n\n", "before_files": [{"content": "from django import forms\nfrom django.forms import HiddenInput\nfrom django.forms.models import modelform_factory\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom selectable import forms as selectable\n\nfrom workshops.models import Skill, Airport, Event, Task, Award, Person\nfrom workshops import lookups\n\nINSTRUCTOR_SEARCH_LEN = 10 # how many instrutors to return from a search by default\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\nDATE_HELP_TEXT = \"Select date using widget, or enter in YYYY-MM-DD format.\"\n\n\nclass BootstrapHelper(FormHelper):\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperWithoutForm(BootstrapHelper):\n form_tag = False\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_without_form = BootstrapHelperWithoutForm()\n\n\nclass InstructorsForm(forms.Form):\n '''Represent instructor matching form.'''\n\n wanted = forms.IntegerField(label='Number Wanted',\n initial=INSTRUCTOR_SEARCH_LEN,\n min_value=1)\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n )\n\n def __init__(self, *args, **kwargs):\n '''Build checkboxes for skills dynamically.'''\n super(InstructorsForm, self).__init__(*args, **kwargs)\n skills = Skill.objects.all()\n for s in skills:\n self.fields[s.name] = forms.BooleanField(label=s.name, required=False)\n\n def clean(self):\n cleaned_data = super(InstructorsForm, self).clean()\n airport = cleaned_data.get('airport')\n lat = cleaned_data.get('latitude')\n long = cleaned_data.get('longitude')\n\n if airport is None:\n if lat is None or long is None:\n raise forms.ValidationError(\n 'Must specify either an airport code or latitude/longitude')\n else:\n if lat is not None or long is not None:\n raise forms.ValidationError(\n 'Cannot specify both an airport code and a '\n 'latitude/longitude. 
Pick one or the other')\n cleaned_data['latitude'] = airport.latitude\n cleaned_data['longitude'] = airport.longitude\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_sites = forms.BooleanField(label='in sites',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n\n site = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Site',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n organizer = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Organizer',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['start'].help_text = DATE_HELP_TEXT\n self.fields['end'].help_text = DATE_HELP_TEXT\n\n def clean_slug(self):\n # Ensure slug is not an integer value for Event.get_by_ident\n data = self.cleaned_data['slug']\n\n try:\n int(data)\n except ValueError:\n pass\n else:\n raise forms.ValidationError(\"Slug must not be an integer-value.\")\n\n return data\n\n class Meta:\n model = Event\n exclude = ('deleted', )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n def __init__(self, *args, **kwargs):\n event = kwargs.pop('event', None)\n super().__init__(*args, **kwargs)\n if event:\n self.instance.event = event\n\n class Meta:\n model = Task\n exclude = ('event', 'deleted')\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n exclude = ('deleted', )\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions', 'group_permissions'\n # fields\n # + reorder fields\n fields = ['personal', 'middle', 'family', 'username', 'may_contact',\n 'email', 'gender', 'airport', 'github', 'twitter', 'url',\n 'notes', 'is_superuser']\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n 
required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n", "path": "workshops/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.forms import HiddenInput\nfrom django.forms.models import modelform_factory\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom selectable import forms as selectable\n\nfrom workshops.models import Skill, Airport, Event, Task, Award, Person\nfrom workshops import lookups\n\nINSTRUCTOR_SEARCH_LEN = 10 # how many instrutors to return from a search by default\n\nAUTOCOMPLETE_HELP_TEXT = (\n \"Autocomplete field; type characters to view available options, \"\n \"then select desired item from list.\"\n)\n\nDATE_HELP_TEXT = \"Select date using widget, or enter in YYYY-MM-DD format.\"\n\n\nclass BootstrapHelper(FormHelper):\n form_class = 'form-horizontal'\n label_class = 'col-lg-2'\n field_class = 'col-lg-8'\n\n def __init__(self, form=None):\n super().__init__(form)\n\n self.attrs['role'] = 'form'\n self.inputs.append(Submit('submit', 'Submit'))\n\n\nclass BootstrapHelperWithoutForm(BootstrapHelper):\n form_tag = False\n\nbootstrap_helper = BootstrapHelper()\nbootstrap_helper_without_form = BootstrapHelperWithoutForm()\n\n\nclass InstructorsForm(forms.Form):\n '''Represent instructor matching form.'''\n\n wanted = forms.IntegerField(label='Number Wanted',\n initial=INSTRUCTOR_SEARCH_LEN,\n min_value=1)\n latitude = forms.FloatField(label='Latitude',\n min_value=-90.0,\n max_value=90.0,\n required=False)\n longitude = forms.FloatField(label='Longitude',\n min_value=-180.0,\n max_value=180.0,\n required=False)\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n )\n\n def __init__(self, *args, **kwargs):\n '''Build checkboxes for skills dynamically.'''\n super(InstructorsForm, self).__init__(*args, **kwargs)\n skills = Skill.objects.all()\n for s in skills:\n self.fields[s.name] = forms.BooleanField(label=s.name, required=False)\n\n def clean(self):\n cleaned_data = super(InstructorsForm, self).clean()\n airport = cleaned_data.get('airport')\n lat = cleaned_data.get('latitude')\n long = cleaned_data.get('longitude')\n\n if airport is None:\n if lat is None or long is None:\n raise forms.ValidationError(\n 'Must specify either an airport code or latitude/longitude')\n else:\n if lat is not None or long is not None:\n raise forms.ValidationError(\n 'Cannot specify both an airport code and a '\n 'latitude/longitude. 
Pick one or the other')\n cleaned_data['latitude'] = airport.latitude\n cleaned_data['longitude'] = airport.longitude\n return cleaned_data\n\n\nclass PersonBulkAddForm(forms.Form):\n '''Represent CSV upload form for bulk adding people.'''\n\n file = forms.FileField()\n\n\nclass SearchForm(forms.Form):\n '''Represent general searching form.'''\n\n term = forms.CharField(label='term',\n max_length=100)\n in_sites = forms.BooleanField(label='in sites',\n required=False,\n initial=True)\n in_events = forms.BooleanField(label='in events',\n required=False,\n initial=True)\n in_persons = forms.BooleanField(label='in persons',\n required=False,\n initial=True)\n\n\nclass DebriefForm(forms.Form):\n '''Represent general debrief form.'''\n begin_date = forms.DateField(\n label='Begin date as YYYY-MM-DD',\n input_formats=['%Y-%m-%d', ]\n )\n end_date = forms.DateField(\n label='End date as YYYY-MD-DD',\n input_formats=['%Y-%m-%d', ]\n )\n\n\nclass EventForm(forms.ModelForm):\n\n site = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Site',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n organizer = selectable.AutoCompleteSelectField(\n lookup_class=lookups.SiteLookup,\n label='Organizer',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['start'].help_text = DATE_HELP_TEXT\n self.fields['end'].help_text = DATE_HELP_TEXT\n\n def clean_slug(self):\n # Ensure slug is not an integer value for Event.get_by_ident\n data = self.cleaned_data['slug']\n\n try:\n int(data)\n except ValueError:\n pass\n else:\n raise forms.ValidationError(\"Slug must not be an integer-value.\")\n\n return data\n\n class Meta:\n model = Event\n exclude = ('deleted', )\n\n\nclass TaskForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n def __init__(self, *args, **kwargs):\n event = kwargs.pop('event', None)\n super().__init__(*args, **kwargs)\n if event:\n self.instance.event = event\n\n class Meta:\n model = Task\n exclude = ('event', 'deleted')\n\n\nclass TaskFullForm(TaskForm):\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Task\n exclude = ('deleted', )\n\n\nclass PersonForm(forms.ModelForm):\n\n airport = selectable.AutoCompleteSelectField(\n lookup_class=lookups.AirportLookup,\n label='Airport',\n required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Person\n # don't display the 'password', 'user_permissions', 'group_permissions'\n # fields\n # + reorder fields\n fields = ['personal', 'middle', 'family', 'username', 'may_contact',\n 'email', 'gender', 'airport', 'github', 'twitter', 'url',\n 'notes', 'is_superuser']\n\n\nclass BadgeAwardForm(forms.ModelForm):\n\n person = selectable.AutoCompleteSelectField(\n lookup_class=lookups.PersonLookup,\n label='Person',\n required=True,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n event = selectable.AutoCompleteSelectField(\n lookup_class=lookups.EventLookup,\n label='Event',\n 
required=False,\n help_text=AUTOCOMPLETE_HELP_TEXT,\n widget=selectable.AutoComboboxSelectWidget,\n )\n\n class Meta:\n model = Award\n fields = '__all__'\n widgets = {'badge': HiddenInput}\n\n", "path": "workshops/forms.py"}]}
2,432
99
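For readers skimming the record above: the golden diff is a one-line relaxation of the `organizer` field in `EventForm`. A minimal sketch of the patched field follows, with names taken from the record's `workshops/forms.py`; the surrounding form is trimmed, so treat it as illustrative rather than a drop-in file.

```python
# Sketch of the patched EventForm from the golden diff above (trimmed to the
# relevant field; AUTOCOMPLETE_HELP_TEXT, lookups and models come from the record).
from django import forms
from selectable import forms as selectable

from workshops import lookups
from workshops.models import Event

AUTOCOMPLETE_HELP_TEXT = (
    "Autocomplete field; type characters to view available options, "
    "then select desired item from list."
)


class EventForm(forms.ModelForm):
    organizer = selectable.AutoCompleteSelectField(
        lookup_class=lookups.SiteLookup,
        label='Organizer',
        required=False,  # was required=True; workshops without an organizer now validate
        help_text=AUTOCOMPLETE_HELP_TEXT,
        widget=selectable.AutoComboboxSelectWidget,
    )

    class Meta:
        model = Event
        exclude = ('deleted',)
```

Leaving the field optional matches the state described in the issue, where some existing workshop records simply have no organizer to enter.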
gh_patches_debug_42220
rasdani/github-patches
git_diff
joke2k__faker-1041
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Base provider methods are documented with the address provider. In the docs the [BaseProvider methods](https://github.com/joke2k/faker/blob/master/faker/providers/__init__.py) are grouped with the [address provider](https://github.com/joke2k/faker/blob/master/faker/providers/address/__init__.py). Here is the documentation: http://faker.readthedocs.io/en/master/providers/faker.providers.address.html I would expect those to be in their own category, and they have nothing to do with addresses. Documentation of utilities like random_choices Is there any online documentation of the utility functions like random_choices? I always end up looking at the source code when I forget what they're named. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/build_docs.py` Content: ``` 1 # coding=utf-8 2 3 from __future__ import print_function, unicode_literals 4 5 import os 6 import pprint 7 import sys 8 9 import six 10 11 DOCS_ROOT = os.path.abspath(os.path.join('..', 'docs')) 12 13 14 def write(fh, s): 15 return fh.write(s.encode('utf-8')) 16 17 18 def write_provider(fh, doc, provider, formatters, excludes=None): 19 20 if excludes is None: 21 excludes = [] 22 23 write(fh, '\n') 24 title = "``{0}``".format(doc.get_provider_name(provider)) 25 write(fh, '%s\n' % title) 26 write(fh, "-" * len(title)) 27 write(fh, '\n\n::\n') 28 29 for signature, example in formatters.items(): 30 if signature in excludes: 31 continue 32 try: 33 # `pprint` can't format sets of heterogenous types. 34 if not isinstance(example, set): 35 example = pprint.pformat(example, indent=4) 36 lines = six.text_type(example).expandtabs().splitlines() 37 except UnicodeEncodeError: 38 msg = 'error on "{0}" with value "{1}"'.format(signature, example) 39 raise Exception(msg) 40 write(fh, '\n') 41 write(fh, "\t{fake}\n{example}\n".format( 42 fake=signature, 43 example='\n'.join(['\t# ' + line for line in lines]), 44 )) 45 46 47 def write_docs(*args, **kwargs): 48 from faker import Faker, documentor 49 from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES 50 51 fake = Faker(locale=DEFAULT_LOCALE) 52 53 from faker.providers import BaseProvider 54 base_provider_formatters = [f for f in dir(BaseProvider)] 55 56 doc = documentor.Documentor(fake) 57 58 formatters = doc.get_formatters(with_args=True, with_defaults=True) 59 60 for provider, fakers in formatters: 61 provider_name = doc.get_provider_name(provider) 62 fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name) 63 with open(fname, 'wb') as fh: 64 write(fh, ':github_url: hide\n\n') 65 write_provider(fh, doc, provider, fakers) 66 67 with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh: 68 write(fh, ':github_url: hide\n\n') 69 write(fh, 'Providers\n') 70 write(fh, '=========\n') 71 write(fh, '.. 
toctree::\n') 72 write(fh, ' :maxdepth: 2\n\n') 73 [write(fh, ' providers/%s\n' % doc.get_provider_name(provider)) 74 for provider, fakers in formatters] 75 76 AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES) 77 for lang in AVAILABLE_LOCALES: 78 fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang) 79 with open(fname, 'wb') as fh: 80 write(fh, ':github_url: hide\n\n') 81 title = 'Language {0}\n'.format(lang) 82 write(fh, title) 83 write(fh, '=' * len(title)) 84 write(fh, '\n') 85 fake = Faker(locale=lang) 86 d = documentor.Documentor(fake) 87 88 for p, fs in d.get_formatters(with_args=True, with_defaults=True, 89 locale=lang, 90 excludes=base_provider_formatters): 91 write_provider(fh, d, p, fs) 92 93 with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh: 94 write(fh, ':github_url: hide\n\n') 95 write(fh, 'Locales\n') 96 write(fh, '=======\n') 97 write(fh, '.. toctree::\n') 98 write(fh, ' :maxdepth: 2\n\n') 99 [write(fh, ' locales/%s\n' % lang) for lang in AVAILABLE_LOCALES] 100 101 102 # wrappers for sphinx 103 def _main(app, *args, **kwargs): 104 return write_docs(*args, **kwargs) 105 106 107 def setup(app): 108 app.connect(str('builder-inited'), _main) 109 110 111 if __name__ == "__main__": 112 write_docs(*sys.argv[1:]) 113 ``` Path: `faker/documentor.py` Content: ``` 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 import inspect 6 7 from faker import utils 8 9 from .compat import getargspec 10 11 12 class Documentor(object): 13 14 def __init__(self, generator): 15 """ 16 :param generator: a localized Generator with providers filled, 17 for which to write the documentation 18 :type generator: faker.Generator() 19 """ 20 self.generator = generator 21 self.max_name_len = 0 22 self.already_generated = [] 23 24 def get_formatters(self, locale=None, excludes=None, **kwargs): 25 26 self.max_name_len = 0 27 self.already_generated = [] if excludes is None else excludes[:] 28 formatters = [] 29 providers = self.generator.get_providers() 30 for provider in providers[::-1]: # reverse 31 if locale and provider.__lang__ != locale: 32 continue 33 formatters.append( 34 (provider, self.get_provider_formatters(provider, **kwargs)), 35 ) 36 return formatters 37 38 def get_provider_formatters(self, provider, prefix='fake.', 39 with_args=True, with_defaults=True): 40 41 formatters = {} 42 43 for name, method in inspect.getmembers(provider, inspect.ismethod): 44 # skip 'private' method and inherited methods 45 if name.startswith('_') or name in self.already_generated: 46 continue 47 48 arguments = [] 49 faker_args = [] 50 faker_kwargs = {} 51 52 if name == 'binary': 53 faker_kwargs['length'] = 1024 54 55 if with_args: 56 # retrieve all parameter 57 argspec = getargspec(method) 58 59 lst = [x for x in argspec.args if x not in ['self', 'cls']] 60 for i, arg in enumerate(lst): 61 62 if argspec.defaults and with_defaults: 63 64 try: 65 default = argspec.defaults[i] 66 if utils.is_string(default): 67 default = utils.quote(default) 68 else: 69 # TODO check default type 70 default = "{0}".format(default) 71 72 arg = "{0}={1}".format(arg, default) 73 74 except IndexError: 75 pass 76 77 arguments.append(arg) 78 if with_args == 'first': 79 break 80 81 if with_args != 'first': 82 if argspec.varargs: 83 arguments.append('*' + argspec.varargs) 84 if argspec.varkw: 85 arguments.append('**' + argspec.varkw) 86 87 # build fake method signature 88 signature = "{0}{1}({2})".format(prefix, 89 name, 90 ", ".join(arguments)) 91 92 # make a fake example 93 example = self.generator.format(name, 
*faker_args, **faker_kwargs) 94 95 formatters[signature] = example 96 97 self.max_name_len = max(self.max_name_len, len(signature)) 98 self.already_generated.append(name) 99 100 return formatters 101 102 @staticmethod 103 def get_provider_name(provider_class): 104 return provider_class.__provider__ 105 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/build_docs.py b/faker/build_docs.py --- a/faker/build_docs.py +++ b/faker/build_docs.py @@ -15,6 +15,12 @@ return fh.write(s.encode('utf-8')) +def write_base_provider(fh, doc, base_provider): + formatters = doc.get_provider_formatters(base_provider) + write(fh, ':github_url: hide\n\n') + write_provider(fh, doc, base_provider, formatters) + + def write_provider(fh, doc, provider, formatters, excludes=None): if excludes is None: @@ -47,16 +53,21 @@ def write_docs(*args, **kwargs): from faker import Faker, documentor from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES - - fake = Faker(locale=DEFAULT_LOCALE) - from faker.providers import BaseProvider - base_provider_formatters = [f for f in dir(BaseProvider)] + fake = Faker(locale=DEFAULT_LOCALE) doc = documentor.Documentor(fake) - formatters = doc.get_formatters(with_args=True, with_defaults=True) + # Write docs for fakers.providers.BaseProvider + base_provider = BaseProvider(fake) + fname = os.path.join(DOCS_ROOT, 'providers', 'BaseProvider.rst') + with open(fname, 'wb') as fh: + write_base_provider(fh, doc, base_provider) + # Write docs for default locale providers + base_provider_formatters = [f for f in dir(BaseProvider)] + formatters = doc.get_formatters(with_args=True, with_defaults=True, + excludes=base_provider_formatters) for provider, fakers in formatters: provider_name = doc.get_provider_name(provider) fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name) @@ -64,15 +75,18 @@ write(fh, ':github_url: hide\n\n') write_provider(fh, doc, provider, fakers) + # Write providers index page with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh: write(fh, ':github_url: hide\n\n') write(fh, 'Providers\n') write(fh, '=========\n') write(fh, '.. toctree::\n') write(fh, ' :maxdepth: 2\n\n') + write(fh, ' providers/BaseProvider\n') [write(fh, ' providers/%s\n' % doc.get_provider_name(provider)) for provider, fakers in formatters] + # Write docs for locale-specific providers AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES) for lang in AVAILABLE_LOCALES: fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang) @@ -90,6 +104,7 @@ excludes=base_provider_formatters): write_provider(fh, d, p, fs) + # Write locales index page with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh: write(fh, ':github_url: hide\n\n') write(fh, 'Locales\n') diff --git a/faker/documentor.py b/faker/documentor.py --- a/faker/documentor.py +++ b/faker/documentor.py @@ -22,7 +22,6 @@ self.already_generated = [] def get_formatters(self, locale=None, excludes=None, **kwargs): - self.max_name_len = 0 self.already_generated = [] if excludes is None else excludes[:] formatters = []
{"golden_diff": "diff --git a/faker/build_docs.py b/faker/build_docs.py\n--- a/faker/build_docs.py\n+++ b/faker/build_docs.py\n@@ -15,6 +15,12 @@\n return fh.write(s.encode('utf-8'))\n \n \n+def write_base_provider(fh, doc, base_provider):\n+ formatters = doc.get_provider_formatters(base_provider)\n+ write(fh, ':github_url: hide\\n\\n')\n+ write_provider(fh, doc, base_provider, formatters)\n+\n+\n def write_provider(fh, doc, provider, formatters, excludes=None):\n \n if excludes is None:\n@@ -47,16 +53,21 @@\n def write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n-\n- fake = Faker(locale=DEFAULT_LOCALE)\n-\n from faker.providers import BaseProvider\n- base_provider_formatters = [f for f in dir(BaseProvider)]\n \n+ fake = Faker(locale=DEFAULT_LOCALE)\n doc = documentor.Documentor(fake)\n \n- formatters = doc.get_formatters(with_args=True, with_defaults=True)\n+ # Write docs for fakers.providers.BaseProvider\n+ base_provider = BaseProvider(fake)\n+ fname = os.path.join(DOCS_ROOT, 'providers', 'BaseProvider.rst')\n+ with open(fname, 'wb') as fh:\n+ write_base_provider(fh, doc, base_provider)\n \n+ # Write docs for default locale providers\n+ base_provider_formatters = [f for f in dir(BaseProvider)]\n+ formatters = doc.get_formatters(with_args=True, with_defaults=True,\n+ excludes=base_provider_formatters)\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n@@ -64,15 +75,18 @@\n write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n \n+ # Write providers index page\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n+ write(fh, ' providers/BaseProvider\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n \n+ # Write docs for locale-specific providers\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n@@ -90,6 +104,7 @@\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n \n+ # Write locales index page\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\ndiff --git a/faker/documentor.py b/faker/documentor.py\n--- a/faker/documentor.py\n+++ b/faker/documentor.py\n@@ -22,7 +22,6 @@\n self.already_generated = []\n \n def get_formatters(self, locale=None, excludes=None, **kwargs):\n-\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n", "issue": "Base provider methods are documented with the address provider.\nIn the docs the [BaseProvider methods](https://github.com/joke2k/faker/blob/master/faker/providers/__init__.py) are grouped with the [address provider](https://github.com/joke2k/faker/blob/master/faker/providers/address/__init__.py). Here is the documentation: http://faker.readthedocs.io/en/master/providers/faker.providers.address.html\r\n\r\nI would expect those to be in their own category, and they have nothing to do with addresses.\nDocumentation of utilities like random_choices\nIs there any online documentation of the utility functions like random_choices? 
I always end up looking at the source code when I forget what they're named.\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport pprint\nimport sys\n\nimport six\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = \"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n # `pprint` can't format sets of heterogenous types.\n if not isinstance(example, set):\n example = pprint.pformat(example, indent=4)\n lines = six.text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n write(fh, '\\n')\n write(fh, \"\\t{fake}\\n{example}\\n\".format(\n fake=signature,\n example='\\n'.join(['\\t# ' + line for line in lines]),\n ))\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. 
toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}, {"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\nfrom faker import utils\n\nfrom .compat import getargspec\n\n\nclass Documentor(object):\n\n def __init__(self, generator):\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len = 0\n self.already_generated = []\n\n def get_formatters(self, locale=None, excludes=None, **kwargs):\n\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\n )\n return formatters\n\n def get_provider_formatters(self, provider, prefix='fake.',\n with_args=True, with_defaults=True):\n\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n\n arguments = []\n faker_args = []\n faker_kwargs = {}\n\n if name == 'binary':\n faker_kwargs['length'] = 1024\n\n if with_args:\n # retrieve all parameter\n argspec = getargspec(method)\n\n lst = [x for x in argspec.args if x not in ['self', 'cls']]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if utils.is_string(default):\n default = utils.quote(default)\n else:\n # TODO check default type\n default = \"{0}\".format(default)\n\n arg = \"{0}={1}\".format(arg, default)\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == 'first':\n break\n\n if with_args != 'first':\n if argspec.varargs:\n arguments.append('*' + argspec.varargs)\n if argspec.varkw:\n arguments.append('**' + argspec.varkw)\n\n # build fake method signature\n signature = \"{0}{1}({2})\".format(prefix,\n name,\n \", \".join(arguments))\n\n # make a fake example\n example = self.generator.format(name, *faker_args, **faker_kwargs)\n\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class):\n return provider_class.__provider__\n", "path": "faker/documentor.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function, unicode_literals\n\nimport os\nimport pprint\nimport sys\n\nimport six\n\nDOCS_ROOT = os.path.abspath(os.path.join('..', 'docs'))\n\n\ndef write(fh, s):\n return fh.write(s.encode('utf-8'))\n\n\ndef write_base_provider(fh, doc, base_provider):\n formatters = doc.get_provider_formatters(base_provider)\n write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, base_provider, formatters)\n\n\ndef write_provider(fh, doc, provider, formatters, excludes=None):\n\n if excludes is None:\n excludes = []\n\n write(fh, '\\n')\n title = 
\"``{0}``\".format(doc.get_provider_name(provider))\n write(fh, '%s\\n' % title)\n write(fh, \"-\" * len(title))\n write(fh, '\\n\\n::\\n')\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n # `pprint` can't format sets of heterogenous types.\n if not isinstance(example, set):\n example = pprint.pformat(example, indent=4)\n lines = six.text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n msg = 'error on \"{0}\" with value \"{1}\"'.format(signature, example)\n raise Exception(msg)\n write(fh, '\\n')\n write(fh, \"\\t{fake}\\n{example}\\n\".format(\n fake=signature,\n example='\\n'.join(['\\t# ' + line for line in lines]),\n ))\n\n\ndef write_docs(*args, **kwargs):\n from faker import Faker, documentor\n from faker.config import DEFAULT_LOCALE, AVAILABLE_LOCALES\n from faker.providers import BaseProvider\n\n fake = Faker(locale=DEFAULT_LOCALE)\n doc = documentor.Documentor(fake)\n\n # Write docs for fakers.providers.BaseProvider\n base_provider = BaseProvider(fake)\n fname = os.path.join(DOCS_ROOT, 'providers', 'BaseProvider.rst')\n with open(fname, 'wb') as fh:\n write_base_provider(fh, doc, base_provider)\n\n # Write docs for default locale providers\n base_provider_formatters = [f for f in dir(BaseProvider)]\n formatters = doc.get_formatters(with_args=True, with_defaults=True,\n excludes=base_provider_formatters)\n for provider, fakers in formatters:\n provider_name = doc.get_provider_name(provider)\n fname = os.path.join(DOCS_ROOT, 'providers', '%s.rst' % provider_name)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write_provider(fh, doc, provider, fakers)\n\n # Write providers index page\n with open(os.path.join(DOCS_ROOT, 'providers.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Providers\\n')\n write(fh, '=========\\n')\n write(fh, '.. toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n write(fh, ' providers/BaseProvider\\n')\n [write(fh, ' providers/%s\\n' % doc.get_provider_name(provider))\n for provider, fakers in formatters]\n\n # Write docs for locale-specific providers\n AVAILABLE_LOCALES = sorted(AVAILABLE_LOCALES)\n for lang in AVAILABLE_LOCALES:\n fname = os.path.join(DOCS_ROOT, 'locales', '%s.rst' % lang)\n with open(fname, 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n title = 'Language {0}\\n'.format(lang)\n write(fh, title)\n write(fh, '=' * len(title))\n write(fh, '\\n')\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True,\n locale=lang,\n excludes=base_provider_formatters):\n write_provider(fh, d, p, fs)\n\n # Write locales index page\n with open(os.path.join(DOCS_ROOT, 'locales.rst'), 'wb') as fh:\n write(fh, ':github_url: hide\\n\\n')\n write(fh, 'Locales\\n')\n write(fh, '=======\\n')\n write(fh, '.. 
toctree::\\n')\n write(fh, ' :maxdepth: 2\\n\\n')\n [write(fh, ' locales/%s\\n' % lang) for lang in AVAILABLE_LOCALES]\n\n\n# wrappers for sphinx\ndef _main(app, *args, **kwargs):\n return write_docs(*args, **kwargs)\n\n\ndef setup(app):\n app.connect(str('builder-inited'), _main)\n\n\nif __name__ == \"__main__\":\n write_docs(*sys.argv[1:])\n", "path": "faker/build_docs.py"}, {"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\nfrom faker import utils\n\nfrom .compat import getargspec\n\n\nclass Documentor(object):\n\n def __init__(self, generator):\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len = 0\n self.already_generated = []\n\n def get_formatters(self, locale=None, excludes=None, **kwargs):\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\n )\n return formatters\n\n def get_provider_formatters(self, provider, prefix='fake.',\n with_args=True, with_defaults=True):\n\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n\n arguments = []\n faker_args = []\n faker_kwargs = {}\n\n if name == 'binary':\n faker_kwargs['length'] = 1024\n\n if with_args:\n # retrieve all parameter\n argspec = getargspec(method)\n\n lst = [x for x in argspec.args if x not in ['self', 'cls']]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if utils.is_string(default):\n default = utils.quote(default)\n else:\n # TODO check default type\n default = \"{0}\".format(default)\n\n arg = \"{0}={1}\".format(arg, default)\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == 'first':\n break\n\n if with_args != 'first':\n if argspec.varargs:\n arguments.append('*' + argspec.varargs)\n if argspec.varkw:\n arguments.append('**' + argspec.varkw)\n\n # build fake method signature\n signature = \"{0}{1}({2})\".format(prefix,\n name,\n \", \".join(arguments))\n\n # make a fake example\n example = self.generator.format(name, *faker_args, **faker_kwargs)\n\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class):\n return provider_class.__provider__\n", "path": "faker/documentor.py"}]}
2,411
820
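The faker record's golden diff boils down to two moves: give `BaseProvider` its own documentation page and exclude its formatters from every other provider/locale page. A condensed sketch of that flow is below, using only names that appear in the diff; the actual file writing is omitted, so this is an outline of `write_docs`, not a replacement for it.

```python
# Outline of the patched build_docs flow (names from the golden diff above).
from faker import Faker, documentor
from faker.config import DEFAULT_LOCALE
from faker.providers import BaseProvider

fake = Faker(locale=DEFAULT_LOCALE)
doc = documentor.Documentor(fake)

# 1. Document the shared helpers (random_choices, random_int, ...) once,
#    on their own providers/BaseProvider.rst page.
base_provider = BaseProvider(fake)
base_formatters = doc.get_provider_formatters(base_provider)

# 2. Every other provider page skips members inherited from BaseProvider,
#    so they no longer show up under faker.providers.address.
base_provider_names = [name for name in dir(BaseProvider)]
formatters = doc.get_formatters(with_args=True, with_defaults=True,
                                excludes=base_provider_names)
```

With the exclusion applied per page, `providers/BaseProvider.rst` becomes the single place where the generic helpers are documented, which is what both parts of the issue ask for.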
gh_patches_debug_20047
rasdani/github-patches
git_diff
urllib3__urllib3-418
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- This seems to fix the IncompleteRead errors I was getting I'm sometimes getting IncompleteRead errors when using twython which would bring down the connection. Found a fix online and it seems to have solved the issue so far. Fix came from: http://bobrochel.blogspot.co.nz/2010/11/bad-servers-chunked-encoding-and.html --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `urllib3/response.py` Content: ``` 1 # urllib3/response.py 2 # Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) 3 # 4 # This module is part of urllib3 and is released under 5 # the MIT License: http://www.opensource.org/licenses/mit-license.php 6 7 8 import zlib 9 import io 10 from socket import timeout as SocketTimeout 11 12 from ._collections import HTTPHeaderDict 13 from .exceptions import DecodeError, ReadTimeoutError 14 from .packages.six import string_types as basestring, binary_type 15 from .util import is_fp_closed 16 17 18 class DeflateDecoder(object): 19 20 def __init__(self): 21 self._first_try = True 22 self._data = binary_type() 23 self._obj = zlib.decompressobj() 24 25 def __getattr__(self, name): 26 return getattr(self._obj, name) 27 28 def decompress(self, data): 29 if not self._first_try: 30 return self._obj.decompress(data) 31 32 self._data += data 33 try: 34 return self._obj.decompress(data) 35 except zlib.error: 36 self._first_try = False 37 self._obj = zlib.decompressobj(-zlib.MAX_WBITS) 38 try: 39 return self.decompress(self._data) 40 finally: 41 self._data = None 42 43 44 def _get_decoder(mode): 45 if mode == 'gzip': 46 return zlib.decompressobj(16 + zlib.MAX_WBITS) 47 48 return DeflateDecoder() 49 50 51 class HTTPResponse(io.IOBase): 52 """ 53 HTTP Response container. 54 55 Backwards-compatible to httplib's HTTPResponse but the response ``body`` is 56 loaded and decoded on-demand when the ``data`` property is accessed. 57 58 Extra parameters for behaviour not present in httplib.HTTPResponse: 59 60 :param preload_content: 61 If True, the response's body will be preloaded during construction. 62 63 :param decode_content: 64 If True, attempts to decode specific content-encoding's based on headers 65 (like 'gzip' and 'deflate') will be skipped and raw data will be used 66 instead. 67 68 :param original_response: 69 When this HTTPResponse wrapper is generated from an httplib.HTTPResponse 70 object, it's convenient to include the original for debug purposes. It's 71 otherwise unused. 
72 """ 73 74 CONTENT_DECODERS = ['gzip', 'deflate'] 75 REDIRECT_STATUSES = [301, 302, 303, 307, 308] 76 77 def __init__(self, body='', headers=None, status=0, version=0, reason=None, 78 strict=0, preload_content=True, decode_content=True, 79 original_response=None, pool=None, connection=None): 80 81 self.headers = HTTPHeaderDict() 82 if headers: 83 self.headers.update(headers) 84 self.status = status 85 self.version = version 86 self.reason = reason 87 self.strict = strict 88 self.decode_content = decode_content 89 90 self._decoder = None 91 self._body = None 92 self._fp = None 93 self._original_response = original_response 94 self._fp_bytes_read = 0 95 96 if body and isinstance(body, (basestring, binary_type)): 97 self._body = body 98 99 self._pool = pool 100 self._connection = connection 101 102 if hasattr(body, 'read'): 103 self._fp = body 104 105 if preload_content and not self._body: 106 self._body = self.read(decode_content=decode_content) 107 108 def get_redirect_location(self): 109 """ 110 Should we redirect and where to? 111 112 :returns: Truthy redirect location string if we got a redirect status 113 code and valid location. ``None`` if redirect status and no 114 location. ``False`` if not a redirect status code. 115 """ 116 if self.status in self.REDIRECT_STATUSES: 117 return self.headers.get('location') 118 119 return False 120 121 def release_conn(self): 122 if not self._pool or not self._connection: 123 return 124 125 self._pool._put_conn(self._connection) 126 self._connection = None 127 128 @property 129 def data(self): 130 # For backwords-compat with earlier urllib3 0.4 and earlier. 131 if self._body: 132 return self._body 133 134 if self._fp: 135 return self.read(cache_content=True) 136 137 def tell(self): 138 """ 139 Obtain the number of bytes pulled over the wire so far. May differ from 140 the amount of content returned by :meth:``HTTPResponse.read`` if bytes 141 are encoded on the wire (e.g, compressed). 142 """ 143 return self._fp_bytes_read 144 145 def read(self, amt=None, decode_content=None, cache_content=False): 146 """ 147 Similar to :meth:`httplib.HTTPResponse.read`, but with two additional 148 parameters: ``decode_content`` and ``cache_content``. 149 150 :param amt: 151 How much of the content to read. If specified, caching is skipped 152 because it doesn't make sense to cache partial content as the full 153 response. 154 155 :param decode_content: 156 If True, will attempt to decode the body based on the 157 'content-encoding' header. 158 159 :param cache_content: 160 If True, will save the returned data such that the same result is 161 returned despite of the state of the underlying file object. This 162 is useful if you want the ``.data`` property to continue working 163 after having ``.read()`` the file object. (Overridden if ``amt`` is 164 set.) 165 """ 166 # Note: content-encoding value should be case-insensitive, per RFC 7230 167 # Section 3.2 168 content_encoding = self.headers.get('content-encoding', '').lower() 169 if self._decoder is None: 170 if content_encoding in self.CONTENT_DECODERS: 171 self._decoder = _get_decoder(content_encoding) 172 if decode_content is None: 173 decode_content = self.decode_content 174 175 if self._fp is None: 176 return 177 178 flush_decoder = False 179 180 try: 181 try: 182 if amt is None: 183 # cStringIO doesn't like amt=None 184 data = self._fp.read() 185 flush_decoder = True 186 else: 187 cache_content = False 188 data = self._fp.read(amt) 189 if amt != 0 and not data: # Platform-specific: Buggy versions of Python. 
190 # Close the connection when no data is returned 191 # 192 # This is redundant to what httplib/http.client _should_ 193 # already do. However, versions of python released before 194 # December 15, 2012 (http://bugs.python.org/issue16298) do 195 # not properly close the connection in all cases. There is 196 # no harm in redundantly calling close. 197 self._fp.close() 198 flush_decoder = True 199 200 except SocketTimeout: 201 # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but 202 # there is yet no clean way to get at it from this context. 203 raise ReadTimeoutError(self._pool, None, 'Read timed out.') 204 205 self._fp_bytes_read += len(data) 206 207 try: 208 if decode_content and self._decoder: 209 data = self._decoder.decompress(data) 210 except (IOError, zlib.error) as e: 211 raise DecodeError( 212 "Received response with content-encoding: %s, but " 213 "failed to decode it." % content_encoding, e) 214 215 if flush_decoder and decode_content and self._decoder: 216 buf = self._decoder.decompress(binary_type()) 217 data += buf + self._decoder.flush() 218 219 if cache_content: 220 self._body = data 221 222 return data 223 224 finally: 225 if self._original_response and self._original_response.isclosed(): 226 self.release_conn() 227 228 def stream(self, amt=2**16, decode_content=None): 229 """ 230 A generator wrapper for the read() method. A call will block until 231 ``amt`` bytes have been read from the connection or until the 232 connection is closed. 233 234 :param amt: 235 How much of the content to read. The generator will return up to 236 much data per iteration, but may return less. This is particularly 237 likely when using compressed data. However, the empty string will 238 never be returned. 239 240 :param decode_content: 241 If True, will attempt to decode the body based on the 242 'content-encoding' header. 243 """ 244 while not is_fp_closed(self._fp): 245 data = self.read(amt=amt, decode_content=decode_content) 246 247 if data: 248 yield data 249 250 @classmethod 251 def from_httplib(ResponseCls, r, **response_kw): 252 """ 253 Given an :class:`httplib.HTTPResponse` instance ``r``, return a 254 corresponding :class:`urllib3.response.HTTPResponse` object. 255 256 Remaining parameters are passed to the HTTPResponse constructor, along 257 with ``original_response=r``. 
258 """ 259 260 headers = HTTPHeaderDict() 261 for k, v in r.getheaders(): 262 headers.add(k, v) 263 264 # HTTPResponse objects in Python 3 don't have a .strict attribute 265 strict = getattr(r, 'strict', 0) 266 return ResponseCls(body=r, 267 headers=headers, 268 status=r.status, 269 version=r.version, 270 reason=r.reason, 271 strict=strict, 272 original_response=r, 273 **response_kw) 274 275 # Backwards-compatibility methods for httplib.HTTPResponse 276 def getheaders(self): 277 return self.headers 278 279 def getheader(self, name, default=None): 280 return self.headers.get(name, default) 281 282 # Overrides from io.IOBase 283 def close(self): 284 if not self.closed: 285 self._fp.close() 286 287 @property 288 def closed(self): 289 if self._fp is None: 290 return True 291 elif hasattr(self._fp, 'closed'): 292 return self._fp.closed 293 elif hasattr(self._fp, 'isclosed'): # Python 2 294 return self._fp.isclosed() 295 else: 296 return True 297 298 def fileno(self): 299 if self._fp is None: 300 raise IOError("HTTPResponse has no file to get a fileno from") 301 elif hasattr(self._fp, "fileno"): 302 return self._fp.fileno() 303 else: 304 raise IOError("The file-like object this HTTPResponse is wrapped " 305 "around has no file descriptor") 306 307 def flush(self): 308 if self._fp is not None and hasattr(self._fp, 'flush'): 309 return self._fp.flush() 310 311 def readable(self): 312 return True 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/urllib3/response.py b/urllib3/response.py --- a/urllib3/response.py +++ b/urllib3/response.py @@ -10,9 +10,10 @@ from socket import timeout as SocketTimeout from ._collections import HTTPHeaderDict -from .exceptions import DecodeError, ReadTimeoutError +from .exceptions import ConnectionError, DecodeError, ReadTimeoutError from .packages.six import string_types as basestring, binary_type from .util import is_fp_closed +from .connection import HTTPException class DeflateDecoder(object): @@ -202,6 +203,10 @@ # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, 'Read timed out.') + except HTTPException as e: + # This includes IncompleteRead. + raise ConnectionError('Connection failed: %r' % e, e) + self._fp_bytes_read += len(data) try:
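The essence of this diff: low-level `httplib`/`http.client` failures raised while reading the body — `IncompleteRead` is a subclass of `HTTPException` — are re-raised as urllib3's own `ConnectionError` instead of escaping to the caller. A stripped-down sketch of that error-handling shape is below; the helper name `_read_chunk` is invented for illustration, and it assumes the `ConnectionError` class the diff imports exists in `urllib3.exceptions` at this revision.

```python
# Illustrative only: _read_chunk is a hypothetical helper, not urllib3 API.
# It mirrors the patched error handling inside HTTPResponse.read() above.
from socket import timeout as SocketTimeout

from urllib3.connection import HTTPException      # re-exported http(lib) exception base
from urllib3.exceptions import ConnectionError, ReadTimeoutError


def _read_chunk(fp, pool, amt=None):
    try:
        return fp.read() if amt is None else fp.read(amt)
    except SocketTimeout:
        raise ReadTimeoutError(pool, None, 'Read timed out.')
    except HTTPException as e:
        # IncompleteRead (and any other httplib-level error) surfaces as a
        # urllib3 error rather than a raw httplib exception.
        raise ConnectionError('Connection failed: %r' % e, e)
```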
{"golden_diff": "diff --git a/urllib3/response.py b/urllib3/response.py\n--- a/urllib3/response.py\n+++ b/urllib3/response.py\n@@ -10,9 +10,10 @@\n from socket import timeout as SocketTimeout\n \n from ._collections import HTTPHeaderDict\n-from .exceptions import DecodeError, ReadTimeoutError\n+from .exceptions import ConnectionError, DecodeError, ReadTimeoutError\n from .packages.six import string_types as basestring, binary_type\n from .util import is_fp_closed\n+from .connection import HTTPException\n \n \n class DeflateDecoder(object):\n@@ -202,6 +203,10 @@\n # there is yet no clean way to get at it from this context.\n raise ReadTimeoutError(self._pool, None, 'Read timed out.')\n \n+ except HTTPException as e:\n+ # This includes IncompleteRead.\n+ raise ConnectionError('Connection failed: %r' % e, e)\n+\n self._fp_bytes_read += len(data)\n \n try:\n", "issue": "This seems to fix the IncompleteRead errors I was getting\nI'm sometimes getting IncompleteRead errors when using twython which would bring down the connection. Found a fix online and it seems to have solved the issue so far.\n\nFix came from:\nhttp://bobrochel.blogspot.co.nz/2010/11/bad-servers-chunked-encoding-and.html\n\n", "before_files": [{"content": "# urllib3/response.py\n# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)\n#\n# This module is part of urllib3 and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\nimport zlib\nimport io\nfrom socket import timeout as SocketTimeout\n\nfrom ._collections import HTTPHeaderDict\nfrom .exceptions import DecodeError, ReadTimeoutError\nfrom .packages.six import string_types as basestring, binary_type\nfrom .util import is_fp_closed\n\n\nclass DeflateDecoder(object):\n\n def __init__(self):\n self._first_try = True\n self._data = binary_type()\n self._obj = zlib.decompressobj()\n\n def __getattr__(self, name):\n return getattr(self._obj, name)\n\n def decompress(self, data):\n if not self._first_try:\n return self._obj.decompress(data)\n\n self._data += data\n try:\n return self._obj.decompress(data)\n except zlib.error:\n self._first_try = False\n self._obj = zlib.decompressobj(-zlib.MAX_WBITS)\n try:\n return self.decompress(self._data)\n finally:\n self._data = None\n\n\ndef _get_decoder(mode):\n if mode == 'gzip':\n return zlib.decompressobj(16 + zlib.MAX_WBITS)\n\n return DeflateDecoder()\n\n\nclass HTTPResponse(io.IOBase):\n \"\"\"\n HTTP Response container.\n\n Backwards-compatible to httplib's HTTPResponse but the response ``body`` is\n loaded and decoded on-demand when the ``data`` property is accessed.\n\n Extra parameters for behaviour not present in httplib.HTTPResponse:\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, attempts to decode specific content-encoding's based on headers\n (like 'gzip' and 'deflate') will be skipped and raw data will be used\n instead.\n\n :param original_response:\n When this HTTPResponse wrapper is generated from an httplib.HTTPResponse\n object, it's convenient to include the original for debug purposes. 
It's\n otherwise unused.\n \"\"\"\n\n CONTENT_DECODERS = ['gzip', 'deflate']\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n def __init__(self, body='', headers=None, status=0, version=0, reason=None,\n strict=0, preload_content=True, decode_content=True,\n original_response=None, pool=None, connection=None):\n\n self.headers = HTTPHeaderDict()\n if headers:\n self.headers.update(headers)\n self.status = status\n self.version = version\n self.reason = reason\n self.strict = strict\n self.decode_content = decode_content\n\n self._decoder = None\n self._body = None\n self._fp = None\n self._original_response = original_response\n self._fp_bytes_read = 0\n\n if body and isinstance(body, (basestring, binary_type)):\n self._body = body\n\n self._pool = pool\n self._connection = connection\n\n if hasattr(body, 'read'):\n self._fp = body\n\n if preload_content and not self._body:\n self._body = self.read(decode_content=decode_content)\n\n def get_redirect_location(self):\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get('location')\n\n return False\n\n def release_conn(self):\n if not self._pool or not self._connection:\n return\n\n self._pool._put_conn(self._connection)\n self._connection = None\n\n @property\n def data(self):\n # For backwords-compat with earlier urllib3 0.4 and earlier.\n if self._body:\n return self._body\n\n if self._fp:\n return self.read(cache_content=True)\n\n def tell(self):\n \"\"\"\n Obtain the number of bytes pulled over the wire so far. May differ from\n the amount of content returned by :meth:``HTTPResponse.read`` if bytes\n are encoded on the wire (e.g, compressed).\n \"\"\"\n return self._fp_bytes_read\n\n def read(self, amt=None, decode_content=None, cache_content=False):\n \"\"\"\n Similar to :meth:`httplib.HTTPResponse.read`, but with two additional\n parameters: ``decode_content`` and ``cache_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param cache_content:\n If True, will save the returned data such that the same result is\n returned despite of the state of the underlying file object. This\n is useful if you want the ``.data`` property to continue working\n after having ``.read()`` the file object. (Overridden if ``amt`` is\n set.)\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get('content-encoding', '').lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n if decode_content is None:\n decode_content = self.decode_content\n\n if self._fp is None:\n return\n\n flush_decoder = False\n\n try:\n try:\n if amt is None:\n # cStringIO doesn't like amt=None\n data = self._fp.read()\n flush_decoder = True\n else:\n cache_content = False\n data = self._fp.read(amt)\n if amt != 0 and not data: # Platform-specific: Buggy versions of Python.\n # Close the connection when no data is returned\n #\n # This is redundant to what httplib/http.client _should_\n # already do. 
However, versions of python released before\n # December 15, 2012 (http://bugs.python.org/issue16298) do\n # not properly close the connection in all cases. There is\n # no harm in redundantly calling close.\n self._fp.close()\n flush_decoder = True\n\n except SocketTimeout:\n # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but\n # there is yet no clean way to get at it from this context.\n raise ReadTimeoutError(self._pool, None, 'Read timed out.')\n\n self._fp_bytes_read += len(data)\n\n try:\n if decode_content and self._decoder:\n data = self._decoder.decompress(data)\n except (IOError, zlib.error) as e:\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding, e)\n\n if flush_decoder and decode_content and self._decoder:\n buf = self._decoder.decompress(binary_type())\n data += buf + self._decoder.flush()\n\n if cache_content:\n self._body = data\n\n return data\n\n finally:\n if self._original_response and self._original_response.isclosed():\n self.release_conn()\n\n def stream(self, amt=2**16, decode_content=None):\n \"\"\"\n A generator wrapper for the read() method. A call will block until\n ``amt`` bytes have been read from the connection or until the\n connection is closed.\n\n :param amt:\n How much of the content to read. The generator will return up to\n much data per iteration, but may return less. This is particularly\n likely when using compressed data. However, the empty string will\n never be returned.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n while not is_fp_closed(self._fp):\n data = self.read(amt=amt, decode_content=decode_content)\n\n if data:\n yield data\n\n @classmethod\n def from_httplib(ResponseCls, r, **response_kw):\n \"\"\"\n Given an :class:`httplib.HTTPResponse` instance ``r``, return a\n corresponding :class:`urllib3.response.HTTPResponse` object.\n\n Remaining parameters are passed to the HTTPResponse constructor, along\n with ``original_response=r``.\n \"\"\"\n\n headers = HTTPHeaderDict()\n for k, v in r.getheaders():\n headers.add(k, v)\n\n # HTTPResponse objects in Python 3 don't have a .strict attribute\n strict = getattr(r, 'strict', 0)\n return ResponseCls(body=r,\n headers=headers,\n status=r.status,\n version=r.version,\n reason=r.reason,\n strict=strict,\n original_response=r,\n **response_kw)\n\n # Backwards-compatibility methods for httplib.HTTPResponse\n def getheaders(self):\n return self.headers\n\n def getheader(self, name, default=None):\n return self.headers.get(name, default)\n\n # Overrides from io.IOBase\n def close(self):\n if not self.closed:\n self._fp.close()\n\n @property\n def closed(self):\n if self._fp is None:\n return True\n elif hasattr(self._fp, 'closed'):\n return self._fp.closed\n elif hasattr(self._fp, 'isclosed'): # Python 2\n return self._fp.isclosed()\n else:\n return True\n\n def fileno(self):\n if self._fp is None:\n raise IOError(\"HTTPResponse has no file to get a fileno from\")\n elif hasattr(self._fp, \"fileno\"):\n return self._fp.fileno()\n else:\n raise IOError(\"The file-like object this HTTPResponse is wrapped \"\n \"around has no file descriptor\")\n\n def flush(self):\n if self._fp is not None and hasattr(self._fp, 'flush'):\n return self._fp.flush()\n\n def readable(self):\n return True\n", "path": "urllib3/response.py"}], "after_files": [{"content": "# urllib3/response.py\n# Copyright 2008-2013 Andrey Petrov and contributors (see 
CONTRIBUTORS.txt)\n#\n# This module is part of urllib3 and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\nimport zlib\nimport io\nfrom socket import timeout as SocketTimeout\n\nfrom ._collections import HTTPHeaderDict\nfrom .exceptions import ConnectionError, DecodeError, ReadTimeoutError\nfrom .packages.six import string_types as basestring, binary_type\nfrom .util import is_fp_closed\nfrom .connection import HTTPException\n\n\nclass DeflateDecoder(object):\n\n def __init__(self):\n self._first_try = True\n self._data = binary_type()\n self._obj = zlib.decompressobj()\n\n def __getattr__(self, name):\n return getattr(self._obj, name)\n\n def decompress(self, data):\n if not self._first_try:\n return self._obj.decompress(data)\n\n self._data += data\n try:\n return self._obj.decompress(data)\n except zlib.error:\n self._first_try = False\n self._obj = zlib.decompressobj(-zlib.MAX_WBITS)\n try:\n return self.decompress(self._data)\n finally:\n self._data = None\n\n\ndef _get_decoder(mode):\n if mode == 'gzip':\n return zlib.decompressobj(16 + zlib.MAX_WBITS)\n\n return DeflateDecoder()\n\n\nclass HTTPResponse(io.IOBase):\n \"\"\"\n HTTP Response container.\n\n Backwards-compatible to httplib's HTTPResponse but the response ``body`` is\n loaded and decoded on-demand when the ``data`` property is accessed.\n\n Extra parameters for behaviour not present in httplib.HTTPResponse:\n\n :param preload_content:\n If True, the response's body will be preloaded during construction.\n\n :param decode_content:\n If True, attempts to decode specific content-encoding's based on headers\n (like 'gzip' and 'deflate') will be skipped and raw data will be used\n instead.\n\n :param original_response:\n When this HTTPResponse wrapper is generated from an httplib.HTTPResponse\n object, it's convenient to include the original for debug purposes. It's\n otherwise unused.\n \"\"\"\n\n CONTENT_DECODERS = ['gzip', 'deflate']\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n def __init__(self, body='', headers=None, status=0, version=0, reason=None,\n strict=0, preload_content=True, decode_content=True,\n original_response=None, pool=None, connection=None):\n\n self.headers = HTTPHeaderDict()\n if headers:\n self.headers.update(headers)\n self.status = status\n self.version = version\n self.reason = reason\n self.strict = strict\n self.decode_content = decode_content\n\n self._decoder = None\n self._body = None\n self._fp = None\n self._original_response = original_response\n self._fp_bytes_read = 0\n\n if body and isinstance(body, (basestring, binary_type)):\n self._body = body\n\n self._pool = pool\n self._connection = connection\n\n if hasattr(body, 'read'):\n self._fp = body\n\n if preload_content and not self._body:\n self._body = self.read(decode_content=decode_content)\n\n def get_redirect_location(self):\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. 
``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get('location')\n\n return False\n\n def release_conn(self):\n if not self._pool or not self._connection:\n return\n\n self._pool._put_conn(self._connection)\n self._connection = None\n\n @property\n def data(self):\n # For backwords-compat with earlier urllib3 0.4 and earlier.\n if self._body:\n return self._body\n\n if self._fp:\n return self.read(cache_content=True)\n\n def tell(self):\n \"\"\"\n Obtain the number of bytes pulled over the wire so far. May differ from\n the amount of content returned by :meth:``HTTPResponse.read`` if bytes\n are encoded on the wire (e.g, compressed).\n \"\"\"\n return self._fp_bytes_read\n\n def read(self, amt=None, decode_content=None, cache_content=False):\n \"\"\"\n Similar to :meth:`httplib.HTTPResponse.read`, but with two additional\n parameters: ``decode_content`` and ``cache_content``.\n\n :param amt:\n How much of the content to read. If specified, caching is skipped\n because it doesn't make sense to cache partial content as the full\n response.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n\n :param cache_content:\n If True, will save the returned data such that the same result is\n returned despite of the state of the underlying file object. This\n is useful if you want the ``.data`` property to continue working\n after having ``.read()`` the file object. (Overridden if ``amt`` is\n set.)\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get('content-encoding', '').lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n if decode_content is None:\n decode_content = self.decode_content\n\n if self._fp is None:\n return\n\n flush_decoder = False\n\n try:\n try:\n if amt is None:\n # cStringIO doesn't like amt=None\n data = self._fp.read()\n flush_decoder = True\n else:\n cache_content = False\n data = self._fp.read(amt)\n if amt != 0 and not data: # Platform-specific: Buggy versions of Python.\n # Close the connection when no data is returned\n #\n # This is redundant to what httplib/http.client _should_\n # already do. However, versions of python released before\n # December 15, 2012 (http://bugs.python.org/issue16298) do\n # not properly close the connection in all cases. 
There is\n # no harm in redundantly calling close.\n self._fp.close()\n flush_decoder = True\n\n except SocketTimeout:\n # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but\n # there is yet no clean way to get at it from this context.\n raise ReadTimeoutError(self._pool, None, 'Read timed out.')\n\n except HTTPException as e:\n # This includes IncompleteRead.\n raise ConnectionError('Connection failed: %r' % e, e)\n\n self._fp_bytes_read += len(data)\n\n try:\n if decode_content and self._decoder:\n data = self._decoder.decompress(data)\n except (IOError, zlib.error) as e:\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding, e)\n\n if flush_decoder and decode_content and self._decoder:\n buf = self._decoder.decompress(binary_type())\n data += buf + self._decoder.flush()\n\n if cache_content:\n self._body = data\n\n return data\n\n finally:\n if self._original_response and self._original_response.isclosed():\n self.release_conn()\n\n def stream(self, amt=2**16, decode_content=None):\n \"\"\"\n A generator wrapper for the read() method. A call will block until\n ``amt`` bytes have been read from the connection or until the\n connection is closed.\n\n :param amt:\n How much of the content to read. The generator will return up to\n much data per iteration, but may return less. This is particularly\n likely when using compressed data. However, the empty string will\n never be returned.\n\n :param decode_content:\n If True, will attempt to decode the body based on the\n 'content-encoding' header.\n \"\"\"\n while not is_fp_closed(self._fp):\n data = self.read(amt=amt, decode_content=decode_content)\n\n if data:\n yield data\n\n @classmethod\n def from_httplib(ResponseCls, r, **response_kw):\n \"\"\"\n Given an :class:`httplib.HTTPResponse` instance ``r``, return a\n corresponding :class:`urllib3.response.HTTPResponse` object.\n\n Remaining parameters are passed to the HTTPResponse constructor, along\n with ``original_response=r``.\n \"\"\"\n\n headers = HTTPHeaderDict()\n for k, v in r.getheaders():\n headers.add(k, v)\n\n # HTTPResponse objects in Python 3 don't have a .strict attribute\n strict = getattr(r, 'strict', 0)\n return ResponseCls(body=r,\n headers=headers,\n status=r.status,\n version=r.version,\n reason=r.reason,\n strict=strict,\n original_response=r,\n **response_kw)\n\n # Backwards-compatibility methods for httplib.HTTPResponse\n def getheaders(self):\n return self.headers\n\n def getheader(self, name, default=None):\n return self.headers.get(name, default)\n\n # Overrides from io.IOBase\n def close(self):\n if not self.closed:\n self._fp.close()\n\n @property\n def closed(self):\n if self._fp is None:\n return True\n elif hasattr(self._fp, 'closed'):\n return self._fp.closed\n elif hasattr(self._fp, 'isclosed'): # Python 2\n return self._fp.isclosed()\n else:\n return True\n\n def fileno(self):\n if self._fp is None:\n raise IOError(\"HTTPResponse has no file to get a fileno from\")\n elif hasattr(self._fp, \"fileno\"):\n return self._fp.fileno()\n else:\n raise IOError(\"The file-like object this HTTPResponse is wrapped \"\n \"around has no file descriptor\")\n\n def flush(self):\n if self._fp is not None and hasattr(self._fp, 'flush'):\n return self._fp.flush()\n\n def readable(self):\n return True\n", "path": "urllib3/response.py"}]}
3,480
226
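The golden diff in this row catches `HTTPException` (which covers `IncompleteRead`) while reading the response body and re-raises it as urllib3's `ConnectionError`. Below is a minimal, self-contained sketch of that wrapping pattern using only the standard library; `PoolConnectionError`, `read_body`, and `BrokenBody` are illustrative stand-ins, not urllib3 names.

```python
from http.client import HTTPException, IncompleteRead


class PoolConnectionError(Exception):
    """Stand-in for the pool-level ConnectionError raised in the patch."""


def read_body(fp):
    """Read a response body, translating low-level httplib failures."""
    try:
        return fp.read()
    except HTTPException as e:
        # IncompleteRead is a subclass of HTTPException, so truncated
        # chunked responses are wrapped instead of escaping to the caller.
        raise PoolConnectionError('Connection failed: %r' % e, e)


if __name__ == '__main__':
    class BrokenBody:
        def read(self):
            # Simulate a server that drops the connection mid-response.
            raise IncompleteRead(b'partial data')

    try:
        read_body(BrokenBody())
    except PoolConnectionError as err:
        print('wrapped:', err)
```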
gh_patches_debug_2455
rasdani/github-patches
git_diff
ansible-collections__community.aws-1207
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ec2_customer_gateway: bgp_asn is not required ### Summary The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter. It says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method: ``` def ensure_cgw_present(self, bgp_asn, ip_address): if not bgp_asn: bgp_asn = 65000 response = self.ec2.create_customer_gateway( DryRun=False, Type='ipsec.1', PublicIp=ip_address, BgpAsn=bgp_asn, ) return response ### Issue Type Documentation Report ### Component Name ec2_customer_gateway ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.12.4] config file = None configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)] jinja version = 3.1.1 libyaml = True ``` ### Collection Versions ```console (paste below) $ ansible-galaxy collection list ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment main branch, as of 2022-04-18. ### Additional Information Suggested rewording: ``` options: bgp_asn: description: - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000. type: int ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/modules/ec2_customer_gateway.py` Content: ``` 1 #!/usr/bin/python 2 # 3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 4 5 from __future__ import absolute_import, division, print_function 6 __metaclass__ = type 7 8 9 DOCUMENTATION = ''' 10 --- 11 module: ec2_customer_gateway 12 version_added: 1.0.0 13 short_description: Manage an AWS customer gateway 14 description: 15 - Manage an AWS customer gateway. 16 author: Michael Baydoun (@MichaelBaydoun) 17 notes: 18 - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the 19 first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent 20 requests do not create new customer gateway resources. 21 - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use 22 customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details. 23 options: 24 bgp_asn: 25 description: 26 - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). 27 type: int 28 ip_address: 29 description: 30 - Internet-routable IP address for customers gateway, must be a static address. 31 required: true 32 type: str 33 name: 34 description: 35 - Name of the customer gateway. 36 required: true 37 type: str 38 routing: 39 description: 40 - The type of routing. 
41 choices: ['static', 'dynamic'] 42 default: dynamic 43 type: str 44 state: 45 description: 46 - Create or terminate the Customer Gateway. 47 default: present 48 choices: [ 'present', 'absent' ] 49 type: str 50 extends_documentation_fragment: 51 - amazon.aws.aws 52 - amazon.aws.ec2 53 54 ''' 55 56 EXAMPLES = ''' 57 - name: Create Customer Gateway 58 community.aws.ec2_customer_gateway: 59 bgp_asn: 12345 60 ip_address: 1.2.3.4 61 name: IndianapolisOffice 62 region: us-east-1 63 register: cgw 64 65 - name: Delete Customer Gateway 66 community.aws.ec2_customer_gateway: 67 ip_address: 1.2.3.4 68 name: IndianapolisOffice 69 state: absent 70 region: us-east-1 71 register: cgw 72 ''' 73 74 RETURN = ''' 75 gateway.customer_gateways: 76 description: details about the gateway that was created. 77 returned: success 78 type: complex 79 contains: 80 bgp_asn: 81 description: The Border Gateway Autonomous System Number. 82 returned: when exists and gateway is available. 83 sample: 65123 84 type: str 85 customer_gateway_id: 86 description: gateway id assigned by amazon. 87 returned: when exists and gateway is available. 88 sample: cgw-cb6386a2 89 type: str 90 ip_address: 91 description: ip address of your gateway device. 92 returned: when exists and gateway is available. 93 sample: 1.2.3.4 94 type: str 95 state: 96 description: state of gateway. 97 returned: when gateway exists and is available. 98 sample: available 99 type: str 100 tags: 101 description: Any tags on the gateway. 102 returned: when gateway exists and is available, and when tags exist. 103 type: list 104 type: 105 description: encryption type. 106 returned: when gateway exists and is available. 107 sample: ipsec.1 108 type: str 109 ''' 110 111 try: 112 import botocore 113 except ImportError: 114 pass # Handled by AnsibleAWSModule 115 116 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict 117 118 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule 119 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry 120 121 122 class Ec2CustomerGatewayManager: 123 124 def __init__(self, module): 125 self.module = module 126 127 try: 128 self.ec2 = module.client('ec2') 129 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 130 module.fail_json_aws(e, msg='Failed to connect to AWS') 131 132 @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState']) 133 def ensure_cgw_absent(self, gw_id): 134 response = self.ec2.delete_customer_gateway( 135 DryRun=False, 136 CustomerGatewayId=gw_id 137 ) 138 return response 139 140 def ensure_cgw_present(self, bgp_asn, ip_address): 141 if not bgp_asn: 142 bgp_asn = 65000 143 response = self.ec2.create_customer_gateway( 144 DryRun=False, 145 Type='ipsec.1', 146 PublicIp=ip_address, 147 BgpAsn=bgp_asn, 148 ) 149 return response 150 151 def tag_cgw_name(self, gw_id, name): 152 response = self.ec2.create_tags( 153 DryRun=False, 154 Resources=[ 155 gw_id, 156 ], 157 Tags=[ 158 { 159 'Key': 'Name', 160 'Value': name 161 }, 162 ] 163 ) 164 return response 165 166 def describe_gateways(self, ip_address): 167 response = self.ec2.describe_customer_gateways( 168 DryRun=False, 169 Filters=[ 170 { 171 'Name': 'state', 172 'Values': [ 173 'available', 174 ] 175 }, 176 { 177 'Name': 'ip-address', 178 'Values': [ 179 ip_address, 180 ] 181 } 182 ] 183 ) 184 return response 185 186 187 def main(): 188 argument_spec = dict( 189 bgp_asn=dict(required=False, type='int'), 190 
ip_address=dict(required=True), 191 name=dict(required=True), 192 routing=dict(default='dynamic', choices=['dynamic', 'static']), 193 state=dict(default='present', choices=['present', 'absent']), 194 ) 195 196 module = AnsibleAWSModule( 197 argument_spec=argument_spec, 198 supports_check_mode=True, 199 required_if=[ 200 ('routing', 'dynamic', ['bgp_asn']) 201 ] 202 ) 203 204 gw_mgr = Ec2CustomerGatewayManager(module) 205 206 name = module.params.get('name') 207 208 existing = gw_mgr.describe_gateways(module.params['ip_address']) 209 210 results = dict(changed=False) 211 if module.params['state'] == 'present': 212 if existing['CustomerGateways']: 213 existing['CustomerGateway'] = existing['CustomerGateways'][0] 214 results['gateway'] = existing 215 if existing['CustomerGateway']['Tags']: 216 tag_array = existing['CustomerGateway']['Tags'] 217 for key, value in enumerate(tag_array): 218 if value['Key'] == 'Name': 219 current_name = value['Value'] 220 if current_name != name: 221 results['name'] = gw_mgr.tag_cgw_name( 222 results['gateway']['CustomerGateway']['CustomerGatewayId'], 223 module.params['name'], 224 ) 225 results['changed'] = True 226 else: 227 if not module.check_mode: 228 results['gateway'] = gw_mgr.ensure_cgw_present( 229 module.params['bgp_asn'], 230 module.params['ip_address'], 231 ) 232 results['name'] = gw_mgr.tag_cgw_name( 233 results['gateway']['CustomerGateway']['CustomerGatewayId'], 234 module.params['name'], 235 ) 236 results['changed'] = True 237 238 elif module.params['state'] == 'absent': 239 if existing['CustomerGateways']: 240 existing['CustomerGateway'] = existing['CustomerGateways'][0] 241 results['gateway'] = existing 242 if not module.check_mode: 243 results['gateway'] = gw_mgr.ensure_cgw_absent( 244 existing['CustomerGateway']['CustomerGatewayId'] 245 ) 246 results['changed'] = True 247 248 pretty_results = camel_dict_to_snake_dict(results) 249 module.exit_json(**pretty_results) 250 251 252 if __name__ == '__main__': 253 main() 254 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py --- a/plugins/modules/ec2_customer_gateway.py +++ b/plugins/modules/ec2_customer_gateway.py @@ -23,7 +23,8 @@ options: bgp_asn: description: - - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present). + - Border Gateway Protocol (BGP) Autonomous System Number (ASN). + - Defaults to C(65000) if not specified when I(state=present). type: int ip_address: description:
{"golden_diff": "diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py\n--- a/plugins/modules/ec2_customer_gateway.py\n+++ b/plugins/modules/ec2_customer_gateway.py\n@@ -23,7 +23,8 @@\n options:\n bgp_asn:\n description:\n- - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n+ - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n", "issue": "ec2_customer_gateway: bgp_asn is not required\n### Summary\n\nThe ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter.\r\n\r\nIt says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method:\r\n\r\n```\r\n def ensure_cgw_present(self, bgp_asn, ip_address):\r\n if not bgp_asn:\r\n bgp_asn = 65000\r\n response = self.ec2.create_customer_gateway(\r\n DryRun=False,\r\n Type='ipsec.1',\r\n PublicIp=ip_address,\r\n BgpAsn=bgp_asn,\r\n )\r\n return response\n\n### Issue Type\n\nDocumentation Report\n\n### Component Name\n\nec2_customer_gateway\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.4]\r\n config file = None\r\n configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible\r\n ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible\r\n python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]\r\n jinja version = 3.1.1\r\n libyaml = True\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\n$ ansible-galaxy collection list\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nmain branch, as of 2022-04-18.\n\n### Additional Information\n\nSuggested rewording:\r\n\r\n```\r\noptions:\r\n bgp_asn:\r\n description:\r\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000.\r\n type: int\r\n```\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. 
See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n 
Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. 
See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = 
self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py"}]}
3,184
136
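The change in this row is documentation-only: `ensure_cgw_present()` already falls back to an ASN of 65000 when `bgp_asn` is omitted. The sketch below lifts that fallback out of the module class so it can be exercised without boto3 or AWS credentials; `FakeEc2Client` is a hypothetical stand-in for the real EC2 client.

```python
class FakeEc2Client:
    """Stand-in for the boto3 EC2 client so the example runs offline."""

    def create_customer_gateway(self, **kwargs):
        # Echo the request back so the chosen BgpAsn is visible.
        return kwargs


def ensure_cgw_present(ec2, bgp_asn, ip_address):
    # Same fallback as the module method: an omitted ASN becomes 65000.
    if not bgp_asn:
        bgp_asn = 65000
    return ec2.create_customer_gateway(
        DryRun=False,
        Type='ipsec.1',
        PublicIp=ip_address,
        BgpAsn=bgp_asn,
    )


if __name__ == '__main__':
    ec2 = FakeEc2Client()
    print(ensure_cgw_present(ec2, None, '1.2.3.4'))   # BgpAsn defaults to 65000
    print(ensure_cgw_present(ec2, 12345, '1.2.3.4'))  # explicit ASN is preserved
```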
gh_patches_debug_32418
rasdani/github-patches
git_diff
internetarchive__openlibrary-5998
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add covers to standard ebooks script Subtask of #5792 We discovered in #5993 (Thank you @hornc !) that our import endpoint already supports covers! We just need `cover: "http://foo..."` in the json. Now we just need to update the standard ebooks importer to include the cover in the import request. ### Related files https://github.com/internetarchive/openlibrary/blob/master/scripts/import_standard_ebooks.py --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/import_standard_ebooks.py` Content: ``` 1 #! /usr/bin/env python 2 import json 3 import requests 4 import time 5 from typing import Any, Optional 6 7 import os.path as path 8 9 import feedparser 10 11 from openlibrary.core.imports import Batch 12 from scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI 13 from openlibrary.config import load_config 14 from infogami import config # noqa: F401 15 16 FEED_URL = 'https://standardebooks.org/opds/all' 17 LAST_UPDATED_TIME = './standard_ebooks_last_updated.txt' 18 19 20 def get_feed(): 21 """Fetches and returns Standard Ebook's feed.""" 22 r = requests.get(FEED_URL) 23 return feedparser.parse(r.text) 24 25 26 def map_data(entry) -> dict[str, Any]: 27 """Maps Standard Ebooks feed entry to an Open Library import object.""" 28 std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '') 29 return { 30 "title": entry.title, 31 "source_records": [f"standard_ebooks:{std_ebooks_id}"], 32 "publishers": [entry.publisher], 33 "publish_date": entry.dc_issued[0:4], 34 "authors": [{"name": author.name} for author in entry.authors], 35 "description": entry.content[0].value, 36 "subjects": [tag.term for tag in entry.tags], 37 "identifiers": { 38 "standard_ebooks": [std_ebooks_id] 39 } 40 } 41 42 43 def create_batch(records: list[dict[str, str]]) -> None: 44 """Creates Standard Ebook batch import job. 45 46 Attempts to find existing Standard Ebooks import batch. 47 If nothing is found, a new batch is created. All of the 48 given import records are added to the batch job as JSON strings. 49 """ 50 now = time.gmtime(time.time()) 51 batch_name = f'standardebooks-{now.tm_year}{now.tm_mon}' 52 batch = Batch.find(batch_name) or Batch.new(batch_name) 53 batch.add_items([{ 54 'ia_id': r['source_records'][0], 55 'data': json.dumps(r)} for r in records] 56 ) 57 58 59 def get_last_updated_time() -> Optional[str]: 60 """Gets date of last import job. 61 62 Last updated dates are read from a local file. If no 63 file exists, None is returned. Last updated date is 64 expected to be in HTTP-date format: 65 https://httpwg.org/specs/rfc7231.html#http.date 66 67 returns last updated date string or None 68 """ 69 if path.exists(LAST_UPDATED_TIME): 70 with open(LAST_UPDATED_TIME) as f: 71 return f.readline() 72 73 return None 74 75 76 def find_last_updated() -> Optional[str]: 77 """Fetches and returns Standard Ebooks most recent update date. 78 79 Returns None if the last modified date is not included in the 80 response headers. 81 """ 82 r = requests.head(FEED_URL) 83 return r.headers['last-modified'] if r.ok else None 84 85 86 def convert_date_string(date_string: Optional[str]) -> time.struct_time: 87 """Converts HTTP-date format string into a struct_time object. 
88 89 The date_string will be formatted similarly to this: 90 Fri, 05 Nov 2021 03:50:24 GMT 91 92 returns struct_time representation of the given time, or the 93 epoch if no time given. 94 95 >>> str(convert_date_string(None)) # doctest: +NORMALIZE_WHITESPACE 96 'time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, 97 tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)' 98 99 >>> convert_date_string("") # doctest: +ELLIPSIS 100 time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, ... 101 102 >>> convert_date_string(0) # doctest: +ELLIPSIS 103 time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, ... 104 105 >>> convert_date_string("Fri, 05 Nov 2021 03:50:24 GMT") # doctest: +ELLIPSIS 106 time.struct_time(tm_year=2021, tm_mon=11, tm_mday=5, tm_hour=3, tm_min=50, ... 107 """ 108 if not date_string: 109 return time.gmtime(0) 110 return time.strptime(date_string[5:-4], '%d %b %Y %H:%M:%S') 111 112 113 def filter_modified_since( 114 entries, 115 modified_since: time.struct_time 116 ) -> list[dict[str, str]]: 117 """Returns a list of import objects.""" 118 return [map_data(e) for e in entries if e.updated_parsed > modified_since] 119 120 121 def import_job( 122 ol_config: str, 123 dry_run=False, 124 ) -> None: 125 """ 126 :param ol_config: Path to openlibrary.yml file 127 :param dry_run: If true, only print out records to import 128 """ 129 load_config(ol_config) 130 131 # Make HEAD request to get last-modified time 132 last_modified = find_last_updated() 133 134 if not last_modified: 135 print(f'HEAD request to {FEED_URL} failed. Not attempting GET request.') 136 return 137 138 print(f'Last-Modified date: {last_modified}') 139 140 updated_on = get_last_updated_time() 141 if last_modified == updated_on: 142 print(f'No new updates since {updated_on}. Processing completed.') 143 return 144 145 print(f'Last import job: {updated_on or "No date found"}') 146 # Get feed: 147 d = get_feed() 148 149 # Create datetime using updated_on: 150 modified_since = convert_date_string(updated_on) 151 152 # Map feed entries to list of import objects: 153 print(f'Importing all entries that have been updated since {modified_since}.') 154 modified_entries = filter_modified_since(d.entries, modified_since) 155 print(f'{len(modified_entries)} import objects created.') 156 157 if not dry_run: 158 create_batch(modified_entries) 159 print(f'{len(modified_entries)} entries added to the batch import job.') 160 else: 161 for record in modified_entries: 162 print(json.dumps(record)) 163 164 # Store timestamp for header 165 with open(LAST_UPDATED_TIME, 'w+') as f: 166 f.write(last_modified) 167 print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}') 168 169 170 if __name__ == '__main__': 171 print("Start: Standard Ebooks import job") 172 FnToCLI(import_job).run() 173 print("End: Standard Ebooks import job") 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/import_standard_ebooks.py b/scripts/import_standard_ebooks.py old mode 100644 new mode 100755 --- a/scripts/import_standard_ebooks.py +++ b/scripts/import_standard_ebooks.py @@ -15,7 +15,8 @@ FEED_URL = 'https://standardebooks.org/opds/all' LAST_UPDATED_TIME = './standard_ebooks_last_updated.txt' - +IMAGE_REL = 'http://opds-spec.org/image' +BASE_SE_URL = 'https://standardebooks.org' def get_feed(): """Fetches and returns Standard Ebook's feed.""" @@ -26,7 +27,9 @@ def map_data(entry) -> dict[str, Any]: """Maps Standard Ebooks feed entry to an Open Library import object.""" std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '') - return { + image_uris = filter(lambda link: link.rel == IMAGE_REL, entry.links) + + import_record = { "title": entry.title, "source_records": [f"standard_ebooks:{std_ebooks_id}"], "publishers": [entry.publisher], @@ -39,6 +42,11 @@ } } + if image_uris: + import_record['cover'] = f'{BASE_SE_URL}{list(image_uris)[0]["href"]}' + + return import_record + def create_batch(records: list[dict[str, str]]) -> None: """Creates Standard Ebook batch import job. @@ -162,9 +170,10 @@ print(json.dumps(record)) # Store timestamp for header - with open(LAST_UPDATED_TIME, 'w+') as f: - f.write(last_modified) - print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}') + if not dry_run: + with open(LAST_UPDATED_TIME, 'w+') as f: + f.write(last_modified) + print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}') if __name__ == '__main__':
{"golden_diff": "diff --git a/scripts/import_standard_ebooks.py b/scripts/import_standard_ebooks.py\nold mode 100644\nnew mode 100755\n--- a/scripts/import_standard_ebooks.py\n+++ b/scripts/import_standard_ebooks.py\n@@ -15,7 +15,8 @@\n \n FEED_URL = 'https://standardebooks.org/opds/all'\n LAST_UPDATED_TIME = './standard_ebooks_last_updated.txt'\n-\n+IMAGE_REL = 'http://opds-spec.org/image'\n+BASE_SE_URL = 'https://standardebooks.org'\n \n def get_feed():\n \"\"\"Fetches and returns Standard Ebook's feed.\"\"\"\n@@ -26,7 +27,9 @@\n def map_data(entry) -> dict[str, Any]:\n \"\"\"Maps Standard Ebooks feed entry to an Open Library import object.\"\"\"\n std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '')\n- return {\n+ image_uris = filter(lambda link: link.rel == IMAGE_REL, entry.links)\n+\n+ import_record = {\n \"title\": entry.title,\n \"source_records\": [f\"standard_ebooks:{std_ebooks_id}\"],\n \"publishers\": [entry.publisher],\n@@ -39,6 +42,11 @@\n }\n }\n \n+ if image_uris:\n+ import_record['cover'] = f'{BASE_SE_URL}{list(image_uris)[0][\"href\"]}'\n+\n+ return import_record\n+\n \n def create_batch(records: list[dict[str, str]]) -> None:\n \"\"\"Creates Standard Ebook batch import job.\n@@ -162,9 +170,10 @@\n print(json.dumps(record))\n \n # Store timestamp for header\n- with open(LAST_UPDATED_TIME, 'w+') as f:\n- f.write(last_modified)\n- print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}')\n+ if not dry_run:\n+ with open(LAST_UPDATED_TIME, 'w+') as f:\n+ f.write(last_modified)\n+ print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}')\n \n \n if __name__ == '__main__':\n", "issue": "Add covers to standard ebooks script\nSubtask of #5792 \r\n\r\nWe discovered in #5993 (Thank you @hornc !) that our import endpoint already supports covers! We just need `cover: \"http://foo...\"` in the json.\r\n\r\nNow we just need to update the standard ebooks importer to include the cover in the import request.\r\n\r\n### Related files\r\n\r\nhttps://github.com/internetarchive/openlibrary/blob/master/scripts/import_standard_ebooks.py\n", "before_files": [{"content": "#! 
/usr/bin/env python\nimport json\nimport requests\nimport time\nfrom typing import Any, Optional\n\nimport os.path as path\n\nimport feedparser\n\nfrom openlibrary.core.imports import Batch\nfrom scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI\nfrom openlibrary.config import load_config\nfrom infogami import config # noqa: F401\n\nFEED_URL = 'https://standardebooks.org/opds/all'\nLAST_UPDATED_TIME = './standard_ebooks_last_updated.txt'\n\n\ndef get_feed():\n \"\"\"Fetches and returns Standard Ebook's feed.\"\"\"\n r = requests.get(FEED_URL)\n return feedparser.parse(r.text)\n\n\ndef map_data(entry) -> dict[str, Any]:\n \"\"\"Maps Standard Ebooks feed entry to an Open Library import object.\"\"\"\n std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '')\n return {\n \"title\": entry.title,\n \"source_records\": [f\"standard_ebooks:{std_ebooks_id}\"],\n \"publishers\": [entry.publisher],\n \"publish_date\": entry.dc_issued[0:4],\n \"authors\": [{\"name\": author.name} for author in entry.authors],\n \"description\": entry.content[0].value,\n \"subjects\": [tag.term for tag in entry.tags],\n \"identifiers\": {\n \"standard_ebooks\": [std_ebooks_id]\n }\n }\n\n\ndef create_batch(records: list[dict[str, str]]) -> None:\n \"\"\"Creates Standard Ebook batch import job.\n\n Attempts to find existing Standard Ebooks import batch.\n If nothing is found, a new batch is created. All of the\n given import records are added to the batch job as JSON strings.\n \"\"\"\n now = time.gmtime(time.time())\n batch_name = f'standardebooks-{now.tm_year}{now.tm_mon}'\n batch = Batch.find(batch_name) or Batch.new(batch_name)\n batch.add_items([{\n 'ia_id': r['source_records'][0],\n 'data': json.dumps(r)} for r in records]\n )\n\n\ndef get_last_updated_time() -> Optional[str]:\n \"\"\"Gets date of last import job.\n\n Last updated dates are read from a local file. If no\n file exists, None is returned. 
Last updated date is\n expected to be in HTTP-date format:\n https://httpwg.org/specs/rfc7231.html#http.date\n\n returns last updated date string or None\n \"\"\"\n if path.exists(LAST_UPDATED_TIME):\n with open(LAST_UPDATED_TIME) as f:\n return f.readline()\n\n return None\n\n\ndef find_last_updated() -> Optional[str]:\n \"\"\"Fetches and returns Standard Ebooks most recent update date.\n\n Returns None if the last modified date is not included in the\n response headers.\n \"\"\"\n r = requests.head(FEED_URL)\n return r.headers['last-modified'] if r.ok else None\n\n\ndef convert_date_string(date_string: Optional[str]) -> time.struct_time:\n \"\"\"Converts HTTP-date format string into a struct_time object.\n\n The date_string will be formatted similarly to this:\n Fri, 05 Nov 2021 03:50:24 GMT\n\n returns struct_time representation of the given time, or the\n epoch if no time given.\n\n >>> str(convert_date_string(None)) # doctest: +NORMALIZE_WHITESPACE\n 'time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0,\n tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)'\n\n >>> convert_date_string(\"\") # doctest: +ELLIPSIS\n time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, ...\n\n >>> convert_date_string(0) # doctest: +ELLIPSIS\n time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, ...\n\n >>> convert_date_string(\"Fri, 05 Nov 2021 03:50:24 GMT\") # doctest: +ELLIPSIS\n time.struct_time(tm_year=2021, tm_mon=11, tm_mday=5, tm_hour=3, tm_min=50, ...\n \"\"\"\n if not date_string:\n return time.gmtime(0)\n return time.strptime(date_string[5:-4], '%d %b %Y %H:%M:%S')\n\n\ndef filter_modified_since(\n entries,\n modified_since: time.struct_time\n) -> list[dict[str, str]]:\n \"\"\"Returns a list of import objects.\"\"\"\n return [map_data(e) for e in entries if e.updated_parsed > modified_since]\n\n\ndef import_job(\n ol_config: str,\n dry_run=False,\n) -> None:\n \"\"\"\n :param ol_config: Path to openlibrary.yml file\n :param dry_run: If true, only print out records to import\n \"\"\"\n load_config(ol_config)\n\n # Make HEAD request to get last-modified time\n last_modified = find_last_updated()\n\n if not last_modified:\n print(f'HEAD request to {FEED_URL} failed. Not attempting GET request.')\n return\n\n print(f'Last-Modified date: {last_modified}')\n\n updated_on = get_last_updated_time()\n if last_modified == updated_on:\n print(f'No new updates since {updated_on}. Processing completed.')\n return\n\n print(f'Last import job: {updated_on or \"No date found\"}')\n # Get feed:\n d = get_feed()\n\n # Create datetime using updated_on:\n modified_since = convert_date_string(updated_on)\n\n # Map feed entries to list of import objects:\n print(f'Importing all entries that have been updated since {modified_since}.')\n modified_entries = filter_modified_since(d.entries, modified_since)\n print(f'{len(modified_entries)} import objects created.')\n\n if not dry_run:\n create_batch(modified_entries)\n print(f'{len(modified_entries)} entries added to the batch import job.')\n else:\n for record in modified_entries:\n print(json.dumps(record))\n\n # Store timestamp for header\n with open(LAST_UPDATED_TIME, 'w+') as f:\n f.write(last_modified)\n print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}')\n\n\nif __name__ == '__main__':\n print(\"Start: Standard Ebooks import job\")\n FnToCLI(import_job).run()\n print(\"End: Standard Ebooks import job\")\n", "path": "scripts/import_standard_ebooks.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\nimport json\nimport requests\nimport time\nfrom typing import Any, Optional\n\nimport os.path as path\n\nimport feedparser\n\nfrom openlibrary.core.imports import Batch\nfrom scripts.solr_builder.solr_builder.fn_to_cli import FnToCLI\nfrom openlibrary.config import load_config\nfrom infogami import config # noqa: F401\n\nFEED_URL = 'https://standardebooks.org/opds/all'\nLAST_UPDATED_TIME = './standard_ebooks_last_updated.txt'\nIMAGE_REL = 'http://opds-spec.org/image'\nBASE_SE_URL = 'https://standardebooks.org'\n\ndef get_feed():\n \"\"\"Fetches and returns Standard Ebook's feed.\"\"\"\n r = requests.get(FEED_URL)\n return feedparser.parse(r.text)\n\n\ndef map_data(entry) -> dict[str, Any]:\n \"\"\"Maps Standard Ebooks feed entry to an Open Library import object.\"\"\"\n std_ebooks_id = entry.id.replace('https://standardebooks.org/ebooks/', '')\n image_uris = filter(lambda link: link.rel == IMAGE_REL, entry.links)\n\n import_record = {\n \"title\": entry.title,\n \"source_records\": [f\"standard_ebooks:{std_ebooks_id}\"],\n \"publishers\": [entry.publisher],\n \"publish_date\": entry.dc_issued[0:4],\n \"authors\": [{\"name\": author.name} for author in entry.authors],\n \"description\": entry.content[0].value,\n \"subjects\": [tag.term for tag in entry.tags],\n \"identifiers\": {\n \"standard_ebooks\": [std_ebooks_id]\n }\n }\n\n if image_uris:\n import_record['cover'] = f'{BASE_SE_URL}{list(image_uris)[0][\"href\"]}'\n\n return import_record\n\n\ndef create_batch(records: list[dict[str, str]]) -> None:\n \"\"\"Creates Standard Ebook batch import job.\n\n Attempts to find existing Standard Ebooks import batch.\n If nothing is found, a new batch is created. All of the\n given import records are added to the batch job as JSON strings.\n \"\"\"\n now = time.gmtime(time.time())\n batch_name = f'standardebooks-{now.tm_year}{now.tm_mon}'\n batch = Batch.find(batch_name) or Batch.new(batch_name)\n batch.add_items([{\n 'ia_id': r['source_records'][0],\n 'data': json.dumps(r)} for r in records]\n )\n\n\ndef get_last_updated_time() -> Optional[str]:\n \"\"\"Gets date of last import job.\n\n Last updated dates are read from a local file. If no\n file exists, None is returned. 
Last updated date is\n expected to be in HTTP-date format:\n https://httpwg.org/specs/rfc7231.html#http.date\n\n returns last updated date string or None\n \"\"\"\n if path.exists(LAST_UPDATED_TIME):\n with open(LAST_UPDATED_TIME) as f:\n return f.readline()\n\n return None\n\n\ndef find_last_updated() -> Optional[str]:\n \"\"\"Fetches and returns Standard Ebooks most recent update date.\n\n Returns None if the last modified date is not included in the\n response headers.\n \"\"\"\n r = requests.head(FEED_URL)\n return r.headers['last-modified'] if r.ok else None\n\n\ndef convert_date_string(date_string: Optional[str]) -> time.struct_time:\n \"\"\"Converts HTTP-date format string into a struct_time object.\n\n The date_string will be formatted similarly to this:\n Fri, 05 Nov 2021 03:50:24 GMT\n\n returns struct_time representation of the given time, or the\n epoch if no time given.\n\n >>> str(convert_date_string(None)) # doctest: +NORMALIZE_WHITESPACE\n 'time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0,\n tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)'\n\n >>> convert_date_string(\"\") # doctest: +ELLIPSIS\n time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, ...\n\n >>> convert_date_string(0) # doctest: +ELLIPSIS\n time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, ...\n\n >>> convert_date_string(\"Fri, 05 Nov 2021 03:50:24 GMT\") # doctest: +ELLIPSIS\n time.struct_time(tm_year=2021, tm_mon=11, tm_mday=5, tm_hour=3, tm_min=50, ...\n \"\"\"\n if not date_string:\n return time.gmtime(0)\n return time.strptime(date_string[5:-4], '%d %b %Y %H:%M:%S')\n\n\ndef filter_modified_since(\n entries,\n modified_since: time.struct_time\n) -> list[dict[str, str]]:\n \"\"\"Returns a list of import objects.\"\"\"\n return [map_data(e) for e in entries if e.updated_parsed > modified_since]\n\n\ndef import_job(\n ol_config: str,\n dry_run=False,\n) -> None:\n \"\"\"\n :param ol_config: Path to openlibrary.yml file\n :param dry_run: If true, only print out records to import\n \"\"\"\n load_config(ol_config)\n\n # Make HEAD request to get last-modified time\n last_modified = find_last_updated()\n\n if not last_modified:\n print(f'HEAD request to {FEED_URL} failed. Not attempting GET request.')\n return\n\n print(f'Last-Modified date: {last_modified}')\n\n updated_on = get_last_updated_time()\n if last_modified == updated_on:\n print(f'No new updates since {updated_on}. Processing completed.')\n return\n\n print(f'Last import job: {updated_on or \"No date found\"}')\n # Get feed:\n d = get_feed()\n\n # Create datetime using updated_on:\n modified_since = convert_date_string(updated_on)\n\n # Map feed entries to list of import objects:\n print(f'Importing all entries that have been updated since {modified_since}.')\n modified_entries = filter_modified_since(d.entries, modified_since)\n print(f'{len(modified_entries)} import objects created.')\n\n if not dry_run:\n create_batch(modified_entries)\n print(f'{len(modified_entries)} entries added to the batch import job.')\n else:\n for record in modified_entries:\n print(json.dumps(record))\n\n # Store timestamp for header\n if not dry_run:\n with open(LAST_UPDATED_TIME, 'w+') as f:\n f.write(last_modified)\n print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}')\n\n\nif __name__ == '__main__':\n print(\"Start: Standard Ebooks import job\")\n FnToCLI(import_job).run()\n print(\"End: Standard Ebooks import job\")\n", "path": "scripts/import_standard_ebooks.py"}]}
2,239
465
gh_patches_debug_12487
rasdani/github-patches
git_diff
joke2k__faker-325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add ability to generate lorem characters without punctuation Sometimes I want to generate a string of characters of a specific length without any punctuation or capitalization but the lorem provider currently does not allow for this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/providers/python/__init__.py` Content: ``` 1 # coding=utf-8 2 3 from __future__ import unicode_literals 4 5 from decimal import Decimal 6 import sys 7 8 from faker.providers.lorem.la import Provider as Lorem 9 10 from .. import BaseProvider 11 12 13 if sys.version_info[0] == 2: 14 string_types = (basestring,) 15 elif sys.version_info[0] == 3: 16 string_types = (str, bytes) 17 else: 18 raise SystemError("Unrecognized python version: {}".format(sys.version_info[0])) 19 20 21 class Provider(BaseProvider): 22 @classmethod 23 def pybool(cls): 24 return cls.random_int(0, 1) == 1 25 26 @classmethod 27 def pystr(cls, max_chars=20): 28 return Lorem.text(max_chars) 29 30 @classmethod 31 def pyfloat(cls, left_digits=None, right_digits=None, positive=False): 32 left_digits = left_digits or cls.random_int(1, sys.float_info.dig) 33 right_digits = right_digits or cls.random_int(0, sys.float_info.dig - left_digits) 34 sign = 1 if positive or cls.random_int(0, 1) else -1 35 36 return float("{0}.{1}".format( 37 sign * cls.random_number(left_digits), cls.random_number(right_digits) 38 )) 39 40 @classmethod 41 def pyint(cls): 42 return cls.random_int() 43 44 @classmethod 45 def pydecimal(cls, left_digits=None, right_digits=None, positive=False): 46 return Decimal(str(cls.pyfloat(left_digits, right_digits, positive))) 47 48 def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types): 49 return tuple(self.pyset(nb_elements, variable_nb_elements, *value_types)) 50 51 def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types): 52 return set(self._pyiterable(nb_elements, variable_nb_elements, *value_types)) 53 54 def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types): 55 return list(self._pyiterable(nb_elements, variable_nb_elements, *value_types)) 56 57 def pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types): 58 return self.random_element([self.pylist, self.pytuple, self.pyset])(nb_elements, variable_nb_elements, *value_types) 59 60 def _random_type(self, type_list): 61 value_type = self.random_element(type_list) 62 63 method_name = "py{0}".format(value_type) 64 if hasattr(self, method_name): 65 value_type = method_name 66 67 return self.generator.format(value_type) 68 69 def _pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types): 70 71 value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower() 72 for t in value_types 73 # avoid recursion 74 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']] 75 if not value_types: 76 value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email'] 77 78 if variable_nb_elements: 79 nb_elements = self.randomize_nb_elements(nb_elements) 80 81 for f in range(nb_elements): 82 yield self._random_type(value_types) 83 84 def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types): 85 """ 86 Use this function to generate data, returns a touple containing 87 a list, a dictionary and a nested dictionary. 
88 """ 89 if variable_nb_elements: 90 nb_elements = self.randomize_nb_elements(nb_elements) 91 92 return dict(zip( 93 Lorem.words(nb_elements), 94 self._pyiterable(nb_elements, False, *value_types) 95 )) 96 97 def pystruct(self, count=10, *value_types): 98 99 value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower() 100 for t in value_types 101 # avoid recursion 102 if t != 'struct'] 103 if not value_types: 104 value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email'] 105 106 l = [] 107 d = {} 108 nd = {} 109 for i in range(count): 110 d[Lorem.word()] = self._random_type(value_types) 111 l.append(self._random_type(value_types)) 112 nd[Lorem.word()] = { 113 i: self._random_type(value_types), 114 i + 1: [self._random_type(value_types), self._random_type(value_types), self._random_type(value_types)], 115 i + 2: { 116 i: self._random_type(value_types), 117 i + 1: self._random_type(value_types), 118 i + 2: [ 119 self._random_type(value_types), 120 self._random_type(value_types) 121 ] 122 } 123 } 124 return l, d, nd 125 ``` Path: `faker/providers/lorem/__init__.py` Content: ``` 1 localized = True 2 default_locale = 'la' 3 4 from .. import BaseProvider 5 6 7 class Provider(BaseProvider): 8 @classmethod 9 def word(cls): 10 """ 11 :example 'Lorem' 12 """ 13 return cls.random_element(cls.word_list) 14 15 @classmethod 16 def words(cls, nb=3): 17 """ 18 Generate an array of random words 19 :example array('Lorem', 'ipsum', 'dolor') 20 :param nb how many words to return 21 """ 22 return [cls.word() for _ in range(0, nb)] 23 24 @classmethod 25 def sentence(cls, nb_words=6, variable_nb_words=True): 26 """ 27 Generate a random sentence 28 :example 'Lorem ipsum dolor sit amet.' 29 :param nb_words around how many words the sentence should contain 30 :param variable_nb_words set to false if you want exactly $nbWords returned, 31 otherwise $nbWords may vary by +/-40% with a minimum of 1 32 """ 33 if nb_words <= 0: 34 return '' 35 36 if variable_nb_words: 37 nb_words = cls.randomize_nb_elements(nb_words) 38 39 words = cls.words(nb_words) 40 words[0] = words[0].title() 41 42 return " ".join(words) + '.' 43 44 @classmethod 45 def sentences(cls, nb=3): 46 """ 47 Generate an array of sentences 48 :example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.') 49 :param nb how many sentences to return 50 :return list 51 """ 52 return [cls.sentence() for _ in range(0, nb)] 53 54 @classmethod 55 def paragraph(cls, nb_sentences=3, variable_nb_sentences=True): 56 """ 57 Generate a single paragraph 58 :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.' 59 :param nb_sentences around how many sentences the paragraph should contain 60 :param variable_nb_sentences set to false if you want exactly $nbSentences returned, 61 otherwise $nbSentences may vary by +/-40% with a minimum of 1 62 :return string 63 """ 64 if nb_sentences <= 0: 65 return '' 66 67 if variable_nb_sentences: 68 nb_sentences = cls.randomize_nb_elements(nb_sentences) 69 70 return " ".join(cls.sentences(nb_sentences)) 71 72 @classmethod 73 def paragraphs(cls, nb=3): 74 """ 75 Generate an array of paragraphs 76 :example array($paragraph1, $paragraph2, $paragraph3) 77 :param nb how many paragraphs to return 78 :return array 79 """ 80 return [cls.paragraph() for _ in range(0, nb)] 81 82 @classmethod 83 def text(cls, max_nb_chars=200): 84 """ 85 Generate a text string. 
86 Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs. 87 :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.' 88 :param max_nb_chars Maximum number of characters the text should contain (minimum 5) 89 :return string 90 """ 91 text = [] 92 if max_nb_chars < 5: 93 raise ValueError('text() can only generate text of at least 5 characters') 94 95 if max_nb_chars < 25: 96 # join words 97 while not text: 98 size = 0 99 # determine how many words are needed to reach the $max_nb_chars once; 100 while size < max_nb_chars: 101 word = (' ' if size else '') + cls.word() 102 text.append(word) 103 size += len(word) 104 text.pop() 105 text[0] = text[0][0].upper() + text[0][1:] 106 last_index = len(text) - 1 107 text[last_index] += '.' 108 elif max_nb_chars < 100: 109 # join sentences 110 while not text: 111 size = 0 112 # determine how many sentences are needed to reach the $max_nb_chars once 113 while size < max_nb_chars: 114 sentence = (' ' if size else '') + cls.sentence() 115 text.append(sentence) 116 size += len(sentence) 117 text.pop() 118 else: 119 # join paragraphs 120 while not text: 121 size = 0 122 # determine how many paragraphs are needed to reach the $max_nb_chars once 123 while size < max_nb_chars: 124 paragraph = ('\n' if size else '') + cls.paragraph() 125 text.append(paragraph) 126 size += len(paragraph) 127 text.pop() 128 129 return "".join(text) 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py --- a/faker/providers/lorem/__init__.py +++ b/faker/providers/lorem/__init__.py @@ -8,7 +8,8 @@ @classmethod def word(cls): """ - :example 'Lorem' + Generate a random word + :example 'lorem' """ return cls.random_element(cls.word_list) diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py --- a/faker/providers/python/__init__.py +++ b/faker/providers/python/__init__.py @@ -25,7 +25,7 @@ @classmethod def pystr(cls, max_chars=20): - return Lorem.text(max_chars) + return "".join(cls.random_letter() for i in range(max_chars)) @classmethod def pyfloat(cls, left_digits=None, right_digits=None, positive=False):
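For context, the patched `pystr` assembles the string from single random letters instead of lorem text, which is what removes punctuation and sentence capitalization. A minimal standalone sketch of the same idea, using the standard library in place of faker's `BaseProvider.random_letter` (assumed here to draw from ASCII letters):

```python
import random
import string

def pystr_sketch(max_chars: int = 20) -> str:
    # Join max_chars random ASCII letters: no punctuation, digits, or spaces.
    return "".join(random.choice(string.ascii_letters) for _ in range(max_chars))

print(pystr_sketch(10))  # e.g. 'qZkTrwBnmA' (output is random)
```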
{"golden_diff": "diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py\n--- a/faker/providers/lorem/__init__.py\n+++ b/faker/providers/lorem/__init__.py\n@@ -8,7 +8,8 @@\n @classmethod\n def word(cls):\n \"\"\"\n- :example 'Lorem'\n+ Generate a random word\n+ :example 'lorem'\n \"\"\"\n return cls.random_element(cls.word_list)\n \ndiff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -25,7 +25,7 @@\n \n @classmethod\n def pystr(cls, max_chars=20):\n- return Lorem.text(max_chars)\n+ return \"\".join(cls.random_letter() for i in range(max_chars))\n \n @classmethod\n def pyfloat(cls, left_digits=None, right_digits=None, positive=False):\n", "issue": "Add ability to generate lorem characters without punctuation\nSometimes I want to generate a string of characters of a specific length without any punctuation or capitalization but the lorem provider currently does not allow for this. \n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nfrom faker.providers.lorem.la import Provider as Lorem\n\nfrom .. import BaseProvider\n\n\nif sys.version_info[0] == 2:\n string_types = (basestring,)\nelif sys.version_info[0] == 3:\n string_types = (str, bytes)\nelse:\n raise SystemError(\"Unrecognized python version: {}\".format(sys.version_info[0]))\n\n\nclass Provider(BaseProvider):\n @classmethod\n def pybool(cls):\n return cls.random_int(0, 1) == 1\n\n @classmethod\n def pystr(cls, max_chars=20):\n return Lorem.text(max_chars)\n\n @classmethod\n def pyfloat(cls, left_digits=None, right_digits=None, positive=False):\n left_digits = left_digits or cls.random_int(1, sys.float_info.dig)\n right_digits = right_digits or cls.random_int(0, sys.float_info.dig - left_digits)\n sign = 1 if positive or cls.random_int(0, 1) else -1\n\n return float(\"{0}.{1}\".format(\n sign * cls.random_number(left_digits), cls.random_number(right_digits)\n ))\n\n @classmethod\n def pyint(cls):\n return cls.random_int()\n\n @classmethod\n def pydecimal(cls, left_digits=None, right_digits=None, positive=False):\n return Decimal(str(cls.pyfloat(left_digits, right_digits, positive)))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(self.pyset(nb_elements, variable_nb_elements, *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(self._pyiterable(nb_elements, variable_nb_elements, *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(self._pyiterable(nb_elements, variable_nb_elements, *value_types))\n\n def pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):\n\n value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = 
['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements)\n\n for f in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Use this function to generate data, returns a touple containing\n a list, a dictionary and a nested dictionary.\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements)\n\n return dict(zip(\n Lorem.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types)\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n l = []\n d = {}\n nd = {}\n for i in range(count):\n d[Lorem.word()] = self._random_type(value_types)\n l.append(self._random_type(value_types))\n nd[Lorem.word()] = {\n i: self._random_type(value_types),\n i + 1: [self._random_type(value_types), self._random_type(value_types), self._random_type(value_types)],\n i + 2: {\n i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [\n self._random_type(value_types),\n self._random_type(value_types)\n ]\n }\n }\n return l, d, nd\n", "path": "faker/providers/python/__init__.py"}, {"content": "localized = True\ndefault_locale = 'la'\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n @classmethod\n def word(cls):\n \"\"\"\n :example 'Lorem'\n \"\"\"\n return cls.random_element(cls.word_list)\n\n @classmethod\n def words(cls, nb=3):\n \"\"\"\n Generate an array of random words\n :example array('Lorem', 'ipsum', 'dolor')\n :param nb how many words to return\n \"\"\"\n return [cls.word() for _ in range(0, nb)]\n\n @classmethod\n def sentence(cls, nb_words=6, variable_nb_words=True):\n \"\"\"\n Generate a random sentence\n :example 'Lorem ipsum dolor sit amet.'\n :param nb_words around how many words the sentence should contain\n :param variable_nb_words set to false if you want exactly $nbWords returned,\n otherwise $nbWords may vary by +/-40% with a minimum of 1\n \"\"\"\n if nb_words <= 0:\n return ''\n\n if variable_nb_words:\n nb_words = cls.randomize_nb_elements(nb_words)\n\n words = cls.words(nb_words)\n words[0] = words[0].title()\n\n return \" \".join(words) + '.'\n\n @classmethod\n def sentences(cls, nb=3):\n \"\"\"\n Generate an array of sentences\n :example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')\n :param nb how many sentences to return\n :return list\n \"\"\"\n return [cls.sentence() for _ in range(0, nb)]\n\n @classmethod\n def paragraph(cls, nb_sentences=3, variable_nb_sentences=True):\n \"\"\"\n Generate a single paragraph\n :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. 
Voluptas rem voluptas sint modi dolorem amet.'\n :param nb_sentences around how many sentences the paragraph should contain\n :param variable_nb_sentences set to false if you want exactly $nbSentences returned,\n otherwise $nbSentences may vary by +/-40% with a minimum of 1\n :return string\n \"\"\"\n if nb_sentences <= 0:\n return ''\n\n if variable_nb_sentences:\n nb_sentences = cls.randomize_nb_elements(nb_sentences)\n\n return \" \".join(cls.sentences(nb_sentences))\n\n @classmethod\n def paragraphs(cls, nb=3):\n \"\"\"\n Generate an array of paragraphs\n :example array($paragraph1, $paragraph2, $paragraph3)\n :param nb how many paragraphs to return\n :return array\n \"\"\"\n return [cls.paragraph() for _ in range(0, nb)]\n\n @classmethod\n def text(cls, max_nb_chars=200):\n \"\"\"\n Generate a text string.\n Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.\n :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'\n :param max_nb_chars Maximum number of characters the text should contain (minimum 5)\n :return string\n \"\"\"\n text = []\n if max_nb_chars < 5:\n raise ValueError('text() can only generate text of at least 5 characters')\n\n if max_nb_chars < 25:\n # join words\n while not text:\n size = 0\n # determine how many words are needed to reach the $max_nb_chars once;\n while size < max_nb_chars:\n word = (' ' if size else '') + cls.word()\n text.append(word)\n size += len(word)\n text.pop()\n text[0] = text[0][0].upper() + text[0][1:]\n last_index = len(text) - 1\n text[last_index] += '.'\n elif max_nb_chars < 100:\n # join sentences\n while not text:\n size = 0\n # determine how many sentences are needed to reach the $max_nb_chars once\n while size < max_nb_chars:\n sentence = (' ' if size else '') + cls.sentence()\n text.append(sentence)\n size += len(sentence)\n text.pop()\n else:\n # join paragraphs\n while not text:\n size = 0\n # determine how many paragraphs are needed to reach the $max_nb_chars once\n while size < max_nb_chars:\n paragraph = ('\\n' if size else '') + cls.paragraph()\n text.append(paragraph)\n size += len(paragraph)\n text.pop()\n\n return \"\".join(text)\n", "path": "faker/providers/lorem/__init__.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nfrom decimal import Decimal\nimport sys\n\nfrom faker.providers.lorem.la import Provider as Lorem\n\nfrom .. 
import BaseProvider\n\n\nif sys.version_info[0] == 2:\n string_types = (basestring,)\nelif sys.version_info[0] == 3:\n string_types = (str, bytes)\nelse:\n raise SystemError(\"Unrecognized python version: {}\".format(sys.version_info[0]))\n\n\nclass Provider(BaseProvider):\n @classmethod\n def pybool(cls):\n return cls.random_int(0, 1) == 1\n\n @classmethod\n def pystr(cls, max_chars=20):\n return \"\".join(cls.random_letter() for i in range(max_chars))\n\n @classmethod\n def pyfloat(cls, left_digits=None, right_digits=None, positive=False):\n left_digits = left_digits or cls.random_int(1, sys.float_info.dig)\n right_digits = right_digits or cls.random_int(0, sys.float_info.dig - left_digits)\n sign = 1 if positive or cls.random_int(0, 1) else -1\n\n return float(\"{0}.{1}\".format(\n sign * cls.random_number(left_digits), cls.random_number(right_digits)\n ))\n\n @classmethod\n def pyint(cls):\n return cls.random_int()\n\n @classmethod\n def pydecimal(cls, left_digits=None, right_digits=None, positive=False):\n return Decimal(str(cls.pyfloat(left_digits, right_digits, positive)))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return tuple(self.pyset(nb_elements, variable_nb_elements, *value_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return set(self._pyiterable(nb_elements, variable_nb_elements, *value_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return list(self._pyiterable(nb_elements, variable_nb_elements, *value_types))\n\n def pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):\n return self.random_element([self.pylist, self.pytuple, self.pyset])(nb_elements, variable_nb_elements, *value_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = \"py{0}\".format(value_type)\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):\n\n value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements)\n\n for f in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):\n \"\"\"\n Use this function to generate data, returns a touple containing\n a list, a dictionary and a nested dictionary.\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements)\n\n return dict(zip(\n Lorem.words(nb_elements),\n self._pyiterable(nb_elements, False, *value_types)\n ))\n\n def pystruct(self, count=10, *value_types):\n\n value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']\n\n l = []\n d = {}\n nd = {}\n for i in range(count):\n d[Lorem.word()] = self._random_type(value_types)\n l.append(self._random_type(value_types))\n nd[Lorem.word()] = {\n i: self._random_type(value_types),\n i + 
1: [self._random_type(value_types), self._random_type(value_types), self._random_type(value_types)],\n i + 2: {\n i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [\n self._random_type(value_types),\n self._random_type(value_types)\n ]\n }\n }\n return l, d, nd\n", "path": "faker/providers/python/__init__.py"}, {"content": "localized = True\ndefault_locale = 'la'\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n @classmethod\n def word(cls):\n \"\"\"\n Generate a random word\n :example 'lorem'\n \"\"\"\n return cls.random_element(cls.word_list)\n\n @classmethod\n def words(cls, nb=3):\n \"\"\"\n Generate an array of random words\n :example array('Lorem', 'ipsum', 'dolor')\n :param nb how many words to return\n \"\"\"\n return [cls.word() for _ in range(0, nb)]\n\n @classmethod\n def sentence(cls, nb_words=6, variable_nb_words=True):\n \"\"\"\n Generate a random sentence\n :example 'Lorem ipsum dolor sit amet.'\n :param nb_words around how many words the sentence should contain\n :param variable_nb_words set to false if you want exactly $nbWords returned,\n otherwise $nbWords may vary by +/-40% with a minimum of 1\n \"\"\"\n if nb_words <= 0:\n return ''\n\n if variable_nb_words:\n nb_words = cls.randomize_nb_elements(nb_words)\n\n words = cls.words(nb_words)\n words[0] = words[0].title()\n\n return \" \".join(words) + '.'\n\n @classmethod\n def sentences(cls, nb=3):\n \"\"\"\n Generate an array of sentences\n :example array('Lorem ipsum dolor sit amet.', 'Consectetur adipisicing eli.')\n :param nb how many sentences to return\n :return list\n \"\"\"\n return [cls.sentence() for _ in range(0, nb)]\n\n @classmethod\n def paragraph(cls, nb_sentences=3, variable_nb_sentences=True):\n \"\"\"\n Generate a single paragraph\n :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. Voluptas rem voluptas sint modi dolorem amet.'\n :param nb_sentences around how many sentences the paragraph should contain\n :param variable_nb_sentences set to false if you want exactly $nbSentences returned,\n otherwise $nbSentences may vary by +/-40% with a minimum of 1\n :return string\n \"\"\"\n if nb_sentences <= 0:\n return ''\n\n if variable_nb_sentences:\n nb_sentences = cls.randomize_nb_elements(nb_sentences)\n\n return \" \".join(cls.sentences(nb_sentences))\n\n @classmethod\n def paragraphs(cls, nb=3):\n \"\"\"\n Generate an array of paragraphs\n :example array($paragraph1, $paragraph2, $paragraph3)\n :param nb how many paragraphs to return\n :return array\n \"\"\"\n return [cls.paragraph() for _ in range(0, nb)]\n\n @classmethod\n def text(cls, max_nb_chars=200):\n \"\"\"\n Generate a text string.\n Depending on the $maxNbChars, returns a string made of words, sentences, or paragraphs.\n :example 'Sapiente sunt omnis. Ut pariatur ad autem ducimus et. 
Voluptas rem voluptas sint modi dolorem amet.'\n :param max_nb_chars Maximum number of characters the text should contain (minimum 5)\n :return string\n \"\"\"\n text = []\n if max_nb_chars < 5:\n raise ValueError('text() can only generate text of at least 5 characters')\n\n if max_nb_chars < 25:\n # join words\n while not text:\n size = 0\n # determine how many words are needed to reach the $max_nb_chars once;\n while size < max_nb_chars:\n word = (' ' if size else '') + cls.word()\n text.append(word)\n size += len(word)\n text.pop()\n text[0] = text[0][0].upper() + text[0][1:]\n last_index = len(text) - 1\n text[last_index] += '.'\n elif max_nb_chars < 100:\n # join sentences\n while not text:\n size = 0\n # determine how many sentences are needed to reach the $max_nb_chars once\n while size < max_nb_chars:\n sentence = (' ' if size else '') + cls.sentence()\n text.append(sentence)\n size += len(sentence)\n text.pop()\n else:\n # join paragraphs\n while not text:\n size = 0\n # determine how many paragraphs are needed to reach the $max_nb_chars once\n while size < max_nb_chars:\n paragraph = ('\\n' if size else '') + cls.paragraph()\n text.append(paragraph)\n size += len(paragraph)\n text.pop()\n\n return \"\".join(text)\n", "path": "faker/providers/lorem/__init__.py"}]}
2,962
226
gh_patches_debug_40791
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-459
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Datadog exporter shouldn't parse span description to determine if there was an exception The Datadog exporter currently [parses the status description](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L252) to determine if an exception was raised, yet [the specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#record-exception) requires these to be recorded as events (and [the python code already does this](https://github.com/open-telemetry/opentelemetry-python/blob/master/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py#L690-L715)), so the Datadog exporter should read this data from the event list instead of hoping the description is properly formatted. The current code does say that [span events are not supported ](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L252), and while that would suggest that *reporting* events are not supported, we could certainly parse them for this data. I'll probably tackle this, I'm already working on #154. Also mentioned in #236, where I worked around the current behavior. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py` Content: ``` 1 DD_ORIGIN = "dd_origin" 2 AUTO_REJECT = 0 3 AUTO_KEEP = 1 4 USER_KEEP = 2 5 SAMPLE_RATE_METRIC_KEY = "_sample_rate" 6 SAMPLING_PRIORITY_KEY = "_sampling_priority_v1" 7 ENV_KEY = "env" 8 VERSION_KEY = "version" 9 SERVICE_NAME_TAG = "service.name" 10 ``` Path: `exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import logging 16 import os 17 from urllib.parse import urlparse 18 19 from ddtrace.ext import SpanTypes as DatadogSpanTypes 20 from ddtrace.internal.writer import AgentWriter 21 from ddtrace.span import Span as DatadogSpan 22 23 import opentelemetry.trace as trace_api 24 from opentelemetry.exporter.datadog.constants import ( 25 DD_ORIGIN, 26 ENV_KEY, 27 SAMPLE_RATE_METRIC_KEY, 28 SERVICE_NAME_TAG, 29 VERSION_KEY, 30 ) 31 from opentelemetry.sdk.trace import sampling 32 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult 33 from opentelemetry.semconv.trace import SpanAttributes 34 35 logger = logging.getLogger(__name__) 36 37 38 DEFAULT_AGENT_URL = "http://localhost:8126" 39 _INSTRUMENTATION_SPAN_TYPES = { 40 "opentelemetry.instrumentation.aiohttp-client": DatadogSpanTypes.HTTP, 41 "opentelemetry.instrumentation.asgi": DatadogSpanTypes.WEB, 42 "opentelemetry.instrumentation.dbapi": DatadogSpanTypes.SQL, 43 "opentelemetry.instrumentation.django": DatadogSpanTypes.WEB, 44 "opentelemetry.instrumentation.flask": DatadogSpanTypes.WEB, 45 "opentelemetry.instrumentation.grpc": DatadogSpanTypes.GRPC, 46 "opentelemetry.instrumentation.jinja2": DatadogSpanTypes.TEMPLATE, 47 "opentelemetry.instrumentation.mysql": DatadogSpanTypes.SQL, 48 "opentelemetry.instrumentation.psycopg2": DatadogSpanTypes.SQL, 49 "opentelemetry.instrumentation.pymemcache": DatadogSpanTypes.CACHE, 50 "opentelemetry.instrumentation.pymongo": DatadogSpanTypes.MONGODB, 51 "opentelemetry.instrumentation.pymysql": DatadogSpanTypes.SQL, 52 "opentelemetry.instrumentation.redis": DatadogSpanTypes.REDIS, 53 "opentelemetry.instrumentation.requests": DatadogSpanTypes.HTTP, 54 "opentelemetry.instrumentation.sqlalchemy": DatadogSpanTypes.SQL, 55 "opentelemetry.instrumentation.wsgi": DatadogSpanTypes.WEB, 56 } 57 58 59 class DatadogSpanExporter(SpanExporter): 60 """Datadog span exporter for OpenTelemetry. 
61 62 Args: 63 agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable 64 service: The service name to be used for the application or use ``DD_SERVICE`` environment variable 65 env: Set the application’s environment or use ``DD_ENV`` environment variable 66 version: Set the application’s version or use ``DD_VERSION`` environment variable 67 tags: A list (formatted as a comma-separated string) of default tags to be added to every span or use ``DD_TAGS`` environment variable 68 """ 69 70 def __init__( 71 self, agent_url=None, service=None, env=None, version=None, tags=None 72 ): 73 self.agent_url = ( 74 agent_url 75 if agent_url 76 else os.environ.get("DD_TRACE_AGENT_URL", DEFAULT_AGENT_URL) 77 ) 78 self.service = service or os.environ.get("DD_SERVICE") 79 self.env = env or os.environ.get("DD_ENV") 80 self.version = version or os.environ.get("DD_VERSION") 81 self.tags = _parse_tags_str(tags or os.environ.get("DD_TAGS")) 82 self._agent_writer = None 83 84 @property 85 def agent_writer(self): 86 if self._agent_writer is None: 87 url_parsed = urlparse(self.agent_url) 88 if url_parsed.scheme in ("http", "https"): 89 self._agent_writer = AgentWriter( 90 hostname=url_parsed.hostname, 91 port=url_parsed.port, 92 https=url_parsed.scheme == "https", 93 ) 94 elif url_parsed.scheme == "unix": 95 self._agent_writer = AgentWriter(uds_path=url_parsed.path) 96 else: 97 raise ValueError( 98 "Unknown scheme `%s` for agent URL" % url_parsed.scheme 99 ) 100 return self._agent_writer 101 102 def export(self, spans): 103 datadog_spans = self._translate_to_datadog(spans) 104 105 self.agent_writer.write(spans=datadog_spans) 106 107 return SpanExportResult.SUCCESS 108 109 def shutdown(self): 110 if self.agent_writer.started: 111 self.agent_writer.stop() 112 self.agent_writer.join(self.agent_writer.exit_timeout) 113 114 # pylint: disable=too-many-locals 115 def _translate_to_datadog(self, spans): 116 datadog_spans = [] 117 118 for span in spans: 119 trace_id, parent_id, span_id = _get_trace_ids(span) 120 121 # datadog Span is initialized with a reference to the tracer which is 122 # used to record the span when it is finished. We can skip ignore this 123 # because we are not calling the finish method and explictly set the 124 # duration. 
125 tracer = None 126 127 # extract resource attributes to be used as tags as well as potential service name 128 [ 129 resource_tags, 130 resource_service_name, 131 ] = _extract_tags_from_resource(span.resource) 132 133 datadog_span = DatadogSpan( 134 tracer, 135 _get_span_name(span), 136 service=resource_service_name or self.service, 137 resource=_get_resource(span), 138 span_type=_get_span_type(span), 139 trace_id=trace_id, 140 span_id=span_id, 141 parent_id=parent_id, 142 ) 143 datadog_span.start_ns = span.start_time 144 datadog_span.duration_ns = span.end_time - span.start_time 145 146 if not span.status.is_ok: 147 datadog_span.error = 1 148 if span.status.description: 149 exc_type, exc_val = _get_exc_info(span) 150 # no mapping for error.stack since traceback not recorded 151 datadog_span.set_tag("error.msg", exc_val) 152 datadog_span.set_tag("error.type", exc_type) 153 154 # combine resource attributes and span attributes, don't modify existing span attributes 155 combined_span_tags = {} 156 combined_span_tags.update(resource_tags) 157 combined_span_tags.update(span.attributes) 158 159 datadog_span.set_tags(combined_span_tags) 160 161 # add configured env tag 162 if self.env is not None: 163 datadog_span.set_tag(ENV_KEY, self.env) 164 165 # add configured application version tag to only root span 166 if self.version is not None and parent_id == 0: 167 datadog_span.set_tag(VERSION_KEY, self.version) 168 169 # add configured global tags 170 datadog_span.set_tags(self.tags) 171 172 # add origin to root span 173 origin = _get_origin(span) 174 if origin and parent_id == 0: 175 datadog_span.set_tag(DD_ORIGIN, origin) 176 177 sampling_rate = _get_sampling_rate(span) 178 if sampling_rate is not None: 179 datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate) 180 181 # span events and span links are not supported 182 183 datadog_spans.append(datadog_span) 184 185 return datadog_spans 186 187 188 def _get_trace_ids(span): 189 """Extract tracer ids from span""" 190 ctx = span.get_span_context() 191 trace_id = ctx.trace_id 192 span_id = ctx.span_id 193 194 if isinstance(span.parent, trace_api.Span): 195 parent_id = span.parent.get_span_context().span_id 196 elif isinstance(span.parent, trace_api.SpanContext): 197 parent_id = span.parent.span_id 198 else: 199 parent_id = 0 200 201 trace_id = _convert_trace_id_uint64(trace_id) 202 203 return trace_id, parent_id, span_id 204 205 206 def _convert_trace_id_uint64(otel_id): 207 """Convert 128-bit int used for trace_id to 64-bit unsigned int""" 208 return otel_id & 0xFFFFFFFFFFFFFFFF 209 210 211 def _get_span_name(span): 212 """Get span name by using instrumentation and kind while backing off to 213 span.name 214 """ 215 instrumentation_name = ( 216 span.instrumentation_info.name if span.instrumentation_info else None 217 ) 218 span_kind_name = span.kind.name if span.kind else None 219 name = ( 220 "{}.{}".format(instrumentation_name, span_kind_name) 221 if instrumentation_name and span_kind_name 222 else span.name 223 ) 224 return name 225 226 227 def _get_resource(span): 228 """Get resource name for span""" 229 if SpanAttributes.HTTP_METHOD in span.attributes: 230 route = span.attributes.get(SpanAttributes.HTTP_ROUTE) 231 return ( 232 span.attributes[SpanAttributes.HTTP_METHOD] + " " + route 233 if route 234 else span.attributes[SpanAttributes.HTTP_METHOD] 235 ) 236 237 return span.name 238 239 240 def _get_span_type(span): 241 """Get Datadog span type""" 242 instrumentation_name = ( 243 span.instrumentation_info.name if 
span.instrumentation_info else None 244 ) 245 span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name) 246 return span_type 247 248 249 def _get_exc_info(span): 250 """Parse span status description for exception type and value""" 251 exc_type, exc_val = span.status.description.split(":", 1) 252 return exc_type, exc_val.strip() 253 254 255 def _get_origin(span): 256 ctx = span.get_span_context() 257 origin = ctx.trace_state.get(DD_ORIGIN) 258 return origin 259 260 261 def _get_sampling_rate(span): 262 ctx = span.get_span_context() 263 tracer_provider = trace_api.get_tracer_provider() 264 if not hasattr(tracer_provider, "sampler"): 265 return None 266 sampler = tracer_provider.sampler 267 return ( 268 sampler.rate 269 if ctx.trace_flags.sampled 270 and isinstance(sampler, sampling.TraceIdRatioBased) 271 else None 272 ) 273 274 275 def _parse_tags_str(tags_str): 276 """Parse a string of tags typically provided via environment variables. 277 278 The expected string is of the form:: 279 "key1:value1,key2:value2" 280 281 :param tags_str: A string of the above form to parse tags from. 282 :return: A dict containing the tags that were parsed. 283 """ 284 parsed_tags = {} 285 if not tags_str: 286 return parsed_tags 287 288 for tag in tags_str.split(","): 289 try: 290 key, value = tag.split(":", 1) 291 292 # Validate the tag 293 if key == "" or value == "" or value.endswith(":"): 294 raise ValueError 295 except ValueError: 296 logger.error( 297 "Malformed tag in tag pair '%s' from tag string '%s'.", 298 tag, 299 tags_str, 300 ) 301 else: 302 parsed_tags[key] = value 303 304 return parsed_tags 305 306 307 def _extract_tags_from_resource(resource): 308 """Parse tags from resource.attributes, except service.name which 309 has special significance within datadog""" 310 tags = {} 311 service_name = None 312 if not (resource and getattr(resource, "attributes", None)): 313 return [tags, service_name] 314 315 for attribute_key, attribute_value in resource.attributes.items(): 316 if attribute_key == SERVICE_NAME_TAG: 317 service_name = attribute_value 318 else: 319 tags[attribute_key] = attribute_value 320 return [tags, service_name] 321 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py --- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py +++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py @@ -7,3 +7,10 @@ ENV_KEY = "env" VERSION_KEY = "version" SERVICE_NAME_TAG = "service.name" +EVENT_NAME_EXCEPTION = "exception" +EXCEPTION_TYPE_ATTR_KEY = "exception.type" +EXCEPTION_MSG_ATTR_KEY = "exception.message" +EXCEPTION_STACK_ATTR_KEY = "exception.stacktrace" +DD_ERROR_TYPE_TAG_KEY = "error.type" +DD_ERROR_MSG_TAG_KEY = "error.msg" +DD_ERROR_STACK_TAG_KEY = "error.stack" diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py --- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py +++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py @@ -22,8 +22,15 @@ import opentelemetry.trace as trace_api from opentelemetry.exporter.datadog.constants import ( + DD_ERROR_MSG_TAG_KEY, + DD_ERROR_STACK_TAG_KEY, + DD_ERROR_TYPE_TAG_KEY, DD_ORIGIN, ENV_KEY, + EVENT_NAME_EXCEPTION, + EXCEPTION_MSG_ATTR_KEY, + EXCEPTION_STACK_ATTR_KEY, + EXCEPTION_TYPE_ATTR_KEY, SAMPLE_RATE_METRIC_KEY, SERVICE_NAME_TAG, VERSION_KEY, @@ -145,11 +152,12 @@ if not span.status.is_ok: datadog_span.error = 1 - if span.status.description: - exc_type, exc_val = _get_exc_info(span) - # no mapping for error.stack since traceback not recorded - datadog_span.set_tag("error.msg", exc_val) - datadog_span.set_tag("error.type", exc_type) + # loop over events and look for exception events, extract info. + # https://github.com/open-telemetry/opentelemetry-python/blob/71e3a7a192c0fc8a7503fac967ada36a74b79e58/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py#L810-L819 + if span.events: + _extract_tags_from_exception_events( + span.events, datadog_span + ) # combine resource attributes and span attributes, don't modify existing span attributes combined_span_tags = {} @@ -178,7 +186,7 @@ if sampling_rate is not None: datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate) - # span events and span links are not supported + # span events and span links are not supported except for extracting exception event context datadog_spans.append(datadog_span) @@ -318,3 +326,17 @@ else: tags[attribute_key] = attribute_value return [tags, service_name] + + +def _extract_tags_from_exception_events(events, datadog_span): + """Parse error tags from exception events, error.msg error.type + and error.stack have special significance within datadog""" + for event in events: + if event.name is not None and event.name == EVENT_NAME_EXCEPTION: + for key, value in event.attributes.items(): + if key == EXCEPTION_TYPE_ATTR_KEY: + datadog_span.set_tag(DD_ERROR_TYPE_TAG_KEY, value) + elif key == EXCEPTION_MSG_ATTR_KEY: + datadog_span.set_tag(DD_ERROR_MSG_TAG_KEY, value) + elif key == EXCEPTION_STACK_ATTR_KEY: + datadog_span.set_tag(DD_ERROR_STACK_TAG_KEY, value)
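For context, the patch reads exception details from `exception` span events (as produced by the SDK's `Span.record_exception()`) and maps their attributes onto Datadog's error tags, rather than parsing the status description. A minimal standalone sketch of that mapping, with a hypothetical `FakeEvent` class standing in for the OpenTelemetry SDK event objects:

```python
# Hypothetical stand-in for an SDK span event; real events expose .name and .attributes.
class FakeEvent:
    def __init__(self, name, attributes):
        self.name = name
        self.attributes = attributes

# Attribute-to-tag mapping matching the constants the patch introduces.
EXCEPTION_TO_DD_TAG = {
    "exception.type": "error.type",
    "exception.message": "error.msg",
    "exception.stacktrace": "error.stack",
}

def extract_error_tags(events):
    tags = {}
    for event in events:
        if event.name == "exception":
            for key, value in event.attributes.items():
                if key in EXCEPTION_TO_DD_TAG:
                    tags[EXCEPTION_TO_DD_TAG[key]] = value
    return tags

events = [FakeEvent("exception", {
    "exception.type": "ValueError",
    "exception.message": "bad input",
    "exception.stacktrace": "Traceback (most recent call last): ...",
})]
# Prints the error.type, error.msg and error.stack tags derived from the event.
print(extract_error_tags(events))
```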
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py\n--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py\n+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py\n@@ -7,3 +7,10 @@\n ENV_KEY = \"env\"\n VERSION_KEY = \"version\"\n SERVICE_NAME_TAG = \"service.name\"\n+EVENT_NAME_EXCEPTION = \"exception\"\n+EXCEPTION_TYPE_ATTR_KEY = \"exception.type\"\n+EXCEPTION_MSG_ATTR_KEY = \"exception.message\"\n+EXCEPTION_STACK_ATTR_KEY = \"exception.stacktrace\"\n+DD_ERROR_TYPE_TAG_KEY = \"error.type\"\n+DD_ERROR_MSG_TAG_KEY = \"error.msg\"\n+DD_ERROR_STACK_TAG_KEY = \"error.stack\"\ndiff --git a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py\n--- a/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py\n+++ b/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py\n@@ -22,8 +22,15 @@\n \n import opentelemetry.trace as trace_api\n from opentelemetry.exporter.datadog.constants import (\n+ DD_ERROR_MSG_TAG_KEY,\n+ DD_ERROR_STACK_TAG_KEY,\n+ DD_ERROR_TYPE_TAG_KEY,\n DD_ORIGIN,\n ENV_KEY,\n+ EVENT_NAME_EXCEPTION,\n+ EXCEPTION_MSG_ATTR_KEY,\n+ EXCEPTION_STACK_ATTR_KEY,\n+ EXCEPTION_TYPE_ATTR_KEY,\n SAMPLE_RATE_METRIC_KEY,\n SERVICE_NAME_TAG,\n VERSION_KEY,\n@@ -145,11 +152,12 @@\n \n if not span.status.is_ok:\n datadog_span.error = 1\n- if span.status.description:\n- exc_type, exc_val = _get_exc_info(span)\n- # no mapping for error.stack since traceback not recorded\n- datadog_span.set_tag(\"error.msg\", exc_val)\n- datadog_span.set_tag(\"error.type\", exc_type)\n+ # loop over events and look for exception events, extract info.\n+ # https://github.com/open-telemetry/opentelemetry-python/blob/71e3a7a192c0fc8a7503fac967ada36a74b79e58/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py#L810-L819\n+ if span.events:\n+ _extract_tags_from_exception_events(\n+ span.events, datadog_span\n+ )\n \n # combine resource attributes and span attributes, don't modify existing span attributes\n combined_span_tags = {}\n@@ -178,7 +186,7 @@\n if sampling_rate is not None:\n datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)\n \n- # span events and span links are not supported\n+ # span events and span links are not supported except for extracting exception event context\n \n datadog_spans.append(datadog_span)\n \n@@ -318,3 +326,17 @@\n else:\n tags[attribute_key] = attribute_value\n return [tags, service_name]\n+\n+\n+def _extract_tags_from_exception_events(events, datadog_span):\n+ \"\"\"Parse error tags from exception events, error.msg error.type\n+ and error.stack have special significance within datadog\"\"\"\n+ for event in events:\n+ if event.name is not None and event.name == EVENT_NAME_EXCEPTION:\n+ for key, value in event.attributes.items():\n+ if key == EXCEPTION_TYPE_ATTR_KEY:\n+ datadog_span.set_tag(DD_ERROR_TYPE_TAG_KEY, value)\n+ elif key == EXCEPTION_MSG_ATTR_KEY:\n+ datadog_span.set_tag(DD_ERROR_MSG_TAG_KEY, value)\n+ elif key == EXCEPTION_STACK_ATTR_KEY:\n+ datadog_span.set_tag(DD_ERROR_STACK_TAG_KEY, value)\n", "issue": "Datadog exporter shouldn't parse span description to determine if there was an exception\nThe Datadog exporter currently [parses the status 
description](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L252) to determine if an exception was raised, yet [the specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#record-exception) requires these to be recorded as events (and [the python code already does this](https://github.com/open-telemetry/opentelemetry-python/blob/master/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py#L690-L715)), so the Datadog exporter should read this data from the event list instead of hoping the description is properly formatted.\r\n\r\nThe current code does say that [span events are not supported ](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/master/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L252), and while that would suggest that *reporting* events are not supported, we could certainly parse them for this data.\r\n\r\nI'll probably tackle this, I'm already working on #154. Also mentioned in #236, where I worked around the current behavior.\n", "before_files": [{"content": "DD_ORIGIN = \"dd_origin\"\nAUTO_REJECT = 0\nAUTO_KEEP = 1\nUSER_KEEP = 2\nSAMPLE_RATE_METRIC_KEY = \"_sample_rate\"\nSAMPLING_PRIORITY_KEY = \"_sampling_priority_v1\"\nENV_KEY = \"env\"\nVERSION_KEY = \"version\"\nSERVICE_NAME_TAG = \"service.name\"\n", "path": "exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nfrom ddtrace.ext import SpanTypes as DatadogSpanTypes\nfrom ddtrace.internal.writer import AgentWriter\nfrom ddtrace.span import Span as DatadogSpan\n\nimport opentelemetry.trace as trace_api\nfrom opentelemetry.exporter.datadog.constants import (\n DD_ORIGIN,\n ENV_KEY,\n SAMPLE_RATE_METRIC_KEY,\n SERVICE_NAME_TAG,\n VERSION_KEY,\n)\nfrom opentelemetry.sdk.trace import sampling\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.semconv.trace import SpanAttributes\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_AGENT_URL = \"http://localhost:8126\"\n_INSTRUMENTATION_SPAN_TYPES = {\n \"opentelemetry.instrumentation.aiohttp-client\": DatadogSpanTypes.HTTP,\n \"opentelemetry.instrumentation.asgi\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.dbapi\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.django\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.flask\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.grpc\": DatadogSpanTypes.GRPC,\n \"opentelemetry.instrumentation.jinja2\": DatadogSpanTypes.TEMPLATE,\n \"opentelemetry.instrumentation.mysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.psycopg2\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.pymemcache\": 
DatadogSpanTypes.CACHE,\n \"opentelemetry.instrumentation.pymongo\": DatadogSpanTypes.MONGODB,\n \"opentelemetry.instrumentation.pymysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.redis\": DatadogSpanTypes.REDIS,\n \"opentelemetry.instrumentation.requests\": DatadogSpanTypes.HTTP,\n \"opentelemetry.instrumentation.sqlalchemy\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.wsgi\": DatadogSpanTypes.WEB,\n}\n\n\nclass DatadogSpanExporter(SpanExporter):\n \"\"\"Datadog span exporter for OpenTelemetry.\n\n Args:\n agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable\n service: The service name to be used for the application or use ``DD_SERVICE`` environment variable\n env: Set the application\u2019s environment or use ``DD_ENV`` environment variable\n version: Set the application\u2019s version or use ``DD_VERSION`` environment variable\n tags: A list (formatted as a comma-separated string) of default tags to be added to every span or use ``DD_TAGS`` environment variable\n \"\"\"\n\n def __init__(\n self, agent_url=None, service=None, env=None, version=None, tags=None\n ):\n self.agent_url = (\n agent_url\n if agent_url\n else os.environ.get(\"DD_TRACE_AGENT_URL\", DEFAULT_AGENT_URL)\n )\n self.service = service or os.environ.get(\"DD_SERVICE\")\n self.env = env or os.environ.get(\"DD_ENV\")\n self.version = version or os.environ.get(\"DD_VERSION\")\n self.tags = _parse_tags_str(tags or os.environ.get(\"DD_TAGS\"))\n self._agent_writer = None\n\n @property\n def agent_writer(self):\n if self._agent_writer is None:\n url_parsed = urlparse(self.agent_url)\n if url_parsed.scheme in (\"http\", \"https\"):\n self._agent_writer = AgentWriter(\n hostname=url_parsed.hostname,\n port=url_parsed.port,\n https=url_parsed.scheme == \"https\",\n )\n elif url_parsed.scheme == \"unix\":\n self._agent_writer = AgentWriter(uds_path=url_parsed.path)\n else:\n raise ValueError(\n \"Unknown scheme `%s` for agent URL\" % url_parsed.scheme\n )\n return self._agent_writer\n\n def export(self, spans):\n datadog_spans = self._translate_to_datadog(spans)\n\n self.agent_writer.write(spans=datadog_spans)\n\n return SpanExportResult.SUCCESS\n\n def shutdown(self):\n if self.agent_writer.started:\n self.agent_writer.stop()\n self.agent_writer.join(self.agent_writer.exit_timeout)\n\n # pylint: disable=too-many-locals\n def _translate_to_datadog(self, spans):\n datadog_spans = []\n\n for span in spans:\n trace_id, parent_id, span_id = _get_trace_ids(span)\n\n # datadog Span is initialized with a reference to the tracer which is\n # used to record the span when it is finished. 
We can skip ignore this\n # because we are not calling the finish method and explictly set the\n # duration.\n tracer = None\n\n # extract resource attributes to be used as tags as well as potential service name\n [\n resource_tags,\n resource_service_name,\n ] = _extract_tags_from_resource(span.resource)\n\n datadog_span = DatadogSpan(\n tracer,\n _get_span_name(span),\n service=resource_service_name or self.service,\n resource=_get_resource(span),\n span_type=_get_span_type(span),\n trace_id=trace_id,\n span_id=span_id,\n parent_id=parent_id,\n )\n datadog_span.start_ns = span.start_time\n datadog_span.duration_ns = span.end_time - span.start_time\n\n if not span.status.is_ok:\n datadog_span.error = 1\n if span.status.description:\n exc_type, exc_val = _get_exc_info(span)\n # no mapping for error.stack since traceback not recorded\n datadog_span.set_tag(\"error.msg\", exc_val)\n datadog_span.set_tag(\"error.type\", exc_type)\n\n # combine resource attributes and span attributes, don't modify existing span attributes\n combined_span_tags = {}\n combined_span_tags.update(resource_tags)\n combined_span_tags.update(span.attributes)\n\n datadog_span.set_tags(combined_span_tags)\n\n # add configured env tag\n if self.env is not None:\n datadog_span.set_tag(ENV_KEY, self.env)\n\n # add configured application version tag to only root span\n if self.version is not None and parent_id == 0:\n datadog_span.set_tag(VERSION_KEY, self.version)\n\n # add configured global tags\n datadog_span.set_tags(self.tags)\n\n # add origin to root span\n origin = _get_origin(span)\n if origin and parent_id == 0:\n datadog_span.set_tag(DD_ORIGIN, origin)\n\n sampling_rate = _get_sampling_rate(span)\n if sampling_rate is not None:\n datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)\n\n # span events and span links are not supported\n\n datadog_spans.append(datadog_span)\n\n return datadog_spans\n\n\ndef _get_trace_ids(span):\n \"\"\"Extract tracer ids from span\"\"\"\n ctx = span.get_span_context()\n trace_id = ctx.trace_id\n span_id = ctx.span_id\n\n if isinstance(span.parent, trace_api.Span):\n parent_id = span.parent.get_span_context().span_id\n elif isinstance(span.parent, trace_api.SpanContext):\n parent_id = span.parent.span_id\n else:\n parent_id = 0\n\n trace_id = _convert_trace_id_uint64(trace_id)\n\n return trace_id, parent_id, span_id\n\n\ndef _convert_trace_id_uint64(otel_id):\n \"\"\"Convert 128-bit int used for trace_id to 64-bit unsigned int\"\"\"\n return otel_id & 0xFFFFFFFFFFFFFFFF\n\n\ndef _get_span_name(span):\n \"\"\"Get span name by using instrumentation and kind while backing off to\n span.name\n \"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_kind_name = span.kind.name if span.kind else None\n name = (\n \"{}.{}\".format(instrumentation_name, span_kind_name)\n if instrumentation_name and span_kind_name\n else span.name\n )\n return name\n\n\ndef _get_resource(span):\n \"\"\"Get resource name for span\"\"\"\n if SpanAttributes.HTTP_METHOD in span.attributes:\n route = span.attributes.get(SpanAttributes.HTTP_ROUTE)\n return (\n span.attributes[SpanAttributes.HTTP_METHOD] + \" \" + route\n if route\n else span.attributes[SpanAttributes.HTTP_METHOD]\n )\n\n return span.name\n\n\ndef _get_span_type(span):\n \"\"\"Get Datadog span type\"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name)\n return 
span_type\n\n\ndef _get_exc_info(span):\n \"\"\"Parse span status description for exception type and value\"\"\"\n exc_type, exc_val = span.status.description.split(\":\", 1)\n return exc_type, exc_val.strip()\n\n\ndef _get_origin(span):\n ctx = span.get_span_context()\n origin = ctx.trace_state.get(DD_ORIGIN)\n return origin\n\n\ndef _get_sampling_rate(span):\n ctx = span.get_span_context()\n tracer_provider = trace_api.get_tracer_provider()\n if not hasattr(tracer_provider, \"sampler\"):\n return None\n sampler = tracer_provider.sampler\n return (\n sampler.rate\n if ctx.trace_flags.sampled\n and isinstance(sampler, sampling.TraceIdRatioBased)\n else None\n )\n\n\ndef _parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n for tag in tags_str.split(\",\"):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n logger.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n\n\ndef _extract_tags_from_resource(resource):\n \"\"\"Parse tags from resource.attributes, except service.name which\n has special significance within datadog\"\"\"\n tags = {}\n service_name = None\n if not (resource and getattr(resource, \"attributes\", None)):\n return [tags, service_name]\n\n for attribute_key, attribute_value in resource.attributes.items():\n if attribute_key == SERVICE_NAME_TAG:\n service_name = attribute_value\n else:\n tags[attribute_key] = attribute_value\n return [tags, service_name]\n", "path": "exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py"}], "after_files": [{"content": "DD_ORIGIN = \"dd_origin\"\nAUTO_REJECT = 0\nAUTO_KEEP = 1\nUSER_KEEP = 2\nSAMPLE_RATE_METRIC_KEY = \"_sample_rate\"\nSAMPLING_PRIORITY_KEY = \"_sampling_priority_v1\"\nENV_KEY = \"env\"\nVERSION_KEY = \"version\"\nSERVICE_NAME_TAG = \"service.name\"\nEVENT_NAME_EXCEPTION = \"exception\"\nEXCEPTION_TYPE_ATTR_KEY = \"exception.type\"\nEXCEPTION_MSG_ATTR_KEY = \"exception.message\"\nEXCEPTION_STACK_ATTR_KEY = \"exception.stacktrace\"\nDD_ERROR_TYPE_TAG_KEY = \"error.type\"\nDD_ERROR_MSG_TAG_KEY = \"error.msg\"\nDD_ERROR_STACK_TAG_KEY = \"error.stack\"\n", "path": "exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/constants.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nfrom ddtrace.ext import SpanTypes as DatadogSpanTypes\nfrom ddtrace.internal.writer import AgentWriter\nfrom ddtrace.span import Span as DatadogSpan\n\nimport 
opentelemetry.trace as trace_api\nfrom opentelemetry.exporter.datadog.constants import (\n DD_ERROR_MSG_TAG_KEY,\n DD_ERROR_STACK_TAG_KEY,\n DD_ERROR_TYPE_TAG_KEY,\n DD_ORIGIN,\n ENV_KEY,\n EVENT_NAME_EXCEPTION,\n EXCEPTION_MSG_ATTR_KEY,\n EXCEPTION_STACK_ATTR_KEY,\n EXCEPTION_TYPE_ATTR_KEY,\n SAMPLE_RATE_METRIC_KEY,\n SERVICE_NAME_TAG,\n VERSION_KEY,\n)\nfrom opentelemetry.sdk.trace import sampling\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.semconv.trace import SpanAttributes\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_AGENT_URL = \"http://localhost:8126\"\n_INSTRUMENTATION_SPAN_TYPES = {\n \"opentelemetry.instrumentation.aiohttp-client\": DatadogSpanTypes.HTTP,\n \"opentelemetry.instrumentation.asgi\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.dbapi\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.django\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.flask\": DatadogSpanTypes.WEB,\n \"opentelemetry.instrumentation.grpc\": DatadogSpanTypes.GRPC,\n \"opentelemetry.instrumentation.jinja2\": DatadogSpanTypes.TEMPLATE,\n \"opentelemetry.instrumentation.mysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.psycopg2\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.pymemcache\": DatadogSpanTypes.CACHE,\n \"opentelemetry.instrumentation.pymongo\": DatadogSpanTypes.MONGODB,\n \"opentelemetry.instrumentation.pymysql\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.redis\": DatadogSpanTypes.REDIS,\n \"opentelemetry.instrumentation.requests\": DatadogSpanTypes.HTTP,\n \"opentelemetry.instrumentation.sqlalchemy\": DatadogSpanTypes.SQL,\n \"opentelemetry.instrumentation.wsgi\": DatadogSpanTypes.WEB,\n}\n\n\nclass DatadogSpanExporter(SpanExporter):\n \"\"\"Datadog span exporter for OpenTelemetry.\n\n Args:\n agent_url: The url of the Datadog Agent or use ``DD_TRACE_AGENT_URL`` environment variable\n service: The service name to be used for the application or use ``DD_SERVICE`` environment variable\n env: Set the application\u2019s environment or use ``DD_ENV`` environment variable\n version: Set the application\u2019s version or use ``DD_VERSION`` environment variable\n tags: A list (formatted as a comma-separated string) of default tags to be added to every span or use ``DD_TAGS`` environment variable\n \"\"\"\n\n def __init__(\n self, agent_url=None, service=None, env=None, version=None, tags=None\n ):\n self.agent_url = (\n agent_url\n if agent_url\n else os.environ.get(\"DD_TRACE_AGENT_URL\", DEFAULT_AGENT_URL)\n )\n self.service = service or os.environ.get(\"DD_SERVICE\")\n self.env = env or os.environ.get(\"DD_ENV\")\n self.version = version or os.environ.get(\"DD_VERSION\")\n self.tags = _parse_tags_str(tags or os.environ.get(\"DD_TAGS\"))\n self._agent_writer = None\n\n @property\n def agent_writer(self):\n if self._agent_writer is None:\n url_parsed = urlparse(self.agent_url)\n if url_parsed.scheme in (\"http\", \"https\"):\n self._agent_writer = AgentWriter(\n hostname=url_parsed.hostname,\n port=url_parsed.port,\n https=url_parsed.scheme == \"https\",\n )\n elif url_parsed.scheme == \"unix\":\n self._agent_writer = AgentWriter(uds_path=url_parsed.path)\n else:\n raise ValueError(\n \"Unknown scheme `%s` for agent URL\" % url_parsed.scheme\n )\n return self._agent_writer\n\n def export(self, spans):\n datadog_spans = self._translate_to_datadog(spans)\n\n self.agent_writer.write(spans=datadog_spans)\n\n return SpanExportResult.SUCCESS\n\n def shutdown(self):\n if 
self.agent_writer.started:\n self.agent_writer.stop()\n self.agent_writer.join(self.agent_writer.exit_timeout)\n\n # pylint: disable=too-many-locals\n def _translate_to_datadog(self, spans):\n datadog_spans = []\n\n for span in spans:\n trace_id, parent_id, span_id = _get_trace_ids(span)\n\n # datadog Span is initialized with a reference to the tracer which is\n # used to record the span when it is finished. We can skip ignore this\n # because we are not calling the finish method and explictly set the\n # duration.\n tracer = None\n\n # extract resource attributes to be used as tags as well as potential service name\n [\n resource_tags,\n resource_service_name,\n ] = _extract_tags_from_resource(span.resource)\n\n datadog_span = DatadogSpan(\n tracer,\n _get_span_name(span),\n service=resource_service_name or self.service,\n resource=_get_resource(span),\n span_type=_get_span_type(span),\n trace_id=trace_id,\n span_id=span_id,\n parent_id=parent_id,\n )\n datadog_span.start_ns = span.start_time\n datadog_span.duration_ns = span.end_time - span.start_time\n\n if not span.status.is_ok:\n datadog_span.error = 1\n # loop over events and look for exception events, extract info.\n # https://github.com/open-telemetry/opentelemetry-python/blob/71e3a7a192c0fc8a7503fac967ada36a74b79e58/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py#L810-L819\n if span.events:\n _extract_tags_from_exception_events(\n span.events, datadog_span\n )\n\n # combine resource attributes and span attributes, don't modify existing span attributes\n combined_span_tags = {}\n combined_span_tags.update(resource_tags)\n combined_span_tags.update(span.attributes)\n\n datadog_span.set_tags(combined_span_tags)\n\n # add configured env tag\n if self.env is not None:\n datadog_span.set_tag(ENV_KEY, self.env)\n\n # add configured application version tag to only root span\n if self.version is not None and parent_id == 0:\n datadog_span.set_tag(VERSION_KEY, self.version)\n\n # add configured global tags\n datadog_span.set_tags(self.tags)\n\n # add origin to root span\n origin = _get_origin(span)\n if origin and parent_id == 0:\n datadog_span.set_tag(DD_ORIGIN, origin)\n\n sampling_rate = _get_sampling_rate(span)\n if sampling_rate is not None:\n datadog_span.set_metric(SAMPLE_RATE_METRIC_KEY, sampling_rate)\n\n # span events and span links are not supported except for extracting exception event context\n\n datadog_spans.append(datadog_span)\n\n return datadog_spans\n\n\ndef _get_trace_ids(span):\n \"\"\"Extract tracer ids from span\"\"\"\n ctx = span.get_span_context()\n trace_id = ctx.trace_id\n span_id = ctx.span_id\n\n if isinstance(span.parent, trace_api.Span):\n parent_id = span.parent.get_span_context().span_id\n elif isinstance(span.parent, trace_api.SpanContext):\n parent_id = span.parent.span_id\n else:\n parent_id = 0\n\n trace_id = _convert_trace_id_uint64(trace_id)\n\n return trace_id, parent_id, span_id\n\n\ndef _convert_trace_id_uint64(otel_id):\n \"\"\"Convert 128-bit int used for trace_id to 64-bit unsigned int\"\"\"\n return otel_id & 0xFFFFFFFFFFFFFFFF\n\n\ndef _get_span_name(span):\n \"\"\"Get span name by using instrumentation and kind while backing off to\n span.name\n \"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_kind_name = span.kind.name if span.kind else None\n name = (\n \"{}.{}\".format(instrumentation_name, span_kind_name)\n if instrumentation_name and span_kind_name\n else span.name\n )\n return name\n\n\ndef 
_get_resource(span):\n \"\"\"Get resource name for span\"\"\"\n if SpanAttributes.HTTP_METHOD in span.attributes:\n route = span.attributes.get(SpanAttributes.HTTP_ROUTE)\n return (\n span.attributes[SpanAttributes.HTTP_METHOD] + \" \" + route\n if route\n else span.attributes[SpanAttributes.HTTP_METHOD]\n )\n\n return span.name\n\n\ndef _get_span_type(span):\n \"\"\"Get Datadog span type\"\"\"\n instrumentation_name = (\n span.instrumentation_info.name if span.instrumentation_info else None\n )\n span_type = _INSTRUMENTATION_SPAN_TYPES.get(instrumentation_name)\n return span_type\n\n\ndef _get_exc_info(span):\n \"\"\"Parse span status description for exception type and value\"\"\"\n exc_type, exc_val = span.status.description.split(\":\", 1)\n return exc_type, exc_val.strip()\n\n\ndef _get_origin(span):\n ctx = span.get_span_context()\n origin = ctx.trace_state.get(DD_ORIGIN)\n return origin\n\n\ndef _get_sampling_rate(span):\n ctx = span.get_span_context()\n tracer_provider = trace_api.get_tracer_provider()\n if not hasattr(tracer_provider, \"sampler\"):\n return None\n sampler = tracer_provider.sampler\n return (\n sampler.rate\n if ctx.trace_flags.sampled\n and isinstance(sampler, sampling.TraceIdRatioBased)\n else None\n )\n\n\ndef _parse_tags_str(tags_str):\n \"\"\"Parse a string of tags typically provided via environment variables.\n\n The expected string is of the form::\n \"key1:value1,key2:value2\"\n\n :param tags_str: A string of the above form to parse tags from.\n :return: A dict containing the tags that were parsed.\n \"\"\"\n parsed_tags = {}\n if not tags_str:\n return parsed_tags\n\n for tag in tags_str.split(\",\"):\n try:\n key, value = tag.split(\":\", 1)\n\n # Validate the tag\n if key == \"\" or value == \"\" or value.endswith(\":\"):\n raise ValueError\n except ValueError:\n logger.error(\n \"Malformed tag in tag pair '%s' from tag string '%s'.\",\n tag,\n tags_str,\n )\n else:\n parsed_tags[key] = value\n\n return parsed_tags\n\n\ndef _extract_tags_from_resource(resource):\n \"\"\"Parse tags from resource.attributes, except service.name which\n has special significance within datadog\"\"\"\n tags = {}\n service_name = None\n if not (resource and getattr(resource, \"attributes\", None)):\n return [tags, service_name]\n\n for attribute_key, attribute_value in resource.attributes.items():\n if attribute_key == SERVICE_NAME_TAG:\n service_name = attribute_value\n else:\n tags[attribute_key] = attribute_value\n return [tags, service_name]\n\n\ndef _extract_tags_from_exception_events(events, datadog_span):\n \"\"\"Parse error tags from exception events, error.msg error.type\n and error.stack have special significance within datadog\"\"\"\n for event in events:\n if event.name is not None and event.name == EVENT_NAME_EXCEPTION:\n for key, value in event.attributes.items():\n if key == EXCEPTION_TYPE_ATTR_KEY:\n datadog_span.set_tag(DD_ERROR_TYPE_TAG_KEY, value)\n elif key == EXCEPTION_MSG_ATTR_KEY:\n datadog_span.set_tag(DD_ERROR_MSG_TAG_KEY, value)\n elif key == EXCEPTION_STACK_ATTR_KEY:\n datadog_span.set_tag(DD_ERROR_STACK_TAG_KEY, value)\n", "path": "exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py"}]}
4,058
932
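A minimal sketch of the exception-event mapping that the patch in the record above introduces: OpenTelemetry `exception` events carry `exception.type`, `exception.message` and `exception.stacktrace` attributes, and the exporter copies them onto Datadog's `error.type`, `error.msg` and `error.stack` tags. The dict-shaped events and the tiny span stand-in below are assumptions made so the snippet runs on its own; they are not ddtrace or OpenTelemetry APIs.

```python
# Sketch only: core of the exception-event -> Datadog error-tag mapping.
# Attribute keys and tag names come from the record's constants; the event
# dicts and _FakeDatadogSpan are assumptions for this standalone example.
EVENT_NAME_EXCEPTION = "exception"
_ATTR_TO_DD_TAG = {
    "exception.type": "error.type",
    "exception.message": "error.msg",
    "exception.stacktrace": "error.stack",
}


class _FakeDatadogSpan:
    """Minimal stand-in that only collects tags (not ddtrace's real Span)."""

    def __init__(self):
        self.tags = {}

    def set_tag(self, key, value):
        self.tags[key] = value


def extract_tags_from_exception_events(events, datadog_span):
    """Copy attributes of 'exception' events onto Datadog error tags."""
    for event in events:
        if event.get("name") != EVENT_NAME_EXCEPTION:
            continue
        for attr_key, attr_value in event.get("attributes", {}).items():
            dd_tag = _ATTR_TO_DD_TAG.get(attr_key)
            if dd_tag is not None:
                datadog_span.set_tag(dd_tag, attr_value)


if __name__ == "__main__":
    span = _FakeDatadogSpan()
    extract_tags_from_exception_events(
        [
            {
                "name": "exception",
                "attributes": {
                    "exception.type": "ValueError",
                    "exception.message": "bad input",
                    "exception.stacktrace": "Traceback (most recent call last): ...",
                },
            }
        ],
        span,
    )
    print(span.tags)  # {'error.type': 'ValueError', 'error.msg': 'bad input', 'error.stack': '...'}
```

In the actual patch the events are SDK objects with `.name` and `.attributes`; the dict access above is only a simplification for the sketch.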
gh_patches_debug_7183
rasdani/github-patches
git_diff
scikit-hep__pyhf-1179
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- json2xml breaks for a simple JSON # Description ``` { "channels": [ { "name": "singlechannel", "samples": [ { "name": "signal", "data": [ 5 ], "modifiers": [ { "name": "mu", "type": "normfactor", "data": null } ] }, { "name": "background", "data": [ 50 ], "modifiers": [ { "name": "uncorr_bkguncrt", "type": "shapesys", "data": [ 6 ] } ] } ] } ], "version": "1.0.0", "measurements": [ { "name": "measurement", "config": { "poi": "mu", "parameters": [ { "bounds": [ [ 0, 10 ] ], "inits": [ 1 ], "fixed": false, "name": "mu" }, { "bounds": [ [ 1e-10, 10 ] ], "inits": [ 1 ], "fixed": false, "name": "uncorr_bkguncrt" } ] } } ], "observations": [ { "name": "singlechannel", "data": [ 50 ] } ] } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/writexml.py` Content: ``` 1 import logging 2 3 from pathlib import Path 4 import shutil 5 import pkg_resources 6 import xml.etree.cElementTree as ET 7 import numpy as np 8 import uproot 9 from uproot_methods.classes import TH1 10 11 from .mixins import _ChannelSummaryMixin 12 13 _ROOT_DATA_FILE = None 14 15 log = logging.getLogger(__name__) 16 17 # 'spec' gets passed through all functions as NormFactor is a unique case of having 18 # parameter configurations stored at the modifier-definition-spec level. This means 19 # that build_modifier() needs access to the measurements. The call stack is: 20 # 21 # writexml 22 # ->build_channel 23 # ->build_sample 24 # ->build_modifier 25 # 26 # Therefore, 'spec' needs to be threaded through all these calls. 27 28 29 def _make_hist_name(channel, sample, modifier='', prefix='hist', suffix=''): 30 middle = '_'.join(filter(lambda x: x, [channel, sample, modifier])) 31 return f"{prefix}{middle}{suffix}" 32 33 34 def _export_root_histogram(histname, data): 35 h = TH1.from_numpy((np.asarray(data), np.arange(len(data) + 1))) 36 h._fName = histname 37 # NB: uproot crashes for some reason, figure out why later 38 # if histname in _ROOT_DATA_FILE: 39 # raise KeyError(f'Duplicate key {histname} being written.') 40 _ROOT_DATA_FILE[histname] = h 41 42 43 # https://stackoverflow.com/a/4590052 44 def indent(elem, level=0): 45 i = "\n" + level * " " 46 if elem: 47 if not elem.text or not elem.text.strip(): 48 elem.text = i + " " 49 if not elem.tail or not elem.tail.strip(): 50 elem.tail = i 51 for subelem in elem: 52 indent(subelem, level + 1) 53 if not elem.tail or not elem.tail.strip(): 54 elem.tail = i 55 else: 56 if level and (not elem.tail or not elem.tail.strip()): 57 elem.tail = i 58 59 60 def build_measurement(measurementspec, modifiertypes): 61 """ 62 Build the XML measurement specification for a given measurement adhering to defs.json/#definitions/measurement. 63 64 Args: 65 measurementspec (:obj:`dict`): The measurements specification from a :class:`~pyhf.workspace.Workspace`. 66 modifiertypes (:obj:`dict`): A mapping from modifier name (:obj:`str`) to modifier type (:obj:`str`). 67 68 Returns: 69 :class:`xml.etree.cElementTree.Element`: The XML measurement specification. 
70 71 """ 72 # need to determine prefixes 73 prefixes = { 74 'normsys': 'alpha_', 75 'histosys': 'alpha_', 76 'shapesys': 'gamma_', 77 'staterror': 'gamma_', 78 } 79 80 config = measurementspec['config'] 81 name = measurementspec['name'] 82 poi = config['poi'] 83 84 # we want to know which parameters are fixed (constant) 85 # and to additionally extract the luminosity information 86 fixed_params = [] 87 lumi = 1.0 88 lumierr = 0.0 89 for parameter in config['parameters']: 90 if parameter.get('fixed', False): 91 pname = parameter['name'] 92 if pname == 'lumi': 93 fixed_params.append('Lumi') 94 else: 95 prefix = prefixes.get(modifiertypes[pname], '') 96 fixed_params.append(f'{prefix}{pname}') 97 # we found luminosity, so handle it 98 if parameter['name'] == 'lumi': 99 lumi = parameter['auxdata'][0] 100 lumierr = parameter['sigmas'][0] 101 102 # define measurement 103 meas = ET.Element( 104 "Measurement", 105 Name=name, 106 Lumi=str(lumi), 107 LumiRelErr=str(lumierr), 108 ExportOnly=str(True), 109 ) 110 poiel = ET.Element('POI') 111 poiel.text = poi 112 meas.append(poiel) 113 114 # add fixed parameters (constant) 115 if fixed_params: 116 se = ET.Element('ParamSetting', Const='True') 117 se.text = ' '.join(fixed_params) 118 meas.append(se) 119 return meas 120 121 122 def build_modifier(spec, modifierspec, channelname, samplename, sampledata): 123 if modifierspec['name'] == 'lumi': 124 return None 125 mod_map = { 126 'histosys': 'HistoSys', 127 'staterror': 'StatError', 128 'normsys': 'OverallSys', 129 'shapesys': 'ShapeSys', 130 'normfactor': 'NormFactor', 131 'shapefactor': 'ShapeFactor', 132 } 133 134 attrs = {'Name': modifierspec['name']} 135 if modifierspec['type'] == 'histosys': 136 attrs['HistoNameLow'] = _make_hist_name( 137 channelname, samplename, modifierspec['name'], suffix='Low' 138 ) 139 attrs['HistoNameHigh'] = _make_hist_name( 140 channelname, samplename, modifierspec['name'], suffix='High' 141 ) 142 _export_root_histogram(attrs['HistoNameLow'], modifierspec['data']['lo_data']) 143 _export_root_histogram(attrs['HistoNameHigh'], modifierspec['data']['hi_data']) 144 elif modifierspec['type'] == 'normsys': 145 attrs['High'] = str(modifierspec['data']['hi']) 146 attrs['Low'] = str(modifierspec['data']['lo']) 147 elif modifierspec['type'] == 'normfactor': 148 # NB: only look at first measurement for normfactor configs. In order 149 # to dump as HistFactory XML, this has to be the same for all 150 # measurements or it will not work correctly. Why? 151 # 152 # Unlike other modifiers, NormFactor has the unique circumstance of 153 # defining its parameter configurations at the modifier level inside 154 # the channel specification, instead of at the measurement level, like 155 # all of the other modifiers. 156 # 157 # However, since I strive for perfection, the "Const" attribute will 158 # never be set here, but at the per-measurement configuration instead 159 # like all other parameters. This is an acceptable compromise. 160 # 161 # Lastly, if a normfactor parameter configuration doesn't exist in the 162 # first measurement parameter configuration, then set defaults. 
163 val = 1 164 low = 0 165 high = 10 166 for p in spec['measurements'][0]['config']['parameters']: 167 if p['name'] == modifierspec['name']: 168 val = p['inits'][0] 169 low, high = p['bounds'][0] 170 attrs['Val'] = str(val) 171 attrs['Low'] = str(low) 172 attrs['High'] = str(high) 173 elif modifierspec['type'] == 'staterror': 174 attrs['Activate'] = 'True' 175 attrs['HistoName'] = _make_hist_name( 176 channelname, samplename, modifierspec['name'] 177 ) 178 del attrs['Name'] 179 # need to make this a relative uncertainty stored in ROOT file 180 _export_root_histogram( 181 attrs['HistoName'], 182 np.divide( 183 modifierspec['data'], 184 sampledata, 185 out=np.zeros_like(sampledata), 186 where=np.asarray(sampledata) != 0, 187 dtype='float', 188 ).tolist(), 189 ) 190 elif modifierspec['type'] == 'shapesys': 191 attrs['ConstraintType'] = 'Poisson' 192 attrs['HistoName'] = _make_hist_name( 193 channelname, samplename, modifierspec['name'] 194 ) 195 # need to make this a relative uncertainty stored in ROOT file 196 _export_root_histogram( 197 attrs['HistoName'], 198 [ 199 np.divide( 200 a, b, out=np.zeros_like(a), where=np.asarray(b) != 0, dtype='float' 201 ) 202 for a, b in np.array((modifierspec['data'], sampledata)).T 203 ], 204 ) 205 elif modifierspec['type'] == 'shapefactor': 206 pass 207 else: 208 log.warning( 209 f"Skipping modifier {modifierspec['name']}({modifierspec['type']}) for now" 210 ) 211 return None 212 213 modifier = ET.Element(mod_map[modifierspec['type']], **attrs) 214 return modifier 215 216 217 def build_sample(spec, samplespec, channelname): 218 histname = _make_hist_name(channelname, samplespec['name']) 219 attrs = { 220 'Name': samplespec['name'], 221 'HistoName': histname, 222 'InputFile': _ROOT_DATA_FILE._path, 223 'NormalizeByTheory': 'False', 224 } 225 sample = ET.Element('Sample', **attrs) 226 for modspec in samplespec['modifiers']: 227 # if lumi modifier added for this sample, need to set NormalizeByTheory 228 if modspec['type'] == 'lumi': 229 sample.attrib.update({'NormalizeByTheory': 'True'}) 230 modifier = build_modifier( 231 spec, modspec, channelname, samplespec['name'], samplespec['data'] 232 ) 233 if modifier is not None: 234 sample.append(modifier) 235 _export_root_histogram(histname, samplespec['data']) 236 return sample 237 238 239 def build_data(obsspec, channelname): 240 histname = _make_hist_name(channelname, 'data') 241 data = ET.Element('Data', HistoName=histname, InputFile=_ROOT_DATA_FILE._path) 242 243 observation = next((obs for obs in obsspec if obs['name'] == channelname), None) 244 _export_root_histogram(histname, observation['data']) 245 return data 246 247 248 def build_channel(spec, channelspec, obsspec): 249 channel = ET.Element( 250 'Channel', Name=channelspec['name'], InputFile=_ROOT_DATA_FILE._path 251 ) 252 if obsspec: 253 data = build_data(obsspec, channelspec['name']) 254 channel.append(data) 255 for samplespec in channelspec['samples']: 256 channel.append(build_sample(spec, samplespec, channelspec['name'])) 257 return channel 258 259 260 def writexml(spec, specdir, data_rootdir, resultprefix): 261 global _ROOT_DATA_FILE 262 263 shutil.copyfile( 264 pkg_resources.resource_filename(__name__, 'schemas/HistFactorySchema.dtd'), 265 Path(specdir).parent.joinpath('HistFactorySchema.dtd'), 266 ) 267 combination = ET.Element( 268 "Combination", OutputFilePrefix=str(Path(specdir).joinpath(resultprefix)) 269 ) 270 271 with uproot.recreate( 272 str(Path(data_rootdir).joinpath('data.root')) 273 ) as _ROOT_DATA_FILE: 274 for channelspec in 
spec['channels']: 275 channelfilename = str( 276 Path(specdir).joinpath(f'{resultprefix}_{channelspec["name"]}.xml') 277 ) 278 with open(channelfilename, 'w') as channelfile: 279 channel = build_channel(spec, channelspec, spec.get('observations')) 280 indent(channel) 281 channelfile.write( 282 "<!DOCTYPE Channel SYSTEM '../HistFactorySchema.dtd'>\n\n" 283 ) 284 channelfile.write( 285 ET.tostring(channel, encoding='utf-8').decode('utf-8') 286 ) 287 288 inp = ET.Element("Input") 289 inp.text = channelfilename 290 combination.append(inp) 291 292 # need information about modifier types to get the right prefix in measurement 293 mixin = _ChannelSummaryMixin(channels=spec['channels']) 294 295 for measurement in spec['measurements']: 296 combination.append(build_measurement(measurement, dict(mixin.modifiers))) 297 indent(combination) 298 return b"<!DOCTYPE Combination SYSTEM 'HistFactorySchema.dtd'>\n\n" + ET.tostring( 299 combination, encoding='utf-8' 300 ) 301 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/writexml.py b/src/pyhf/writexml.py --- a/src/pyhf/writexml.py +++ b/src/pyhf/writexml.py @@ -199,7 +199,9 @@ np.divide( a, b, out=np.zeros_like(a), where=np.asarray(b) != 0, dtype='float' ) - for a, b in np.array((modifierspec['data'], sampledata)).T + for a, b in np.array( + (modifierspec['data'], sampledata), dtype="float" + ).T ], ) elif modifierspec['type'] == 'shapefactor':
{"golden_diff": "diff --git a/src/pyhf/writexml.py b/src/pyhf/writexml.py\n--- a/src/pyhf/writexml.py\n+++ b/src/pyhf/writexml.py\n@@ -199,7 +199,9 @@\n np.divide(\n a, b, out=np.zeros_like(a), where=np.asarray(b) != 0, dtype='float'\n )\n- for a, b in np.array((modifierspec['data'], sampledata)).T\n+ for a, b in np.array(\n+ (modifierspec['data'], sampledata), dtype=\"float\"\n+ ).T\n ],\n )\n elif modifierspec['type'] == 'shapefactor':\n", "issue": "json2xml breaks for a simple JSON\n# Description\r\n\r\n```\r\n{\r\n \"channels\": [\r\n {\r\n \"name\": \"singlechannel\",\r\n \"samples\": [\r\n {\r\n \"name\": \"signal\",\r\n \"data\": [\r\n 5\r\n ],\r\n \"modifiers\": [\r\n {\r\n \"name\": \"mu\",\r\n \"type\": \"normfactor\",\r\n \"data\": null\r\n }\r\n ]\r\n },\r\n {\r\n \"name\": \"background\",\r\n \"data\": [\r\n 50\r\n ],\r\n \"modifiers\": [\r\n {\r\n \"name\": \"uncorr_bkguncrt\",\r\n \"type\": \"shapesys\",\r\n \"data\": [\r\n 6\r\n ]\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n ],\r\n \"version\": \"1.0.0\",\r\n \"measurements\": [\r\n {\r\n \"name\": \"measurement\",\r\n \"config\": {\r\n \"poi\": \"mu\",\r\n \"parameters\": [\r\n {\r\n \"bounds\": [\r\n [\r\n 0,\r\n 10\r\n ]\r\n ],\r\n \"inits\": [\r\n 1\r\n ],\r\n \"fixed\": false,\r\n \"name\": \"mu\"\r\n },\r\n {\r\n \"bounds\": [\r\n [\r\n 1e-10,\r\n 10\r\n ]\r\n ],\r\n \"inits\": [\r\n 1\r\n ],\r\n \"fixed\": false,\r\n \"name\": \"uncorr_bkguncrt\"\r\n }\r\n ]\r\n }\r\n }\r\n ],\r\n \"observations\": [\r\n {\r\n \"name\": \"singlechannel\",\r\n \"data\": [\r\n 50\r\n ]\r\n }\r\n ]\r\n}\r\n```\n", "before_files": [{"content": "import logging\n\nfrom pathlib import Path\nimport shutil\nimport pkg_resources\nimport xml.etree.cElementTree as ET\nimport numpy as np\nimport uproot\nfrom uproot_methods.classes import TH1\n\nfrom .mixins import _ChannelSummaryMixin\n\n_ROOT_DATA_FILE = None\n\nlog = logging.getLogger(__name__)\n\n# 'spec' gets passed through all functions as NormFactor is a unique case of having\n# parameter configurations stored at the modifier-definition-spec level. This means\n# that build_modifier() needs access to the measurements. 
The call stack is:\n#\n# writexml\n# ->build_channel\n# ->build_sample\n# ->build_modifier\n#\n# Therefore, 'spec' needs to be threaded through all these calls.\n\n\ndef _make_hist_name(channel, sample, modifier='', prefix='hist', suffix=''):\n middle = '_'.join(filter(lambda x: x, [channel, sample, modifier]))\n return f\"{prefix}{middle}{suffix}\"\n\n\ndef _export_root_histogram(histname, data):\n h = TH1.from_numpy((np.asarray(data), np.arange(len(data) + 1)))\n h._fName = histname\n # NB: uproot crashes for some reason, figure out why later\n # if histname in _ROOT_DATA_FILE:\n # raise KeyError(f'Duplicate key {histname} being written.')\n _ROOT_DATA_FILE[histname] = h\n\n\n# https://stackoverflow.com/a/4590052\ndef indent(elem, level=0):\n i = \"\\n\" + level * \" \"\n if elem:\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for subelem in elem:\n indent(subelem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n\ndef build_measurement(measurementspec, modifiertypes):\n \"\"\"\n Build the XML measurement specification for a given measurement adhering to defs.json/#definitions/measurement.\n\n Args:\n measurementspec (:obj:`dict`): The measurements specification from a :class:`~pyhf.workspace.Workspace`.\n modifiertypes (:obj:`dict`): A mapping from modifier name (:obj:`str`) to modifier type (:obj:`str`).\n\n Returns:\n :class:`xml.etree.cElementTree.Element`: The XML measurement specification.\n\n \"\"\"\n # need to determine prefixes\n prefixes = {\n 'normsys': 'alpha_',\n 'histosys': 'alpha_',\n 'shapesys': 'gamma_',\n 'staterror': 'gamma_',\n }\n\n config = measurementspec['config']\n name = measurementspec['name']\n poi = config['poi']\n\n # we want to know which parameters are fixed (constant)\n # and to additionally extract the luminosity information\n fixed_params = []\n lumi = 1.0\n lumierr = 0.0\n for parameter in config['parameters']:\n if parameter.get('fixed', False):\n pname = parameter['name']\n if pname == 'lumi':\n fixed_params.append('Lumi')\n else:\n prefix = prefixes.get(modifiertypes[pname], '')\n fixed_params.append(f'{prefix}{pname}')\n # we found luminosity, so handle it\n if parameter['name'] == 'lumi':\n lumi = parameter['auxdata'][0]\n lumierr = parameter['sigmas'][0]\n\n # define measurement\n meas = ET.Element(\n \"Measurement\",\n Name=name,\n Lumi=str(lumi),\n LumiRelErr=str(lumierr),\n ExportOnly=str(True),\n )\n poiel = ET.Element('POI')\n poiel.text = poi\n meas.append(poiel)\n\n # add fixed parameters (constant)\n if fixed_params:\n se = ET.Element('ParamSetting', Const='True')\n se.text = ' '.join(fixed_params)\n meas.append(se)\n return meas\n\n\ndef build_modifier(spec, modifierspec, channelname, samplename, sampledata):\n if modifierspec['name'] == 'lumi':\n return None\n mod_map = {\n 'histosys': 'HistoSys',\n 'staterror': 'StatError',\n 'normsys': 'OverallSys',\n 'shapesys': 'ShapeSys',\n 'normfactor': 'NormFactor',\n 'shapefactor': 'ShapeFactor',\n }\n\n attrs = {'Name': modifierspec['name']}\n if modifierspec['type'] == 'histosys':\n attrs['HistoNameLow'] = _make_hist_name(\n channelname, samplename, modifierspec['name'], suffix='Low'\n )\n attrs['HistoNameHigh'] = _make_hist_name(\n channelname, samplename, modifierspec['name'], suffix='High'\n )\n _export_root_histogram(attrs['HistoNameLow'], modifierspec['data']['lo_data'])\n 
_export_root_histogram(attrs['HistoNameHigh'], modifierspec['data']['hi_data'])\n elif modifierspec['type'] == 'normsys':\n attrs['High'] = str(modifierspec['data']['hi'])\n attrs['Low'] = str(modifierspec['data']['lo'])\n elif modifierspec['type'] == 'normfactor':\n # NB: only look at first measurement for normfactor configs. In order\n # to dump as HistFactory XML, this has to be the same for all\n # measurements or it will not work correctly. Why?\n #\n # Unlike other modifiers, NormFactor has the unique circumstance of\n # defining its parameter configurations at the modifier level inside\n # the channel specification, instead of at the measurement level, like\n # all of the other modifiers.\n #\n # However, since I strive for perfection, the \"Const\" attribute will\n # never be set here, but at the per-measurement configuration instead\n # like all other parameters. This is an acceptable compromise.\n #\n # Lastly, if a normfactor parameter configuration doesn't exist in the\n # first measurement parameter configuration, then set defaults.\n val = 1\n low = 0\n high = 10\n for p in spec['measurements'][0]['config']['parameters']:\n if p['name'] == modifierspec['name']:\n val = p['inits'][0]\n low, high = p['bounds'][0]\n attrs['Val'] = str(val)\n attrs['Low'] = str(low)\n attrs['High'] = str(high)\n elif modifierspec['type'] == 'staterror':\n attrs['Activate'] = 'True'\n attrs['HistoName'] = _make_hist_name(\n channelname, samplename, modifierspec['name']\n )\n del attrs['Name']\n # need to make this a relative uncertainty stored in ROOT file\n _export_root_histogram(\n attrs['HistoName'],\n np.divide(\n modifierspec['data'],\n sampledata,\n out=np.zeros_like(sampledata),\n where=np.asarray(sampledata) != 0,\n dtype='float',\n ).tolist(),\n )\n elif modifierspec['type'] == 'shapesys':\n attrs['ConstraintType'] = 'Poisson'\n attrs['HistoName'] = _make_hist_name(\n channelname, samplename, modifierspec['name']\n )\n # need to make this a relative uncertainty stored in ROOT file\n _export_root_histogram(\n attrs['HistoName'],\n [\n np.divide(\n a, b, out=np.zeros_like(a), where=np.asarray(b) != 0, dtype='float'\n )\n for a, b in np.array((modifierspec['data'], sampledata)).T\n ],\n )\n elif modifierspec['type'] == 'shapefactor':\n pass\n else:\n log.warning(\n f\"Skipping modifier {modifierspec['name']}({modifierspec['type']}) for now\"\n )\n return None\n\n modifier = ET.Element(mod_map[modifierspec['type']], **attrs)\n return modifier\n\n\ndef build_sample(spec, samplespec, channelname):\n histname = _make_hist_name(channelname, samplespec['name'])\n attrs = {\n 'Name': samplespec['name'],\n 'HistoName': histname,\n 'InputFile': _ROOT_DATA_FILE._path,\n 'NormalizeByTheory': 'False',\n }\n sample = ET.Element('Sample', **attrs)\n for modspec in samplespec['modifiers']:\n # if lumi modifier added for this sample, need to set NormalizeByTheory\n if modspec['type'] == 'lumi':\n sample.attrib.update({'NormalizeByTheory': 'True'})\n modifier = build_modifier(\n spec, modspec, channelname, samplespec['name'], samplespec['data']\n )\n if modifier is not None:\n sample.append(modifier)\n _export_root_histogram(histname, samplespec['data'])\n return sample\n\n\ndef build_data(obsspec, channelname):\n histname = _make_hist_name(channelname, 'data')\n data = ET.Element('Data', HistoName=histname, InputFile=_ROOT_DATA_FILE._path)\n\n observation = next((obs for obs in obsspec if obs['name'] == channelname), None)\n _export_root_histogram(histname, observation['data'])\n return data\n\n\ndef 
build_channel(spec, channelspec, obsspec):\n channel = ET.Element(\n 'Channel', Name=channelspec['name'], InputFile=_ROOT_DATA_FILE._path\n )\n if obsspec:\n data = build_data(obsspec, channelspec['name'])\n channel.append(data)\n for samplespec in channelspec['samples']:\n channel.append(build_sample(spec, samplespec, channelspec['name']))\n return channel\n\n\ndef writexml(spec, specdir, data_rootdir, resultprefix):\n global _ROOT_DATA_FILE\n\n shutil.copyfile(\n pkg_resources.resource_filename(__name__, 'schemas/HistFactorySchema.dtd'),\n Path(specdir).parent.joinpath('HistFactorySchema.dtd'),\n )\n combination = ET.Element(\n \"Combination\", OutputFilePrefix=str(Path(specdir).joinpath(resultprefix))\n )\n\n with uproot.recreate(\n str(Path(data_rootdir).joinpath('data.root'))\n ) as _ROOT_DATA_FILE:\n for channelspec in spec['channels']:\n channelfilename = str(\n Path(specdir).joinpath(f'{resultprefix}_{channelspec[\"name\"]}.xml')\n )\n with open(channelfilename, 'w') as channelfile:\n channel = build_channel(spec, channelspec, spec.get('observations'))\n indent(channel)\n channelfile.write(\n \"<!DOCTYPE Channel SYSTEM '../HistFactorySchema.dtd'>\\n\\n\"\n )\n channelfile.write(\n ET.tostring(channel, encoding='utf-8').decode('utf-8')\n )\n\n inp = ET.Element(\"Input\")\n inp.text = channelfilename\n combination.append(inp)\n\n # need information about modifier types to get the right prefix in measurement\n mixin = _ChannelSummaryMixin(channels=spec['channels'])\n\n for measurement in spec['measurements']:\n combination.append(build_measurement(measurement, dict(mixin.modifiers)))\n indent(combination)\n return b\"<!DOCTYPE Combination SYSTEM 'HistFactorySchema.dtd'>\\n\\n\" + ET.tostring(\n combination, encoding='utf-8'\n )\n", "path": "src/pyhf/writexml.py"}], "after_files": [{"content": "import logging\n\nfrom pathlib import Path\nimport shutil\nimport pkg_resources\nimport xml.etree.cElementTree as ET\nimport numpy as np\nimport uproot\nfrom uproot_methods.classes import TH1\n\nfrom .mixins import _ChannelSummaryMixin\n\n_ROOT_DATA_FILE = None\n\nlog = logging.getLogger(__name__)\n\n# 'spec' gets passed through all functions as NormFactor is a unique case of having\n# parameter configurations stored at the modifier-definition-spec level. This means\n# that build_modifier() needs access to the measurements. 
The call stack is:\n#\n# writexml\n# ->build_channel\n# ->build_sample\n# ->build_modifier\n#\n# Therefore, 'spec' needs to be threaded through all these calls.\n\n\ndef _make_hist_name(channel, sample, modifier='', prefix='hist', suffix=''):\n middle = '_'.join(filter(lambda x: x, [channel, sample, modifier]))\n return f\"{prefix}{middle}{suffix}\"\n\n\ndef _export_root_histogram(histname, data):\n h = TH1.from_numpy((np.asarray(data), np.arange(len(data) + 1)))\n h._fName = histname\n # NB: uproot crashes for some reason, figure out why later\n # if histname in _ROOT_DATA_FILE:\n # raise KeyError(f'Duplicate key {histname} being written.')\n _ROOT_DATA_FILE[histname] = h\n\n\n# https://stackoverflow.com/a/4590052\ndef indent(elem, level=0):\n i = \"\\n\" + level * \" \"\n if elem:\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for subelem in elem:\n indent(subelem, level + 1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n\ndef build_measurement(measurementspec, modifiertypes):\n \"\"\"\n Build the XML measurement specification for a given measurement adhering to defs.json/#definitions/measurement.\n\n Args:\n measurementspec (:obj:`dict`): The measurements specification from a :class:`~pyhf.workspace.Workspace`.\n modifiertypes (:obj:`dict`): A mapping from modifier name (:obj:`str`) to modifier type (:obj:`str`).\n\n Returns:\n :class:`xml.etree.cElementTree.Element`: The XML measurement specification.\n\n \"\"\"\n # need to determine prefixes\n prefixes = {\n 'normsys': 'alpha_',\n 'histosys': 'alpha_',\n 'shapesys': 'gamma_',\n 'staterror': 'gamma_',\n }\n\n config = measurementspec['config']\n name = measurementspec['name']\n poi = config['poi']\n\n # we want to know which parameters are fixed (constant)\n # and to additionally extract the luminosity information\n fixed_params = []\n lumi = 1.0\n lumierr = 0.0\n for parameter in config['parameters']:\n if parameter.get('fixed', False):\n pname = parameter['name']\n if pname == 'lumi':\n fixed_params.append('Lumi')\n else:\n prefix = prefixes.get(modifiertypes[pname], '')\n fixed_params.append(f'{prefix}{pname}')\n # we found luminosity, so handle it\n if parameter['name'] == 'lumi':\n lumi = parameter['auxdata'][0]\n lumierr = parameter['sigmas'][0]\n\n # define measurement\n meas = ET.Element(\n \"Measurement\",\n Name=name,\n Lumi=str(lumi),\n LumiRelErr=str(lumierr),\n ExportOnly=str(True),\n )\n poiel = ET.Element('POI')\n poiel.text = poi\n meas.append(poiel)\n\n # add fixed parameters (constant)\n if fixed_params:\n se = ET.Element('ParamSetting', Const='True')\n se.text = ' '.join(fixed_params)\n meas.append(se)\n return meas\n\n\ndef build_modifier(spec, modifierspec, channelname, samplename, sampledata):\n if modifierspec['name'] == 'lumi':\n return None\n mod_map = {\n 'histosys': 'HistoSys',\n 'staterror': 'StatError',\n 'normsys': 'OverallSys',\n 'shapesys': 'ShapeSys',\n 'normfactor': 'NormFactor',\n 'shapefactor': 'ShapeFactor',\n }\n\n attrs = {'Name': modifierspec['name']}\n if modifierspec['type'] == 'histosys':\n attrs['HistoNameLow'] = _make_hist_name(\n channelname, samplename, modifierspec['name'], suffix='Low'\n )\n attrs['HistoNameHigh'] = _make_hist_name(\n channelname, samplename, modifierspec['name'], suffix='High'\n )\n _export_root_histogram(attrs['HistoNameLow'], modifierspec['data']['lo_data'])\n 
_export_root_histogram(attrs['HistoNameHigh'], modifierspec['data']['hi_data'])\n elif modifierspec['type'] == 'normsys':\n attrs['High'] = str(modifierspec['data']['hi'])\n attrs['Low'] = str(modifierspec['data']['lo'])\n elif modifierspec['type'] == 'normfactor':\n # NB: only look at first measurement for normfactor configs. In order\n # to dump as HistFactory XML, this has to be the same for all\n # measurements or it will not work correctly. Why?\n #\n # Unlike other modifiers, NormFactor has the unique circumstance of\n # defining its parameter configurations at the modifier level inside\n # the channel specification, instead of at the measurement level, like\n # all of the other modifiers.\n #\n # However, since I strive for perfection, the \"Const\" attribute will\n # never be set here, but at the per-measurement configuration instead\n # like all other parameters. This is an acceptable compromise.\n #\n # Lastly, if a normfactor parameter configuration doesn't exist in the\n # first measurement parameter configuration, then set defaults.\n val = 1\n low = 0\n high = 10\n for p in spec['measurements'][0]['config']['parameters']:\n if p['name'] == modifierspec['name']:\n val = p['inits'][0]\n low, high = p['bounds'][0]\n attrs['Val'] = str(val)\n attrs['Low'] = str(low)\n attrs['High'] = str(high)\n elif modifierspec['type'] == 'staterror':\n attrs['Activate'] = 'True'\n attrs['HistoName'] = _make_hist_name(\n channelname, samplename, modifierspec['name']\n )\n del attrs['Name']\n # need to make this a relative uncertainty stored in ROOT file\n _export_root_histogram(\n attrs['HistoName'],\n np.divide(\n modifierspec['data'],\n sampledata,\n out=np.zeros_like(sampledata),\n where=np.asarray(sampledata) != 0,\n dtype='float',\n ).tolist(),\n )\n elif modifierspec['type'] == 'shapesys':\n attrs['ConstraintType'] = 'Poisson'\n attrs['HistoName'] = _make_hist_name(\n channelname, samplename, modifierspec['name']\n )\n # need to make this a relative uncertainty stored in ROOT file\n _export_root_histogram(\n attrs['HistoName'],\n [\n np.divide(\n a, b, out=np.zeros_like(a), where=np.asarray(b) != 0, dtype='float'\n )\n for a, b in np.array(\n (modifierspec['data'], sampledata), dtype=\"float\"\n ).T\n ],\n )\n elif modifierspec['type'] == 'shapefactor':\n pass\n else:\n log.warning(\n f\"Skipping modifier {modifierspec['name']}({modifierspec['type']}) for now\"\n )\n return None\n\n modifier = ET.Element(mod_map[modifierspec['type']], **attrs)\n return modifier\n\n\ndef build_sample(spec, samplespec, channelname):\n histname = _make_hist_name(channelname, samplespec['name'])\n attrs = {\n 'Name': samplespec['name'],\n 'HistoName': histname,\n 'InputFile': _ROOT_DATA_FILE._path,\n 'NormalizeByTheory': 'False',\n }\n sample = ET.Element('Sample', **attrs)\n for modspec in samplespec['modifiers']:\n # if lumi modifier added for this sample, need to set NormalizeByTheory\n if modspec['type'] == 'lumi':\n sample.attrib.update({'NormalizeByTheory': 'True'})\n modifier = build_modifier(\n spec, modspec, channelname, samplespec['name'], samplespec['data']\n )\n if modifier is not None:\n sample.append(modifier)\n _export_root_histogram(histname, samplespec['data'])\n return sample\n\n\ndef build_data(obsspec, channelname):\n histname = _make_hist_name(channelname, 'data')\n data = ET.Element('Data', HistoName=histname, InputFile=_ROOT_DATA_FILE._path)\n\n observation = next((obs for obs in obsspec if obs['name'] == channelname), None)\n _export_root_histogram(histname, observation['data'])\n 
return data\n\n\ndef build_channel(spec, channelspec, obsspec):\n channel = ET.Element(\n 'Channel', Name=channelspec['name'], InputFile=_ROOT_DATA_FILE._path\n )\n if obsspec:\n data = build_data(obsspec, channelspec['name'])\n channel.append(data)\n for samplespec in channelspec['samples']:\n channel.append(build_sample(spec, samplespec, channelspec['name']))\n return channel\n\n\ndef writexml(spec, specdir, data_rootdir, resultprefix):\n global _ROOT_DATA_FILE\n\n shutil.copyfile(\n pkg_resources.resource_filename(__name__, 'schemas/HistFactorySchema.dtd'),\n Path(specdir).parent.joinpath('HistFactorySchema.dtd'),\n )\n combination = ET.Element(\n \"Combination\", OutputFilePrefix=str(Path(specdir).joinpath(resultprefix))\n )\n\n with uproot.recreate(\n str(Path(data_rootdir).joinpath('data.root'))\n ) as _ROOT_DATA_FILE:\n for channelspec in spec['channels']:\n channelfilename = str(\n Path(specdir).joinpath(f'{resultprefix}_{channelspec[\"name\"]}.xml')\n )\n with open(channelfilename, 'w') as channelfile:\n channel = build_channel(spec, channelspec, spec.get('observations'))\n indent(channel)\n channelfile.write(\n \"<!DOCTYPE Channel SYSTEM '../HistFactorySchema.dtd'>\\n\\n\"\n )\n channelfile.write(\n ET.tostring(channel, encoding='utf-8').decode('utf-8')\n )\n\n inp = ET.Element(\"Input\")\n inp.text = channelfilename\n combination.append(inp)\n\n # need information about modifier types to get the right prefix in measurement\n mixin = _ChannelSummaryMixin(channels=spec['channels'])\n\n for measurement in spec['measurements']:\n combination.append(build_measurement(measurement, dict(mixin.modifiers)))\n indent(combination)\n return b\"<!DOCTYPE Combination SYSTEM 'HistFactorySchema.dtd'>\\n\\n\" + ET.tostring(\n combination, encoding='utf-8'\n )\n", "path": "src/pyhf/writexml.py"}]}
3,946
154
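A minimal numpy sketch of why the `dtype="float"` change in the golden diff above matters: with integer bin contents such as `[6]` and `[50]` from the issue's single-bin example, `np.zeros_like(a)` allocates an integer output buffer, and `np.divide(..., dtype='float')` then refuses to cast its float64 result into it. Stacking the inputs as floats gives a float buffer, so the relative uncertainty can be computed. The toy values and the helper name below are assumptions for the demonstration only.

```python
import numpy as np

shapesys_data = [6]   # modifierspec["data"] from the issue's single-bin example
sample_data = [50]    # sampledata for the background sample


def relative_uncertainty(stacked):
    # Mirrors the list comprehension used for shapesys modifiers in the record's writexml.py
    return [
        np.divide(a, b, out=np.zeros_like(a), where=np.asarray(b) != 0, dtype="float")
        for a, b in stacked.T
    ]


# Old behaviour: integer inputs -> integer output buffer -> ufunc casting error
try:
    relative_uncertainty(np.array((shapesys_data, sample_data)))
except TypeError as err:
    print("fails without dtype:", err)

# Fixed behaviour: forcing a float array gives a float buffer, so division succeeds
print(relative_uncertainty(np.array((shapesys_data, sample_data), dtype="float")))
```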
gh_patches_debug_24456
rasdani/github-patches
git_diff
bridgecrewio__checkov-1096
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Duplicate check **Describe the bug** The new check `CKV_AWS_125` popped up complaining about me not activating point in time recovery for a DynamoDB table, which is surprising, because I already acknowledged it in the past. After digging into the code I found out that check `CKV_AWS_125` and `CKV_AWS_28` are identical. It would be great to delete one of them, probably the new one. **Additional context** https://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbRecovery.py#L5-L14 https://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py#L5-L17 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py` Content: ``` 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure that that point in time recovery is enabled for Amazon DynamoDB tables" 8 id = "CKV_AWS_125" 9 supported_resources = ['aws_dynamodb_table'] 10 categories = [CheckCategories.BACKUP_AND_RECOVERY] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return "point_in_time_recovery/[0]/enabled" 15 16 def get_expected_value(self): 17 return True 18 19 20 check = DynamodbPointInTimeRecoveryEnabled() 21 ``` Path: `checkov/terraform/checks/resource/aws/KMSKeyRotation.py` Content: ``` 1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 2 from checkov.common.models.enums import CheckCategories 3 4 5 class KMSKeyRotation(BaseResourceValueCheck): 6 7 def __init__(self): 8 name = "Ensure KMS have rotation policy" 9 id = "CKV_AWS_132" 10 supported_resources = ['aws_kms_key'] 11 categories = [CheckCategories.ENCRYPTION] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 13 14 def get_inspected_key(self): 15 return "enable_key_rotation" 16 17 18 check = KMSKeyRotation() 19 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py b/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py deleted file mode 100644 --- a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py +++ /dev/null @@ -1,20 +0,0 @@ -from checkov.common.models.enums import CheckCategories, CheckResult -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck - - -class DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck): - def __init__(self): - name = "Ensure that that point in time recovery is enabled for Amazon DynamoDB tables" - id = "CKV_AWS_125" - supported_resources = ['aws_dynamodb_table'] - categories = [CheckCategories.BACKUP_AND_RECOVERY] - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - - def get_inspected_key(self): - return "point_in_time_recovery/[0]/enabled" - - def get_expected_value(self): - return True - - -check = DynamodbPointInTimeRecoveryEnabled() diff --git a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py b/checkov/terraform/checks/resource/aws/KMSKeyRotation.py deleted file mode 100644 --- a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py +++ /dev/null @@ -1,18 +0,0 @@ -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck -from checkov.common.models.enums import CheckCategories - - -class KMSKeyRotation(BaseResourceValueCheck): - - def __init__(self): - name = "Ensure KMS have rotation policy" - id = "CKV_AWS_132" - supported_resources = ['aws_kms_key'] - categories = [CheckCategories.ENCRYPTION] - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - - def get_inspected_key(self): - return "enable_key_rotation" - - -check = KMSKeyRotation()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py b/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-from checkov.common.models.enums import CheckCategories, CheckResult\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-\n-\n-class DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure that that point in time recovery is enabled for Amazon DynamoDB tables\"\n- id = \"CKV_AWS_125\"\n- supported_resources = ['aws_dynamodb_table']\n- categories = [CheckCategories.BACKUP_AND_RECOVERY]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"point_in_time_recovery/[0]/enabled\"\n-\n- def get_expected_value(self):\n- return True\n-\n-\n-check = DynamodbPointInTimeRecoveryEnabled()\ndiff --git a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py b/checkov/terraform/checks/resource/aws/KMSKeyRotation.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/KMSKeyRotation.py\n+++ /dev/null\n@@ -1,18 +0,0 @@\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class KMSKeyRotation(BaseResourceValueCheck):\n-\n- def __init__(self):\n- name = \"Ensure KMS have rotation policy\"\n- id = \"CKV_AWS_132\"\n- supported_resources = ['aws_kms_key']\n- categories = [CheckCategories.ENCRYPTION]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"enable_key_rotation\"\n-\n-\n-check = KMSKeyRotation()\n", "issue": "Duplicate check\n**Describe the bug**\r\nThe new check `CKV_AWS_125` popped up complaining about me not activating point in time recovery for a DynamoDB table, which is surprising, because I already acknowledged it in the past. After digging into the code I found out that check `CKV_AWS_125` and `CKV_AWS_28` are identical. 
It would be great to delete one of them, probably the new one.\r\n\r\n**Additional context**\r\nhttps://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbRecovery.py#L5-L14\r\nhttps://github.com/bridgecrewio/checkov/blob/caafcbb83d6901b57c9a39464673670c150f0a33/checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py#L5-L17\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass DynamodbPointInTimeRecoveryEnabled(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that that point in time recovery is enabled for Amazon DynamoDB tables\"\n id = \"CKV_AWS_125\"\n supported_resources = ['aws_dynamodb_table']\n categories = [CheckCategories.BACKUP_AND_RECOVERY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"point_in_time_recovery/[0]/enabled\"\n\n def get_expected_value(self):\n return True\n\n\ncheck = DynamodbPointInTimeRecoveryEnabled()\n", "path": "checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py"}, {"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass KMSKeyRotation(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure KMS have rotation policy\"\n id = \"CKV_AWS_132\"\n supported_resources = ['aws_kms_key']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"enable_key_rotation\"\n\n\ncheck = KMSKeyRotation()\n", "path": "checkov/terraform/checks/resource/aws/KMSKeyRotation.py"}], "after_files": [{"content": null, "path": "checkov/terraform/checks/resource/aws/DynamodbPointInTimeRecoveryEnabled.py"}, {"content": null, "path": "checkov/terraform/checks/resource/aws/KMSKeyRotation.py"}]}
912
493
gh_patches_debug_39561
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-2530
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Write a tutorial of elastic AllReduce with PyTorch --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `model_zoo/mnist/mnist_pytorch.py` Content: ``` 1 # Copyright 2020 The ElasticDL Authors. All rights reserved. 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 """ 15 Download the mnist dataset from 16 https://s3.amazonaws.com/fast-ai-imageclas/mnist_png.tgz 17 and then untar it into ${data_store_dir}. Using minikube, we can use the 18 following command to submit a training job with these codes. 19 20 elasticdl train \ 21 --image_name=elasticdl:pt_mnist_allreduce \ 22 --job_command="python -m model_zoo.mnist.mnist_pytorch \ 23 --training_data=/local_data/mnist_png/training \ 24 --validation_data=/local_data/mnist_png/testing" \ 25 --num_minibatches_per_task=2 \ 26 --num_workers=2 \ 27 --worker_pod_priority=0.5 \ 28 --master_resource_request="cpu=0.2,memory=1024Mi" \ 29 --master_resource_limit="cpu=1,memory=2048Mi" \ 30 --worker_resource_request="cpu=0.3,memory=1024Mi" \ 31 --worker_resource_limit="cpu=1,memory=2048Mi" \ 32 --envs="USE_TORCH=true,HOROVOD_GLOO_TIMEOUT_SECONDS=60,PYTHONUNBUFFERED=0" \ 33 --job_name=test-mnist-allreduce \ 34 --image_pull_policy=Never \ 35 --volume="host_path=${data_store_dir},mount_path=/local_data" \ 36 --distribution_strategy=AllreduceStrategy \ 37 """ 38 39 import argparse 40 import sys 41 42 import cv2 43 import numpy as np 44 import torch 45 import torch.nn as nn 46 import torch.nn.functional as F 47 import torch.optim as optim 48 import torchvision 49 from torch.optim.lr_scheduler import StepLR 50 from torch.utils.data import DataLoader, Dataset 51 52 from elasticai_api.pytorch.controller import create_elastic_controller 53 from elasticai_api.pytorch.optimizer import DistributedOptimizer 54 55 56 class ElasticDataset(Dataset): 57 def __init__(self, images, data_shard_service=None): 58 """The dataset supports elastic training. 59 60 Args: 61 images: A list with tuples like (image_path, label_index). 62 For example, we can use `torchvision.datasets.ImageFolder` 63 to get the list. 64 data_shard_service: If we want to use elastic training, we 65 need to use the `data_shard_service` of the elastic controller 66 in elasticai_api. 
67 """ 68 self.data_shard_service = data_shard_service 69 self._images = images 70 71 def __len__(self): 72 if self.data_shard_service: 73 # Set the maxsize because the size of dataset is not fixed 74 # when using dynamic sharding 75 return sys.maxsize 76 else: 77 return len(self._images) 78 79 def __getitem__(self, index): 80 if self.data_shard_service: 81 index = self.data_shard_service.fetch_record_index() 82 return self.read_image(index) 83 else: 84 return self.read_image(index) 85 86 def read_image(self, index): 87 image_path, label = self._images[index] 88 image = cv2.imread(image_path) 89 image = np.array(image / 255.0, np.float32) 90 image = image.reshape(3, 28, 28) 91 return image, label 92 93 94 class Net(nn.Module): 95 def __init__(self): 96 super(Net, self).__init__() 97 self.conv1 = nn.Conv2d(3, 32, 3, 1) 98 self.conv2 = nn.Conv2d(32, 64, 3, 1) 99 self.dropout1 = nn.Dropout(0.25) 100 self.dropout2 = nn.Dropout(0.5) 101 self.fc1 = nn.Linear(9216, 128) 102 self.fc2 = nn.Linear(128, 10) 103 104 def forward(self, x): 105 x = self.conv1(x) 106 x = F.relu(x) 107 x = self.conv2(x) 108 x = F.relu(x) 109 x = F.max_pool2d(x, 2) 110 x = self.dropout1(x) 111 x = torch.flatten(x, 1) 112 x = self.fc1(x) 113 x = F.relu(x) 114 x = self.dropout2(x) 115 x = self.fc2(x) 116 output = F.log_softmax(x, dim=1) 117 return output 118 119 120 def train(args): 121 """ The function to run the training loop. 122 Args: 123 dataset: The dataset is provided by ElasticDL for the elastic training. 124 Now, the dataset if tf.data.Dataset and we need to convert 125 the data in dataset to torch.tensor. Later, ElasticDL will 126 pass a torch.utils.data.DataLoader. 127 elastic_controller: The controller for elastic training. 128 """ 129 use_cuda = not args.no_cuda and torch.cuda.is_available() 130 device = torch.device("cuda" if use_cuda else "cpu") 131 train_data = torchvision.datasets.ImageFolder(args.training_data) 132 test_data = torchvision.datasets.ImageFolder(args.validation_data) 133 134 allreduce_controller = create_elastic_controller( 135 batch_size=args.batch_size, 136 dataset_size=len(train_data.imgs), 137 num_epochs=args.num_epochs, 138 shuffle=True, 139 ) 140 train_dataset = ElasticDataset( 141 train_data.imgs, allreduce_controller.data_shard_service 142 ) 143 train_loader = DataLoader( 144 dataset=train_dataset, batch_size=args.batch_size, num_workers=2 145 ) 146 147 test_dataset = ElasticDataset(test_data.imgs) 148 test_loader = DataLoader( 149 dataset=test_dataset, batch_size=args.batch_size, num_workers=2 150 ) 151 152 model = Net() 153 optimizer = optim.SGD(model.parameters(), lr=args.learning_rate) 154 optimizer = DistributedOptimizer(optimizer, fixed_global_batch_size=True) 155 scheduler = StepLR(optimizer, step_size=1, gamma=0.5) 156 157 # Set the model and optimizer to broadcast. 158 allreduce_controller.set_broadcast_model(model) 159 allreduce_controller.set_broadcast_optimizer(optimizer) 160 epoch = 0 161 # Use the elastic function to wrap the training function with a batch. 
162 elastic_train_one_batch = allreduce_controller.elastic_run(train_one_batch) 163 if torch.cuda.is_available(): 164 model.cuda() 165 with allreduce_controller.scope(): 166 for batch_idx, (data, target) in enumerate(train_loader): 167 model.train() 168 target = target.type(torch.LongTensor) 169 data, target = data.to(device), target.to(device) 170 loss = elastic_train_one_batch(model, optimizer, data, target) 171 print("loss = {}, step = {}".format(loss, batch_idx)) 172 new_epoch = allreduce_controller.get_current_epoch() 173 if new_epoch > epoch: 174 epoch = new_epoch 175 # Set epoch of the scheduler 176 scheduler.last_epoch = epoch - 1 177 scheduler.step() 178 test(model, device, test_loader) 179 180 181 def train_one_batch(model, optimizer, data, target): 182 optimizer.zero_grad() 183 output = model(data) 184 loss = F.nll_loss(output, target) 185 loss.backward() 186 optimizer.step() 187 return loss 188 189 190 def test(model, device, test_loader): 191 model.eval() 192 test_loss = 0 193 correct = 0 194 with torch.no_grad(): 195 for data, target in test_loader: 196 data, target = data.to(device), target.to(device) 197 output = model(data) 198 test_loss += F.nll_loss( 199 output, target, reduction="sum" 200 ).item() # sum up batch loss 201 pred = output.argmax( 202 dim=1, keepdim=True 203 ) # get the index of the max log-probability 204 correct += pred.eq(target.view_as(pred)).sum().item() 205 206 test_loss /= len(test_loader.dataset) 207 208 print( 209 "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format( 210 test_loss, 211 correct, 212 len(test_loader.dataset), 213 100.0 * correct / len(test_loader.dataset), 214 ) 215 ) 216 217 218 def arg_parser(): 219 parser = argparse.ArgumentParser(description="Process training parameters") 220 parser.add_argument("--batch_size", type=int, default=64, required=False) 221 parser.add_argument("--num_epochs", type=int, default=1, required=False) 222 parser.add_argument( 223 "--learning_rate", type=float, default=0.1, required=False 224 ) 225 parser.add_argument( 226 "--no-cuda", 227 action="store_true", 228 default=False, 229 help="disable CUDA training", 230 ) 231 parser.add_argument("--training_data", type=str, required=True) 232 parser.add_argument( 233 "--validation_data", type=str, default="", required=False 234 ) 235 return parser 236 237 238 if __name__ == "__main__": 239 parser = arg_parser() 240 args = parser.parse_args() 241 train(args) 242 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/model_zoo/mnist/mnist_pytorch.py b/model_zoo/mnist/mnist_pytorch.py --- a/model_zoo/mnist/mnist_pytorch.py +++ b/model_zoo/mnist/mnist_pytorch.py @@ -11,31 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -Download the mnist dataset from -https://s3.amazonaws.com/fast-ai-imageclas/mnist_png.tgz -and then untar it into ${data_store_dir}. Using minikube, we can use the -following command to submit a training job with these codes. - -elasticdl train \ - --image_name=elasticdl:pt_mnist_allreduce \ - --job_command="python -m model_zoo.mnist.mnist_pytorch \ - --training_data=/local_data/mnist_png/training \ - --validation_data=/local_data/mnist_png/testing" \ - --num_minibatches_per_task=2 \ - --num_workers=2 \ - --worker_pod_priority=0.5 \ - --master_resource_request="cpu=0.2,memory=1024Mi" \ - --master_resource_limit="cpu=1,memory=2048Mi" \ - --worker_resource_request="cpu=0.3,memory=1024Mi" \ - --worker_resource_limit="cpu=1,memory=2048Mi" \ - --envs="USE_TORCH=true,HOROVOD_GLOO_TIMEOUT_SECONDS=60,PYTHONUNBUFFERED=0" \ - --job_name=test-mnist-allreduce \ - --image_pull_policy=Never \ - --volume="host_path=${data_store_dir},mount_path=/local_data" \ - --distribution_strategy=AllreduceStrategy \ -""" - import argparse import sys @@ -49,7 +24,8 @@ from torch.optim.lr_scheduler import StepLR from torch.utils.data import DataLoader, Dataset -from elasticai_api.pytorch.controller import create_elastic_controller +from elasticai_api.common.data_shard_service import RecordIndexService +from elasticai_api.pytorch.controller import PyTorchAllReduceController from elasticai_api.pytorch.optimizer import DistributedOptimizer @@ -131,15 +107,14 @@ train_data = torchvision.datasets.ImageFolder(args.training_data) test_data = torchvision.datasets.ImageFolder(args.validation_data) - allreduce_controller = create_elastic_controller( + data_shard_service = RecordIndexService( batch_size=args.batch_size, dataset_size=len(train_data.imgs), num_epochs=args.num_epochs, shuffle=True, + dataset_name="mnist_training_data", ) - train_dataset = ElasticDataset( - train_data.imgs, allreduce_controller.data_shard_service - ) + train_dataset = ElasticDataset(train_data.imgs, data_shard_service) train_loader = DataLoader( dataset=train_dataset, batch_size=args.batch_size, num_workers=2 ) @@ -155,6 +130,7 @@ scheduler = StepLR(optimizer, step_size=1, gamma=0.5) # Set the model and optimizer to broadcast. + allreduce_controller = PyTorchAllReduceController(data_shard_service) allreduce_controller.set_broadcast_model(model) allreduce_controller.set_broadcast_optimizer(optimizer) epoch = 0
{"golden_diff": "diff --git a/model_zoo/mnist/mnist_pytorch.py b/model_zoo/mnist/mnist_pytorch.py\n--- a/model_zoo/mnist/mnist_pytorch.py\n+++ b/model_zoo/mnist/mnist_pytorch.py\n@@ -11,31 +11,6 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-\"\"\"\n-Download the mnist dataset from\n-https://s3.amazonaws.com/fast-ai-imageclas/mnist_png.tgz\n-and then untar it into ${data_store_dir}. Using minikube, we can use the\n-following command to submit a training job with these codes.\n-\n-elasticdl train \\\n- --image_name=elasticdl:pt_mnist_allreduce \\\n- --job_command=\"python -m model_zoo.mnist.mnist_pytorch \\\n- --training_data=/local_data/mnist_png/training \\\n- --validation_data=/local_data/mnist_png/testing\" \\\n- --num_minibatches_per_task=2 \\\n- --num_workers=2 \\\n- --worker_pod_priority=0.5 \\\n- --master_resource_request=\"cpu=0.2,memory=1024Mi\" \\\n- --master_resource_limit=\"cpu=1,memory=2048Mi\" \\\n- --worker_resource_request=\"cpu=0.3,memory=1024Mi\" \\\n- --worker_resource_limit=\"cpu=1,memory=2048Mi\" \\\n- --envs=\"USE_TORCH=true,HOROVOD_GLOO_TIMEOUT_SECONDS=60,PYTHONUNBUFFERED=0\" \\\n- --job_name=test-mnist-allreduce \\\n- --image_pull_policy=Never \\\n- --volume=\"host_path=${data_store_dir},mount_path=/local_data\" \\\n- --distribution_strategy=AllreduceStrategy \\\n-\"\"\"\n-\n import argparse\n import sys\n \n@@ -49,7 +24,8 @@\n from torch.optim.lr_scheduler import StepLR\n from torch.utils.data import DataLoader, Dataset\n \n-from elasticai_api.pytorch.controller import create_elastic_controller\n+from elasticai_api.common.data_shard_service import RecordIndexService\n+from elasticai_api.pytorch.controller import PyTorchAllReduceController\n from elasticai_api.pytorch.optimizer import DistributedOptimizer\n \n \n@@ -131,15 +107,14 @@\n train_data = torchvision.datasets.ImageFolder(args.training_data)\n test_data = torchvision.datasets.ImageFolder(args.validation_data)\n \n- allreduce_controller = create_elastic_controller(\n+ data_shard_service = RecordIndexService(\n batch_size=args.batch_size,\n dataset_size=len(train_data.imgs),\n num_epochs=args.num_epochs,\n shuffle=True,\n+ dataset_name=\"mnist_training_data\",\n )\n- train_dataset = ElasticDataset(\n- train_data.imgs, allreduce_controller.data_shard_service\n- )\n+ train_dataset = ElasticDataset(train_data.imgs, data_shard_service)\n train_loader = DataLoader(\n dataset=train_dataset, batch_size=args.batch_size, num_workers=2\n )\n@@ -155,6 +130,7 @@\n scheduler = StepLR(optimizer, step_size=1, gamma=0.5)\n \n # Set the model and optimizer to broadcast.\n+ allreduce_controller = PyTorchAllReduceController(data_shard_service)\n allreduce_controller.set_broadcast_model(model)\n allreduce_controller.set_broadcast_optimizer(optimizer)\n epoch = 0\n", "issue": "Write a tutorial of elastic AllReduce with PyTorch\n\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDownload the mnist dataset from\nhttps://s3.amazonaws.com/fast-ai-imageclas/mnist_png.tgz\nand then untar it into ${data_store_dir}. Using minikube, we can use the\nfollowing command to submit a training job with these codes.\n\nelasticdl train \\\n --image_name=elasticdl:pt_mnist_allreduce \\\n --job_command=\"python -m model_zoo.mnist.mnist_pytorch \\\n --training_data=/local_data/mnist_png/training \\\n --validation_data=/local_data/mnist_png/testing\" \\\n --num_minibatches_per_task=2 \\\n --num_workers=2 \\\n --worker_pod_priority=0.5 \\\n --master_resource_request=\"cpu=0.2,memory=1024Mi\" \\\n --master_resource_limit=\"cpu=1,memory=2048Mi\" \\\n --worker_resource_request=\"cpu=0.3,memory=1024Mi\" \\\n --worker_resource_limit=\"cpu=1,memory=2048Mi\" \\\n --envs=\"USE_TORCH=true,HOROVOD_GLOO_TIMEOUT_SECONDS=60,PYTHONUNBUFFERED=0\" \\\n --job_name=test-mnist-allreduce \\\n --image_pull_policy=Never \\\n --volume=\"host_path=${data_store_dir},mount_path=/local_data\" \\\n --distribution_strategy=AllreduceStrategy \\\n\"\"\"\n\nimport argparse\nimport sys\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom elasticai_api.pytorch.controller import create_elastic_controller\nfrom elasticai_api.pytorch.optimizer import DistributedOptimizer\n\n\nclass ElasticDataset(Dataset):\n def __init__(self, images, data_shard_service=None):\n \"\"\"The dataset supports elastic training.\n\n Args:\n images: A list with tuples like (image_path, label_index).\n For example, we can use `torchvision.datasets.ImageFolder`\n to get the list.\n data_shard_service: If we want to use elastic training, we\n need to use the `data_shard_service` of the elastic controller\n in elasticai_api.\n \"\"\"\n self.data_shard_service = data_shard_service\n self._images = images\n\n def __len__(self):\n if self.data_shard_service:\n # Set the maxsize because the size of dataset is not fixed\n # when using dynamic sharding\n return sys.maxsize\n else:\n return len(self._images)\n\n def __getitem__(self, index):\n if self.data_shard_service:\n index = self.data_shard_service.fetch_record_index()\n return self.read_image(index)\n else:\n return self.read_image(index)\n\n def read_image(self, index):\n image_path, label = self._images[index]\n image = cv2.imread(image_path)\n image = np.array(image / 255.0, np.float32)\n image = image.reshape(3, 28, 28)\n return image, label\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout(0.25)\n self.dropout2 = nn.Dropout(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = 
F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\ndef train(args):\n \"\"\" The function to run the training loop.\n Args:\n dataset: The dataset is provided by ElasticDL for the elastic training.\n Now, the dataset if tf.data.Dataset and we need to convert\n the data in dataset to torch.tensor. Later, ElasticDL will\n pass a torch.utils.data.DataLoader.\n elastic_controller: The controller for elastic training.\n \"\"\"\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n train_data = torchvision.datasets.ImageFolder(args.training_data)\n test_data = torchvision.datasets.ImageFolder(args.validation_data)\n\n allreduce_controller = create_elastic_controller(\n batch_size=args.batch_size,\n dataset_size=len(train_data.imgs),\n num_epochs=args.num_epochs,\n shuffle=True,\n )\n train_dataset = ElasticDataset(\n train_data.imgs, allreduce_controller.data_shard_service\n )\n train_loader = DataLoader(\n dataset=train_dataset, batch_size=args.batch_size, num_workers=2\n )\n\n test_dataset = ElasticDataset(test_data.imgs)\n test_loader = DataLoader(\n dataset=test_dataset, batch_size=args.batch_size, num_workers=2\n )\n\n model = Net()\n optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)\n optimizer = DistributedOptimizer(optimizer, fixed_global_batch_size=True)\n scheduler = StepLR(optimizer, step_size=1, gamma=0.5)\n\n # Set the model and optimizer to broadcast.\n allreduce_controller.set_broadcast_model(model)\n allreduce_controller.set_broadcast_optimizer(optimizer)\n epoch = 0\n # Use the elastic function to wrap the training function with a batch.\n elastic_train_one_batch = allreduce_controller.elastic_run(train_one_batch)\n if torch.cuda.is_available():\n model.cuda()\n with allreduce_controller.scope():\n for batch_idx, (data, target) in enumerate(train_loader):\n model.train()\n target = target.type(torch.LongTensor)\n data, target = data.to(device), target.to(device)\n loss = elastic_train_one_batch(model, optimizer, data, target)\n print(\"loss = {}, step = {}\".format(loss, batch_idx))\n new_epoch = allreduce_controller.get_current_epoch()\n if new_epoch > epoch:\n epoch = new_epoch\n # Set epoch of the scheduler\n scheduler.last_epoch = epoch - 1\n scheduler.step()\n test(model, device, test_loader)\n\n\ndef train_one_batch(model, optimizer, data, target):\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n return loss\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(\n output, target, reduction=\"sum\"\n ).item() # sum up batch loss\n pred = output.argmax(\n dim=1, keepdim=True\n ) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print(\n \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n test_loss,\n correct,\n len(test_loader.dataset),\n 100.0 * correct / len(test_loader.dataset),\n )\n )\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser(description=\"Process training parameters\")\n parser.add_argument(\"--batch_size\", type=int, default=64, 
required=False)\n parser.add_argument(\"--num_epochs\", type=int, default=1, required=False)\n parser.add_argument(\n \"--learning_rate\", type=float, default=0.1, required=False\n )\n parser.add_argument(\n \"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"disable CUDA training\",\n )\n parser.add_argument(\"--training_data\", type=str, required=True)\n parser.add_argument(\n \"--validation_data\", type=str, default=\"\", required=False\n )\n return parser\n\n\nif __name__ == \"__main__\":\n parser = arg_parser()\n args = parser.parse_args()\n train(args)\n", "path": "model_zoo/mnist/mnist_pytorch.py"}], "after_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nfrom torch.optim.lr_scheduler import StepLR\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom elasticai_api.common.data_shard_service import RecordIndexService\nfrom elasticai_api.pytorch.controller import PyTorchAllReduceController\nfrom elasticai_api.pytorch.optimizer import DistributedOptimizer\n\n\nclass ElasticDataset(Dataset):\n def __init__(self, images, data_shard_service=None):\n \"\"\"The dataset supports elastic training.\n\n Args:\n images: A list with tuples like (image_path, label_index).\n For example, we can use `torchvision.datasets.ImageFolder`\n to get the list.\n data_shard_service: If we want to use elastic training, we\n need to use the `data_shard_service` of the elastic controller\n in elasticai_api.\n \"\"\"\n self.data_shard_service = data_shard_service\n self._images = images\n\n def __len__(self):\n if self.data_shard_service:\n # Set the maxsize because the size of dataset is not fixed\n # when using dynamic sharding\n return sys.maxsize\n else:\n return len(self._images)\n\n def __getitem__(self, index):\n if self.data_shard_service:\n index = self.data_shard_service.fetch_record_index()\n return self.read_image(index)\n else:\n return self.read_image(index)\n\n def read_image(self, index):\n image_path, label = self._images[index]\n image = cv2.imread(image_path)\n image = np.array(image / 255.0, np.float32)\n image = image.reshape(3, 28, 28)\n return image, label\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout(0.25)\n self.dropout2 = nn.Dropout(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n\ndef train(args):\n \"\"\" The function to run the training loop.\n Args:\n 
dataset: The dataset is provided by ElasticDL for the elastic training.\n Now, the dataset if tf.data.Dataset and we need to convert\n the data in dataset to torch.tensor. Later, ElasticDL will\n pass a torch.utils.data.DataLoader.\n elastic_controller: The controller for elastic training.\n \"\"\"\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n train_data = torchvision.datasets.ImageFolder(args.training_data)\n test_data = torchvision.datasets.ImageFolder(args.validation_data)\n\n data_shard_service = RecordIndexService(\n batch_size=args.batch_size,\n dataset_size=len(train_data.imgs),\n num_epochs=args.num_epochs,\n shuffle=True,\n dataset_name=\"mnist_training_data\",\n )\n train_dataset = ElasticDataset(train_data.imgs, data_shard_service)\n train_loader = DataLoader(\n dataset=train_dataset, batch_size=args.batch_size, num_workers=2\n )\n\n test_dataset = ElasticDataset(test_data.imgs)\n test_loader = DataLoader(\n dataset=test_dataset, batch_size=args.batch_size, num_workers=2\n )\n\n model = Net()\n optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)\n optimizer = DistributedOptimizer(optimizer, fixed_global_batch_size=True)\n scheduler = StepLR(optimizer, step_size=1, gamma=0.5)\n\n # Set the model and optimizer to broadcast.\n allreduce_controller = PyTorchAllReduceController(data_shard_service)\n allreduce_controller.set_broadcast_model(model)\n allreduce_controller.set_broadcast_optimizer(optimizer)\n epoch = 0\n # Use the elastic function to wrap the training function with a batch.\n elastic_train_one_batch = allreduce_controller.elastic_run(train_one_batch)\n if torch.cuda.is_available():\n model.cuda()\n with allreduce_controller.scope():\n for batch_idx, (data, target) in enumerate(train_loader):\n model.train()\n target = target.type(torch.LongTensor)\n data, target = data.to(device), target.to(device)\n loss = elastic_train_one_batch(model, optimizer, data, target)\n print(\"loss = {}, step = {}\".format(loss, batch_idx))\n new_epoch = allreduce_controller.get_current_epoch()\n if new_epoch > epoch:\n epoch = new_epoch\n # Set epoch of the scheduler\n scheduler.last_epoch = epoch - 1\n scheduler.step()\n test(model, device, test_loader)\n\n\ndef train_one_batch(model, optimizer, data, target):\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n return loss\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(\n output, target, reduction=\"sum\"\n ).item() # sum up batch loss\n pred = output.argmax(\n dim=1, keepdim=True\n ) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print(\n \"\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n\".format(\n test_loss,\n correct,\n len(test_loader.dataset),\n 100.0 * correct / len(test_loader.dataset),\n )\n )\n\n\ndef arg_parser():\n parser = argparse.ArgumentParser(description=\"Process training parameters\")\n parser.add_argument(\"--batch_size\", type=int, default=64, required=False)\n parser.add_argument(\"--num_epochs\", type=int, default=1, required=False)\n parser.add_argument(\n \"--learning_rate\", type=float, default=0.1, required=False\n )\n parser.add_argument(\n 
\"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"disable CUDA training\",\n )\n parser.add_argument(\"--training_data\", type=str, required=True)\n parser.add_argument(\n \"--validation_data\", type=str, default=\"\", required=False\n )\n return parser\n\n\nif __name__ == \"__main__\":\n parser = arg_parser()\n args = parser.parse_args()\n train(args)\n", "path": "model_zoo/mnist/mnist_pytorch.py"}]}
2,916
758
gh_patches_debug_19978
rasdani/github-patches
git_diff
freedomofpress__securedrop-188
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Returning "flagged for reply" sources generate an error When a source is flagged for a reply. And the source returns to the site after initial session is ended. It generates the error below. ``` [Mon Dec 02 21:49:44 2013] [error] ERROR:source:Exception on / [GET] [Mon Dec 02 21:49:44 2013] [error] Traceback (most recent call last): [Mon Dec 02 21:49:44 2013] [error] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1817, in wsgi_app [Mon Dec 02 21:49:44 2013] [error] response = self.full_dispatch_request() [Mon Dec 02 21:49:44 2013] [error] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1477, in full_dispatch_request [Mon Dec 02 21:49:44 2013] [error] rv = self.handle_user_exception(e) [Mon Dec 02 21:49:44 2013] [error] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1381, in handle_user_exception [Mon Dec 02 21:49:44 2013] [error] reraise(exc_type, exc_value, tb) [Mon Dec 02 21:49:44 2013] [error] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1473, in full_dispatch_request [Mon Dec 02 21:49:44 2013] [error] rv = self.preprocess_request() [Mon Dec 02 21:49:44 2013] [error] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1666, in preprocess_request [Mon Dec 02 21:49:44 2013] [error] rv = func() [Mon Dec 02 21:49:44 2013] [error] File "/var/www/securedrop/source.py", line 52, in decorated_function [Mon Dec 02 21:49:44 2013] [error] return f(*args, **kwargs) [Mon Dec 02 21:49:44 2013] [error] File "/var/www/securedrop/source.py", line 64, in setup_g [Mon Dec 02 21:49:44 2013] [error] g.flagged = session['flagged'] [Mon Dec 02 21:49:44 2013] [error] File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 368, in <lambda> [Mon Dec 02 21:49:44 2013] [error] __getitem__ = lambda x, i: x._get_current_object()[i] [Mon Dec 02 21:49:44 2013] [error] KeyError: 'flagged' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `securedrop/source.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import os 3 from datetime import datetime 4 import uuid 5 from functools import wraps 6 7 from flask import (Flask, request, render_template, session, redirect, url_for, 8 flash, abort, g, send_file) 9 from flask_wtf.csrf import CsrfProtect 10 11 import config 12 import version 13 import crypto_util 14 import store 15 import background 16 import zipfile 17 from cStringIO import StringIO 18 19 app = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR) 20 app.config.from_object(config.FlaskConfig) 21 CsrfProtect(app) 22 23 app.jinja_env.globals['version'] = version.__version__ 24 if getattr(config, 'CUSTOM_HEADER_IMAGE', None): 25 app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE 26 app.jinja_env.globals['use_custom_header_image'] = True 27 else: 28 app.jinja_env.globals['header_image'] = 'securedrop.png' 29 app.jinja_env.globals['use_custom_header_image'] = False 30 31 32 def logged_in(): 33 if 'logged_in' in session: 34 return True 35 36 37 def login_required(f): 38 @wraps(f) 39 def decorated_function(*args, **kwargs): 40 if not logged_in(): 41 return redirect(url_for('lookup')) 42 return f(*args, **kwargs) 43 return decorated_function 44 45 46 def ignore_static(f): 47 """Only executes the wrapped function if we're not loading a static resource.""" 48 @wraps(f) 49 def decorated_function(*args, **kwargs): 50 if request.path.startswith('/static'): 51 return # don't execute the decorated function 52 return f(*args, **kwargs) 53 return decorated_function 54 55 56 @app.before_request 57 @ignore_static 58 def setup_g(): 59 """Store commonly used values in Flask's special g object""" 60 # ignore_static here because `crypto_util.shash` is bcrypt (very time consuming), 61 # and we don't need to waste time running if we're just serving a static 62 # resource that won't need to access these common values. 63 if logged_in(): 64 g.flagged = session['flagged'] 65 g.codename = session['codename'] 66 g.sid = crypto_util.shash(g.codename) 67 g.loc = store.path(g.sid) 68 69 70 @app.before_request 71 @ignore_static 72 def check_tor2web(): 73 # ignore_static here so we only flash a single message warning about Tor2Web, 74 # corresponding to the intial page load. 75 if 'X-tor2web' in request.headers: 76 flash('<strong>WARNING:</strong> You appear to be using Tor2Web. ' 77 'This <strong>does not</strong> provide anonymity. 
' 78 '<a href="/tor2web-warning">Why is this dangerous?</a>', 79 "header-warning") 80 81 82 @app.after_request 83 def no_cache(response): 84 """Minimize potential traces of site access by telling the browser not to 85 cache anything""" 86 no_cache_headers = { 87 'Cache-Control': 'no-cache, no-store, must-revalidate', 88 'Pragma': 'no-cache', 89 'Expires': '-1', 90 } 91 for header, header_value in no_cache_headers.iteritems(): 92 response.headers.add(header, header_value) 93 return response 94 95 96 @app.route('/') 97 def index(): 98 return render_template('index.html') 99 100 101 @app.route('/generate', methods=('GET', 'POST')) 102 def generate(): 103 number_words = 8 104 if request.method == 'POST': 105 number_words = int(request.form['number-words']) 106 if number_words not in range(7, 11): 107 abort(403) 108 session['codename'] = crypto_util.genrandomid(number_words) 109 return render_template('generate.html', codename=session['codename']) 110 111 112 @app.route('/create', methods=['POST']) 113 def create(): 114 sid = crypto_util.shash(session['codename']) 115 if os.path.exists(store.path(sid)): 116 # if this happens, we're not using very secure crypto 117 store.log("Got a duplicate ID '%s'" % sid) 118 else: 119 os.mkdir(store.path(sid)) 120 session['logged_in'] = True 121 session['flagged'] = False 122 return redirect(url_for('lookup')) 123 124 125 @app.route('/lookup', methods=('GET',)) 126 @login_required 127 def lookup(): 128 msgs = [] 129 flagged = False 130 for fn in os.listdir(g.loc): 131 if fn == '_FLAG': 132 flagged = True 133 continue 134 if fn.startswith('reply-'): 135 msgs.append(dict( 136 id=fn, 137 date=str( 138 datetime.fromtimestamp( 139 os.stat(store.path(g.sid, fn)).st_mtime)), 140 msg=crypto_util.decrypt( 141 g.sid, g.codename, file(store.path(g.sid, fn)).read()) 142 )) 143 if flagged: 144 session['flagged'] = True 145 146 def async_genkey(sid, codename): 147 with app.app_context(): 148 background.execute(lambda: crypto_util.genkeypair(sid, codename)) 149 150 # Generate a keypair to encrypt replies from the journalist 151 # Only do this if the journalist has flagged the source as one 152 # that they would like to reply to. (Issue #140.) 153 if not crypto_util.getkey(g.sid) and flagged: 154 async_genkey(g.sid, g.codename) 155 156 return render_template( 157 'lookup.html', codename=g.codename, msgs=msgs, flagged=flagged, 158 haskey=crypto_util.getkey(g.sid)) 159 160 161 @app.route('/submit', methods=('POST',)) 162 @login_required 163 def submit(): 164 msg = request.form['msg'] 165 fh = request.files['fh'] 166 167 if msg: 168 msg_loc = store.path(g.sid, '%s_msg.gpg' % uuid.uuid4()) 169 crypto_util.encrypt(config.JOURNALIST_KEY, msg, msg_loc) 170 flash("Thanks! We received your message.", "notification") 171 if fh: 172 file_loc = store.path(g.sid, "%s_doc.zip.gpg" % uuid.uuid4()) 173 174 s = StringIO() 175 zip_file = zipfile.ZipFile(s, 'w') 176 zip_file.writestr(fh.filename, fh.read()) 177 zip_file.close() 178 s.reset() 179 180 crypto_util.encrypt(config.JOURNALIST_KEY, s, file_loc) 181 flash("Thanks! We received your document '%s'." 182 % fh.filename or '[unnamed]', "notification") 183 184 return redirect(url_for('lookup')) 185 186 187 @app.route('/delete', methods=('POST',)) 188 @login_required 189 def delete(): 190 msgid = request.form['msgid'] 191 assert '/' not in msgid 192 potential_files = os.listdir(g.loc) 193 if msgid not in potential_files: 194 abort(404) # TODO are the checks necessary? 
195 crypto_util.secureunlink(store.path(g.sid, msgid)) 196 flash("Reply deleted.", "notification") 197 198 return redirect(url_for('lookup')) 199 200 201 def valid_codename(codename): 202 return os.path.exists(store.path(crypto_util.shash(codename))) 203 204 205 @app.route('/login', methods=('GET', 'POST')) 206 def login(): 207 if request.method == 'POST': 208 codename = request.form['codename'] 209 if valid_codename(codename): 210 session.update(codename=codename, logged_in=True) 211 return redirect(url_for('lookup')) 212 else: 213 flash("Sorry, that is not a recognized codename.", "error") 214 return render_template('login.html') 215 216 217 @app.route('/howto-disable-js') 218 def howto_disable_js(): 219 return render_template("howto-disable-js.html") 220 221 222 @app.route('/tor2web-warning') 223 def tor2web_warning(): 224 return render_template("tor2web-warning.html") 225 226 227 @app.route('/journalist-key') 228 def download_journalist_pubkey(): 229 journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY) 230 return send_file(StringIO(journalist_pubkey), 231 mimetype="application/pgp-keys", 232 attachment_filename=config.JOURNALIST_KEY + ".asc", 233 as_attachment=True) 234 235 236 @app.route('/why-journalist-key') 237 def why_download_journalist_pubkey(): 238 return render_template("why-journalist-key.html") 239 240 241 @app.errorhandler(404) 242 def page_not_found(error): 243 return render_template('notfound.html'), 404 244 245 if __name__ == "__main__": 246 # TODO make sure debug is not on in production 247 app.run(debug=True, port=8080) 248 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/source.py b/securedrop/source.py --- a/securedrop/source.py +++ b/securedrop/source.py @@ -61,7 +61,9 @@ # and we don't need to waste time running if we're just serving a static # resource that won't need to access these common values. if logged_in(): - g.flagged = session['flagged'] + # We use session.get (which defaults to None if 'flagged' is not in the + # session) to avoid a KeyError on the redirect from login/ to lookup/ + g.flagged = session.get('flagged') g.codename = session['codename'] g.sid = crypto_util.shash(g.codename) g.loc = store.path(g.sid) @@ -128,6 +130,8 @@ msgs = [] flagged = False for fn in os.listdir(g.loc): + # TODO: make 'flag' a db column, so we can replace this with a db + # lookup in the future if fn == '_FLAG': flagged = True continue
{"golden_diff": "diff --git a/securedrop/source.py b/securedrop/source.py\n--- a/securedrop/source.py\n+++ b/securedrop/source.py\n@@ -61,7 +61,9 @@\n # and we don't need to waste time running if we're just serving a static\n # resource that won't need to access these common values.\n if logged_in():\n- g.flagged = session['flagged']\n+ # We use session.get (which defaults to None if 'flagged' is not in the\n+ # session) to avoid a KeyError on the redirect from login/ to lookup/\n+ g.flagged = session.get('flagged')\n g.codename = session['codename']\n g.sid = crypto_util.shash(g.codename)\n g.loc = store.path(g.sid)\n@@ -128,6 +130,8 @@\n msgs = []\n flagged = False\n for fn in os.listdir(g.loc):\n+ # TODO: make 'flag' a db column, so we can replace this with a db\n+ # lookup in the future\n if fn == '_FLAG':\n flagged = True\n continue\n", "issue": "Returning \"flagged for reply\" sources generate an error\nWhen a source is flagged for a reply.\nAnd the source returns to the site after initial session is ended. \nIt generates the error below.\n\n```\n[Mon Dec 02 21:49:44 2013] [error] ERROR:source:Exception on / [GET]\n[Mon Dec 02 21:49:44 2013] [error] Traceback (most recent call last):\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1817, in\nwsgi_app\n[Mon Dec 02 21:49:44 2013] [error] response =\nself.full_dispatch_request()\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1477, in\nfull_dispatch_request\n[Mon Dec 02 21:49:44 2013] [error] rv = self.handle_user_exception(e)\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1381, in\nhandle_user_exception\n[Mon Dec 02 21:49:44 2013] [error] reraise(exc_type, exc_value, tb)\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1473, in\nfull_dispatch_request\n[Mon Dec 02 21:49:44 2013] [error] rv = self.preprocess_request()\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1666, in\npreprocess_request\n[Mon Dec 02 21:49:44 2013] [error] rv = func()\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/var/www/securedrop/source.py\", line 52, in decorated_function\n[Mon Dec 02 21:49:44 2013] [error] return f(*args, **kwargs)\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/var/www/securedrop/source.py\", line 64, in setup_g\n[Mon Dec 02 21:49:44 2013] [error] g.flagged = session['flagged']\n[Mon Dec 02 21:49:44 2013] [error] File\n\"/usr/local/lib/python2.7/dist-packages/werkzeug/local.py\", line 368, in\n<lambda>\n[Mon Dec 02 21:49:44 2013] [error] __getitem__ = lambda x, i:\nx._get_current_object()[i]\n[Mon Dec 02 21:49:44 2013] [error] KeyError: 'flagged'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nimport zipfile\nfrom cStringIO import StringIO\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = 
config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'securedrop.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\ndef logged_in():\n if 'logged_in' in session:\n return True\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('lookup'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.shash` is bcrypt (very time consuming),\n # and we don't need to waste time running if we're just serving a static\n # resource that won't need to access these common values.\n if logged_in():\n g.flagged = session['flagged']\n g.codename = session['codename']\n g.sid = crypto_util.shash(g.codename)\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. '\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"header-warning\")\n\n\[email protected]_request\ndef no_cache(response):\n \"\"\"Minimize potential traces of site access by telling the browser not to\n cache anything\"\"\"\n no_cache_headers = {\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '-1',\n }\n for header, header_value in no_cache_headers.iteritems():\n response.headers.add(header, header_value)\n return response\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n return render_template('generate.html', codename=session['codename'])\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.shash(session['codename'])\n if os.path.exists(store.path(sid)):\n # if this happens, we're not using very secure crypto\n store.log(\"Got a duplicate ID '%s'\" % sid)\n else:\n os.mkdir(store.path(sid))\n session['logged_in'] = True\n session['flagged'] = False\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n msgs = []\n flagged = False\n for fn in os.listdir(g.loc):\n if fn == '_FLAG':\n flagged = True\n continue\n if fn.startswith('reply-'):\n msgs.append(dict(\n id=fn,\n date=str(\n datetime.fromtimestamp(\n os.stat(store.path(g.sid, fn)).st_mtime)),\n msg=crypto_util.decrypt(\n g.sid, g.codename, file(store.path(g.sid, fn)).read())\n ))\n if flagged:\n session['flagged'] = True\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, 
codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template(\n 'lookup.html', codename=g.codename, msgs=msgs, flagged=flagged,\n haskey=crypto_util.getkey(g.sid))\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n\n if msg:\n msg_loc = store.path(g.sid, '%s_msg.gpg' % uuid.uuid4())\n crypto_util.encrypt(config.JOURNALIST_KEY, msg, msg_loc)\n flash(\"Thanks! We received your message.\", \"notification\")\n if fh:\n file_loc = store.path(g.sid, \"%s_doc.zip.gpg\" % uuid.uuid4())\n\n s = StringIO()\n zip_file = zipfile.ZipFile(s, 'w')\n zip_file.writestr(fh.filename, fh.read())\n zip_file.close()\n s.reset()\n\n crypto_util.encrypt(config.JOURNALIST_KEY, s, file_loc)\n flash(\"Thanks! We received your document '%s'.\"\n % fh.filename or '[unnamed]', \"notification\")\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n crypto_util.secureunlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.shash(codename)))\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup'))\n else:\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\nif __name__ == \"__main__\":\n # TODO make sure debug is not on in production\n app.run(debug=True, port=8080)\n", "path": "securedrop/source.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nfrom datetime import datetime\nimport uuid\nfrom functools import wraps\n\nfrom flask import (Flask, request, render_template, session, redirect, url_for,\n flash, abort, g, send_file)\nfrom flask_wtf.csrf import CsrfProtect\n\nimport config\nimport version\nimport crypto_util\nimport store\nimport background\nimport zipfile\nfrom cStringIO import StringIO\n\napp = Flask(__name__, template_folder=config.SOURCE_TEMPLATES_DIR)\napp.config.from_object(config.FlaskConfig)\nCsrfProtect(app)\n\napp.jinja_env.globals['version'] = 
version.__version__\nif getattr(config, 'CUSTOM_HEADER_IMAGE', None):\n app.jinja_env.globals['header_image'] = config.CUSTOM_HEADER_IMAGE\n app.jinja_env.globals['use_custom_header_image'] = True\nelse:\n app.jinja_env.globals['header_image'] = 'securedrop.png'\n app.jinja_env.globals['use_custom_header_image'] = False\n\n\ndef logged_in():\n if 'logged_in' in session:\n return True\n\n\ndef login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not logged_in():\n return redirect(url_for('lookup'))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef ignore_static(f):\n \"\"\"Only executes the wrapped function if we're not loading a static resource.\"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if request.path.startswith('/static'):\n return # don't execute the decorated function\n return f(*args, **kwargs)\n return decorated_function\n\n\[email protected]_request\n@ignore_static\ndef setup_g():\n \"\"\"Store commonly used values in Flask's special g object\"\"\"\n # ignore_static here because `crypto_util.shash` is bcrypt (very time consuming),\n # and we don't need to waste time running if we're just serving a static\n # resource that won't need to access these common values.\n if logged_in():\n # We use session.get (which defaults to None if 'flagged' is not in the\n # session) to avoid a KeyError on the redirect from login/ to lookup/\n g.flagged = session.get('flagged')\n g.codename = session['codename']\n g.sid = crypto_util.shash(g.codename)\n g.loc = store.path(g.sid)\n\n\[email protected]_request\n@ignore_static\ndef check_tor2web():\n # ignore_static here so we only flash a single message warning about Tor2Web,\n # corresponding to the intial page load.\n if 'X-tor2web' in request.headers:\n flash('<strong>WARNING:</strong> You appear to be using Tor2Web. '\n 'This <strong>does not</strong> provide anonymity. 
'\n '<a href=\"/tor2web-warning\">Why is this dangerous?</a>',\n \"header-warning\")\n\n\[email protected]_request\ndef no_cache(response):\n \"\"\"Minimize potential traces of site access by telling the browser not to\n cache anything\"\"\"\n no_cache_headers = {\n 'Cache-Control': 'no-cache, no-store, must-revalidate',\n 'Pragma': 'no-cache',\n 'Expires': '-1',\n }\n for header, header_value in no_cache_headers.iteritems():\n response.headers.add(header, header_value)\n return response\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/generate', methods=('GET', 'POST'))\ndef generate():\n number_words = 8\n if request.method == 'POST':\n number_words = int(request.form['number-words'])\n if number_words not in range(7, 11):\n abort(403)\n session['codename'] = crypto_util.genrandomid(number_words)\n return render_template('generate.html', codename=session['codename'])\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n sid = crypto_util.shash(session['codename'])\n if os.path.exists(store.path(sid)):\n # if this happens, we're not using very secure crypto\n store.log(\"Got a duplicate ID '%s'\" % sid)\n else:\n os.mkdir(store.path(sid))\n session['logged_in'] = True\n session['flagged'] = False\n return redirect(url_for('lookup'))\n\n\[email protected]('/lookup', methods=('GET',))\n@login_required\ndef lookup():\n msgs = []\n flagged = False\n for fn in os.listdir(g.loc):\n # TODO: make 'flag' a db column, so we can replace this with a db\n # lookup in the future\n if fn == '_FLAG':\n flagged = True\n continue\n if fn.startswith('reply-'):\n msgs.append(dict(\n id=fn,\n date=str(\n datetime.fromtimestamp(\n os.stat(store.path(g.sid, fn)).st_mtime)),\n msg=crypto_util.decrypt(\n g.sid, g.codename, file(store.path(g.sid, fn)).read())\n ))\n if flagged:\n session['flagged'] = True\n\n def async_genkey(sid, codename):\n with app.app_context():\n background.execute(lambda: crypto_util.genkeypair(sid, codename))\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not crypto_util.getkey(g.sid) and flagged:\n async_genkey(g.sid, g.codename)\n\n return render_template(\n 'lookup.html', codename=g.codename, msgs=msgs, flagged=flagged,\n haskey=crypto_util.getkey(g.sid))\n\n\[email protected]('/submit', methods=('POST',))\n@login_required\ndef submit():\n msg = request.form['msg']\n fh = request.files['fh']\n\n if msg:\n msg_loc = store.path(g.sid, '%s_msg.gpg' % uuid.uuid4())\n crypto_util.encrypt(config.JOURNALIST_KEY, msg, msg_loc)\n flash(\"Thanks! We received your message.\", \"notification\")\n if fh:\n file_loc = store.path(g.sid, \"%s_doc.zip.gpg\" % uuid.uuid4())\n\n s = StringIO()\n zip_file = zipfile.ZipFile(s, 'w')\n zip_file.writestr(fh.filename, fh.read())\n zip_file.close()\n s.reset()\n\n crypto_util.encrypt(config.JOURNALIST_KEY, s, file_loc)\n flash(\"Thanks! 
We received your document '%s'.\"\n % fh.filename or '[unnamed]', \"notification\")\n\n return redirect(url_for('lookup'))\n\n\[email protected]('/delete', methods=('POST',))\n@login_required\ndef delete():\n msgid = request.form['msgid']\n assert '/' not in msgid\n potential_files = os.listdir(g.loc)\n if msgid not in potential_files:\n abort(404) # TODO are the checks necessary?\n crypto_util.secureunlink(store.path(g.sid, msgid))\n flash(\"Reply deleted.\", \"notification\")\n\n return redirect(url_for('lookup'))\n\n\ndef valid_codename(codename):\n return os.path.exists(store.path(crypto_util.shash(codename)))\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n codename = request.form['codename']\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('lookup'))\n else:\n flash(\"Sorry, that is not a recognized codename.\", \"error\")\n return render_template('login.html')\n\n\[email protected]('/howto-disable-js')\ndef howto_disable_js():\n return render_template(\"howto-disable-js.html\")\n\n\[email protected]('/tor2web-warning')\ndef tor2web_warning():\n return render_template(\"tor2web-warning.html\")\n\n\[email protected]('/journalist-key')\ndef download_journalist_pubkey():\n journalist_pubkey = crypto_util.gpg.export_keys(config.JOURNALIST_KEY)\n return send_file(StringIO(journalist_pubkey),\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n\[email protected]('/why-journalist-key')\ndef why_download_journalist_pubkey():\n return render_template(\"why-journalist-key.html\")\n\n\[email protected](404)\ndef page_not_found(error):\n return render_template('notfound.html'), 404\n\nif __name__ == \"__main__\":\n # TODO make sure debug is not on in production\n app.run(debug=True, port=8080)\n", "path": "securedrop/source.py"}]}
3,553
252
gh_patches_debug_510
rasdani/github-patches
git_diff
great-expectations__great_expectations-4829
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py` Content: ``` 1 from typing import Any, List, Union 2 3 import pygeos 4 5 from great_expectations.core import ExpectationValidationResult 6 from great_expectations.execution_engine import PandasExecutionEngine 7 from great_expectations.expectations.expectation import ( 8 ColumnMapExpectation, 9 ExpectationConfiguration, 10 ) 11 from great_expectations.expectations.metrics import ( 12 ColumnMapMetricProvider, 13 column_condition_partial, 14 ) 15 from great_expectations.expectations.util import render_evaluation_parameter_string 16 from great_expectations.render.renderer.renderer import renderer 17 from great_expectations.render.types import ( 18 RenderedBulletListContent, 19 RenderedGraphContent, 20 RenderedStringTemplateContent, 21 RenderedTableContent, 22 ) 23 from great_expectations.render.util import num_to_str, substitute_none_for_missing 24 25 26 # This class defines a Metric to support your Expectation. 27 # For most ColumnMapExpectations, the main business logic for calculation will live in this class. 28 class ColumnValuesValidGeojson(ColumnMapMetricProvider): 29 30 # This is the id string that will be used to reference your metric. 31 condition_metric_name = "column_values.valid_geojson" 32 condition_value_keys = () 33 34 # This method implements the core logic for the PandasExecutionEngine 35 @column_condition_partial(engine=PandasExecutionEngine) 36 def _pandas(cls, column, **kwargs): 37 # Check if values is a valid GeoJSON by parsing it and returning False if there's an error 38 def valid_geojson(value): 39 try: 40 pygeos.from_geojson(value) 41 return True 42 except pygeos.GEOSException: 43 return False 44 45 column = column.apply(valid_geojson) 46 return column 47 48 49 # This class defines the Expectation itself 50 class ExpectColumnValuesToBeValidGeojson(ColumnMapExpectation): 51 """Expect values in a column to be valid geojson strings as defined in https://geojson.org/. 52 Note that this makes use of https://pygeos.readthedocs.io/en/stable/io.html#pygeos.io.from_geojson which has some limitations. 53 54 Args: 55 column (str): \ 56 The column name. 57 58 """ 59 60 # These examples will be shown in the public gallery. 61 # They will also be executed as unit tests for your Expectation. 
62 examples = [ 63 { 64 "data": { 65 "valid_geojson": [ 66 """{ 67 "type": "Feature", 68 "geometry": { 69 "type": "Point", 70 "coordinates": [125.6, 10.1] 71 }, 72 "properties": { 73 "name": "Dinagat Islands" 74 } 75 }""", 76 '{"type": "Point","coordinates": [1, 2]}', 77 '{"type": "Point","coordinates": [5, 6]}', 78 ], 79 "invalid_geojson": [ 80 "{}", 81 "{ 'type': 'Feature' }", 82 "", 83 ], 84 }, 85 "tests": [ 86 { 87 "title": "basic_positive_test", 88 "exact_match_out": False, 89 "include_in_gallery": True, 90 "in": { 91 "column": "valid_geojson", 92 "mostly": 1, 93 }, 94 "out": {"success": True}, 95 }, 96 { 97 "title": "basic_negative_test", 98 "exact_match_out": False, 99 "include_in_gallery": True, 100 "in": { 101 "column": "invalid_geojson", 102 "mostly": 0.2, 103 }, 104 "out": {"success": False}, 105 }, 106 ], 107 }, 108 ] 109 110 # This is the id string of the Metric used by this Expectation. 111 # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. 112 map_metric = "column_values.valid_geojson" 113 114 # This is a list of parameter names that can affect whether the Expectation evaluates to True or False 115 success_keys = ("mostly",) 116 117 # This dictionary contains default values for any parameters that should have default values 118 default_kwarg_values = { 119 "mostly": 1, 120 } 121 122 # This object contains metadata for display in the public Gallery 123 library_metadata = { 124 "tags": [ 125 "geospatial", 126 "hackathon-2022", 127 ], # Tags for this Expectation in the Gallery 128 "contributors": [ # Github handles for all contributors to this Expectation. 129 "@mmi333", # Don't forget to add your github handle here! 130 ], 131 "requirements": ["pygeos"], 132 } 133 134 @classmethod 135 @renderer(renderer_type="renderer.prescriptive") 136 @render_evaluation_parameter_string 137 def _prescriptive_renderer( 138 cls, 139 configuration: ExpectationConfiguration = None, 140 result: ExpectationValidationResult = None, 141 language: str = None, 142 runtime_configuration: dict = None, 143 **kwargs, 144 ) -> List[ 145 Union[ 146 dict, 147 str, 148 RenderedStringTemplateContent, 149 RenderedTableContent, 150 RenderedBulletListContent, 151 RenderedGraphContent, 152 Any, 153 ] 154 ]: 155 runtime_configuration = runtime_configuration or {} 156 include_column_name = runtime_configuration.get("include_column_name", True) 157 include_column_name = ( 158 include_column_name if include_column_name is not None else True 159 ) 160 styling = runtime_configuration.get("styling") 161 params = substitute_none_for_missing( 162 configuration.kwargs, 163 [ 164 "column", 165 "mostly", 166 ], 167 ) 168 169 if params["mostly"] is None: 170 template_str = "values must be valid geojson strings" 171 else: 172 if params["mostly"] is not None: 173 params["mostly_pct"] = num_to_str( 174 params["mostly"] * 100, precision=15, no_scientific=True 175 ) 176 template_str += ", at least $mostly_pct % of the time." 177 else: 178 template_str += "." 
179 180 if include_column_name: 181 template_str = f"$column {template_str}" 182 183 return [ 184 RenderedStringTemplateContent( 185 **{ 186 "content_block_type": "string_template", 187 "string_template": { 188 "template": template_str, 189 "params": params, 190 "styling": styling, 191 }, 192 } 193 ) 194 ] 195 196 197 if __name__ == "__main__": 198 ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist() ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py b/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py --- a/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py +++ b/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py @@ -195,4 +195,4 @@ if __name__ == "__main__": - ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist() \ No newline at end of file + ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist()
{"golden_diff": "diff --git a/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py b/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py\n--- a/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py\n+++ b/contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py\n@@ -195,4 +195,4 @@\n \n \n if __name__ == \"__main__\":\n- ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist()\n\\ No newline at end of file\n+ ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist()\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Any, List, Union\n\nimport pygeos\n\nfrom great_expectations.core import ExpectationValidationResult\nfrom great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.expectation import (\n ColumnMapExpectation,\n ExpectationConfiguration,\n)\nfrom great_expectations.expectations.metrics import (\n ColumnMapMetricProvider,\n column_condition_partial,\n)\nfrom great_expectations.expectations.util import render_evaluation_parameter_string\nfrom great_expectations.render.renderer.renderer import renderer\nfrom great_expectations.render.types import (\n RenderedBulletListContent,\n RenderedGraphContent,\n RenderedStringTemplateContent,\n RenderedTableContent,\n)\nfrom great_expectations.render.util import num_to_str, substitute_none_for_missing\n\n\n# This class defines a Metric to support your Expectation.\n# For most ColumnMapExpectations, the main business logic for calculation will live in this class.\nclass ColumnValuesValidGeojson(ColumnMapMetricProvider):\n\n # This is the id string that will be used to reference your metric.\n condition_metric_name = \"column_values.valid_geojson\"\n condition_value_keys = ()\n\n # This method implements the core logic for the PandasExecutionEngine\n @column_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column, **kwargs):\n # Check if values is a valid GeoJSON by parsing it and returning False if there's an error\n def valid_geojson(value):\n try:\n pygeos.from_geojson(value)\n return True\n except pygeos.GEOSException:\n return False\n\n column = column.apply(valid_geojson)\n return column\n\n\n# This class defines the Expectation itself\nclass ExpectColumnValuesToBeValidGeojson(ColumnMapExpectation):\n \"\"\"Expect values in a column to be valid geojson strings as defined in https://geojson.org/.\n Note that this makes use of https://pygeos.readthedocs.io/en/stable/io.html#pygeos.io.from_geojson which has some limitations.\n\n Args:\n column (str): \\\n The column name.\n\n \"\"\"\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = [\n {\n \"data\": {\n \"valid_geojson\": [\n \"\"\"{\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [125.6, 10.1]\n },\n \"properties\": {\n \"name\": \"Dinagat Islands\"\n }\n }\"\"\",\n '{\"type\": \"Point\",\"coordinates\": [1, 2]}',\n '{\"type\": \"Point\",\"coordinates\": [5, 6]}',\n ],\n \"invalid_geojson\": [\n \"{}\",\n \"{ 'type': 'Feature' }\",\n \"\",\n ],\n },\n \"tests\": [\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n 
\"include_in_gallery\": True,\n \"in\": {\n \"column\": \"valid_geojson\",\n \"mostly\": 1,\n },\n \"out\": {\"success\": True},\n },\n {\n \"title\": \"basic_negative_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column\": \"invalid_geojson\",\n \"mostly\": 0.2,\n },\n \"out\": {\"success\": False},\n },\n ],\n },\n ]\n\n # This is the id string of the Metric used by this Expectation.\n # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.\n map_metric = \"column_values.valid_geojson\"\n\n # This is a list of parameter names that can affect whether the Expectation evaluates to True or False\n success_keys = (\"mostly\",)\n\n # This dictionary contains default values for any parameters that should have default values\n default_kwarg_values = {\n \"mostly\": 1,\n }\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [\n \"geospatial\",\n \"hackathon-2022\",\n ], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@mmi333\", # Don't forget to add your github handle here!\n ],\n \"requirements\": [\"pygeos\"],\n }\n\n @classmethod\n @renderer(renderer_type=\"renderer.prescriptive\")\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration: ExpectationConfiguration = None,\n result: ExpectationValidationResult = None,\n language: str = None,\n runtime_configuration: dict = None,\n **kwargs,\n ) -> List[\n Union[\n dict,\n str,\n RenderedStringTemplateContent,\n RenderedTableContent,\n RenderedBulletListContent,\n RenderedGraphContent,\n Any,\n ]\n ]:\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\n \"column\",\n \"mostly\",\n ],\n )\n\n if params[\"mostly\"] is None:\n template_str = \"values must be valid geojson strings\"\n else:\n if params[\"mostly\"] is not None:\n params[\"mostly_pct\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = f\"$column {template_str}\"\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n\n\nif __name__ == \"__main__\":\n ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist()", "path": "contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py"}], "after_files": [{"content": "from typing import Any, List, Union\n\nimport pygeos\n\nfrom great_expectations.core import ExpectationValidationResult\nfrom great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.expectation import (\n ColumnMapExpectation,\n ExpectationConfiguration,\n)\nfrom great_expectations.expectations.metrics import (\n ColumnMapMetricProvider,\n column_condition_partial,\n)\nfrom great_expectations.expectations.util import render_evaluation_parameter_string\nfrom great_expectations.render.renderer.renderer 
import renderer\nfrom great_expectations.render.types import (\n RenderedBulletListContent,\n RenderedGraphContent,\n RenderedStringTemplateContent,\n RenderedTableContent,\n)\nfrom great_expectations.render.util import num_to_str, substitute_none_for_missing\n\n\n# This class defines a Metric to support your Expectation.\n# For most ColumnMapExpectations, the main business logic for calculation will live in this class.\nclass ColumnValuesValidGeojson(ColumnMapMetricProvider):\n\n # This is the id string that will be used to reference your metric.\n condition_metric_name = \"column_values.valid_geojson\"\n condition_value_keys = ()\n\n # This method implements the core logic for the PandasExecutionEngine\n @column_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column, **kwargs):\n # Check if values is a valid GeoJSON by parsing it and returning False if there's an error\n def valid_geojson(value):\n try:\n pygeos.from_geojson(value)\n return True\n except pygeos.GEOSException:\n return False\n\n column = column.apply(valid_geojson)\n return column\n\n\n# This class defines the Expectation itself\nclass ExpectColumnValuesToBeValidGeojson(ColumnMapExpectation):\n \"\"\"Expect values in a column to be valid geojson strings as defined in https://geojson.org/.\n Note that this makes use of https://pygeos.readthedocs.io/en/stable/io.html#pygeos.io.from_geojson which has some limitations.\n\n Args:\n column (str): \\\n The column name.\n\n \"\"\"\n\n # These examples will be shown in the public gallery.\n # They will also be executed as unit tests for your Expectation.\n examples = [\n {\n \"data\": {\n \"valid_geojson\": [\n \"\"\"{\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [125.6, 10.1]\n },\n \"properties\": {\n \"name\": \"Dinagat Islands\"\n }\n }\"\"\",\n '{\"type\": \"Point\",\"coordinates\": [1, 2]}',\n '{\"type\": \"Point\",\"coordinates\": [5, 6]}',\n ],\n \"invalid_geojson\": [\n \"{}\",\n \"{ 'type': 'Feature' }\",\n \"\",\n ],\n },\n \"tests\": [\n {\n \"title\": \"basic_positive_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column\": \"valid_geojson\",\n \"mostly\": 1,\n },\n \"out\": {\"success\": True},\n },\n {\n \"title\": \"basic_negative_test\",\n \"exact_match_out\": False,\n \"include_in_gallery\": True,\n \"in\": {\n \"column\": \"invalid_geojson\",\n \"mostly\": 0.2,\n },\n \"out\": {\"success\": False},\n },\n ],\n },\n ]\n\n # This is the id string of the Metric used by this Expectation.\n # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.\n map_metric = \"column_values.valid_geojson\"\n\n # This is a list of parameter names that can affect whether the Expectation evaluates to True or False\n success_keys = (\"mostly\",)\n\n # This dictionary contains default values for any parameters that should have default values\n default_kwarg_values = {\n \"mostly\": 1,\n }\n\n # This object contains metadata for display in the public Gallery\n library_metadata = {\n \"tags\": [\n \"geospatial\",\n \"hackathon-2022\",\n ], # Tags for this Expectation in the Gallery\n \"contributors\": [ # Github handles for all contributors to this Expectation.\n \"@mmi333\", # Don't forget to add your github handle here!\n ],\n \"requirements\": [\"pygeos\"],\n }\n\n @classmethod\n @renderer(renderer_type=\"renderer.prescriptive\")\n @render_evaluation_parameter_string\n def _prescriptive_renderer(\n cls,\n configuration: 
ExpectationConfiguration = None,\n result: ExpectationValidationResult = None,\n language: str = None,\n runtime_configuration: dict = None,\n **kwargs,\n ) -> List[\n Union[\n dict,\n str,\n RenderedStringTemplateContent,\n RenderedTableContent,\n RenderedBulletListContent,\n RenderedGraphContent,\n Any,\n ]\n ]:\n runtime_configuration = runtime_configuration or {}\n include_column_name = runtime_configuration.get(\"include_column_name\", True)\n include_column_name = (\n include_column_name if include_column_name is not None else True\n )\n styling = runtime_configuration.get(\"styling\")\n params = substitute_none_for_missing(\n configuration.kwargs,\n [\n \"column\",\n \"mostly\",\n ],\n )\n\n if params[\"mostly\"] is None:\n template_str = \"values must be valid geojson strings\"\n else:\n if params[\"mostly\"] is not None:\n params[\"mostly_pct\"] = num_to_str(\n params[\"mostly\"] * 100, precision=15, no_scientific=True\n )\n template_str += \", at least $mostly_pct % of the time.\"\n else:\n template_str += \".\"\n\n if include_column_name:\n template_str = f\"$column {template_str}\"\n\n return [\n RenderedStringTemplateContent(\n **{\n \"content_block_type\": \"string_template\",\n \"string_template\": {\n \"template\": template_str,\n \"params\": params,\n \"styling\": styling,\n },\n }\n )\n ]\n\n\nif __name__ == \"__main__\":\n ExpectColumnValuesToBeValidGeojson().print_diagnostic_checklist()\n", "path": "contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_geojson.py"}]}
2,158
169
gh_patches_debug_5767
rasdani/github-patches
git_diff
meltano__meltano-6302
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade pytest Migrated from GitLab: https://gitlab.com/meltano/meltano/-/issues/3529 Originally created by @vischous on 2022-05-25 20:11:16 --- While waiting for some tests to run I was trying to speed up my iteration time, and found `pytest-xdist`. Thread here https://meltano.slack.com/archives/CKHP6G5V4/p1653508630941889 I have a version of pytest-xdist that should work with our version of pytest so I'm good, but we do have an old version of Pytest and I told @WillDaSilva I'd throw an issue in :D ``` PS E:\code\meltano> poetry add -D pytest-xdist Using version ^2.5.0 for pytest-xdist Updating dependencies Resolving dependencies... SolverProblemError Because no versions of pytest-xdist match >2.5.0,<3.0.0 and pytest-xdist (2.5.0) depends on pytest (>=6.2.0), pytest-xdist (>=2.5.0,<3.0.0) requires pytest (>=6.2.0). So, because meltano depends on both pytest (^4.3.1) and pytest-xdist (^2.5.0), version solving failed. at ~\.poetry\lib\poetry\puzzle\solver.py:241 in _solve 237β”‚ packages = result.packages 238β”‚ except OverrideNeeded as e: 239β”‚ return self.solve_in_compatibility_mode(e.overrides, use_latest=use_latest) 240β”‚ except SolveFailure as e: β†’ 241β”‚ raise SolverProblemError(e) 242β”‚ 243β”‚ results = dict( 244β”‚ depth_first_search( 245β”‚ PackageNode(self._package, packages), aggregate_package_nodes ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/meltano/core/plugin/superset.py` Content: ``` 1 """Plugin glue code for Superset.""" 2 import logging 3 import subprocess 4 from typing import List 5 6 import structlog 7 8 from meltano.core.behavior.hookable import hook 9 from meltano.core.error import AsyncSubprocessError 10 from meltano.core.plugin.error import PluginExecutionError 11 from meltano.core.plugin_invoker import PluginInvoker 12 from meltano.core.setting_definition import SettingDefinition 13 14 from . import BasePlugin, PluginType 15 16 logger = structlog.getLogger(__name__) 17 18 19 class SupersetInvoker(PluginInvoker): 20 """Invoker that prepares env for Superset.""" 21 22 def env(self): 23 """Environment variables for Superset. 24 25 Returns: 26 Dictionary of environment variables. 27 """ 28 env = super().env() 29 30 env["SUPERSET_HOME"] = str(self.plugin_config_service.run_dir) 31 env["SUPERSET_CONFIG_PATH"] = str(self.files["config"]) 32 env["FLASK_APP"] = "superset" 33 34 return env 35 36 37 class Superset(BasePlugin): 38 """Plugin glue code for Superset.""" 39 40 __plugin_type__ = PluginType.UTILITIES 41 42 invoker_class = SupersetInvoker 43 44 EXTRA_SETTINGS = [SettingDefinition(name="_config_path")] 45 46 @property 47 def config_files(self): 48 """Return the configuration files required by the plugin. 49 50 Returns: 51 Dictionary of config file identifiers and filenames 52 """ 53 return {"config": "superset_config.py"} 54 55 @hook("before_configure") 56 async def before_configure(self, invoker: SupersetInvoker, session): # noqa: WPS217 57 """Write plugin configuration to superset_config.py. 
58 59 Args: 60 invoker: the active PluginInvoker 61 session: metadata database session 62 63 Raises: 64 PluginExecutionError: if config file couldn't be found 65 """ 66 config = invoker.plugin_config_processed 67 68 config_script_lines = [ 69 "import sys", 70 "module = sys.modules[__name__]", 71 f"config = {str(config)}", 72 "for key, value in config.items():", 73 " if key.isupper():", 74 " setattr(module, key, value)", 75 ] 76 77 custom_config_filename = invoker.plugin_config_extras["_config_path"] 78 if custom_config_filename: 79 custom_config_path = invoker.project.root.joinpath(custom_config_filename) 80 81 if custom_config_path.exists(): 82 config_script_lines.extend( 83 [ 84 "from importlib.util import module_from_spec, spec_from_file_location", 85 f'spec = spec_from_file_location("superset_config", {str(custom_config_path)!r})', 86 "custom_config = module_from_spec(spec)", 87 'sys.modules["superset_config"] = custom_config', 88 "spec.loader.exec_module(custom_config)", 89 "for key in dir(custom_config):", 90 " if key.isupper():", 91 " setattr(module, key, getattr(custom_config, key))", 92 ] 93 ) 94 95 logger.info(f"Merged in config from {custom_config_path}") 96 else: 97 raise PluginExecutionError( 98 f"Could not find config file {custom_config_path}" 99 ) 100 101 config_path = invoker.files["config"] 102 with open(config_path, "w") as config_file: 103 config_file.write("\n".join(config_script_lines)) 104 logging.debug(f"Created configuration at {config_path}") 105 106 @hook("before_invoke") 107 async def db_upgrade_hook(self, invoker: PluginInvoker, exec_args: List[str]): 108 """Create or upgrade metadata database. 109 110 Args: 111 invoker: the active PluginInvoker 112 exec_args: the args being passed 113 114 Raises: 115 AsyncSubprocessError: if command failed to run 116 """ 117 handle = await invoker.invoke_async( 118 "db", 119 "upgrade", 120 stdout=subprocess.PIPE, 121 stderr=subprocess.PIPE, 122 ) 123 exit_code = await handle.wait() 124 125 if exit_code: 126 raise AsyncSubprocessError( 127 "Superset metadata database could not be initialized: `superset db upgrade` failed", 128 handle, 129 ) 130 131 logging.debug("Completed `superset db upgrade`") 132 133 @hook("before_invoke") 134 async def init_hook(self, invoker: PluginInvoker, exec_args: List[str]): 135 """Create default roles and permissions. 136 137 Args: 138 invoker: the active PluginInvoker 139 exec_args: the args being passed 140 141 Raises: 142 AsyncSubprocessError: if command failed to run 143 """ 144 handle = await invoker.invoke_async( 145 "init", 146 stdout=subprocess.PIPE, 147 stderr=subprocess.PIPE, 148 ) 149 exit_code = await handle.wait() 150 151 if exit_code: 152 raise AsyncSubprocessError( 153 "Superset default roles and permissions could not be created: `superset init` failed", 154 handle, 155 ) 156 157 logging.debug("Completed `superset init`") 158 159 @hook("before_cleanup") 160 async def before_cleanup(self, invoker: PluginInvoker): 161 """Delete the config file. 162 163 Args: 164 invoker: the active PluginInvoker 165 """ 166 config_file = invoker.files["config"] 167 try: 168 config_file.unlink() 169 logging.debug(f"Deleted configuration at {config_file}") 170 except FileNotFoundError: 171 pass 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/meltano/core/plugin/superset.py b/src/meltano/core/plugin/superset.py --- a/src/meltano/core/plugin/superset.py +++ b/src/meltano/core/plugin/superset.py @@ -68,7 +68,7 @@ config_script_lines = [ "import sys", "module = sys.modules[__name__]", - f"config = {str(config)}", + f"config = {config!r}", "for key, value in config.items():", " if key.isupper():", " setattr(module, key, value)",
{"golden_diff": "diff --git a/src/meltano/core/plugin/superset.py b/src/meltano/core/plugin/superset.py\n--- a/src/meltano/core/plugin/superset.py\n+++ b/src/meltano/core/plugin/superset.py\n@@ -68,7 +68,7 @@\n config_script_lines = [\n \"import sys\",\n \"module = sys.modules[__name__]\",\n- f\"config = {str(config)}\",\n+ f\"config = {config!r}\",\n \"for key, value in config.items():\",\n \" if key.isupper():\",\n \" setattr(module, key, value)\",\n", "issue": "Upgrade pytest\nMigrated from GitLab: https://gitlab.com/meltano/meltano/-/issues/3529\n\nOriginally created by @vischous on 2022-05-25 20:11:16\n\n---\n\nWhile waiting for some tests to run I was trying to speed up my iteration time, and found `pytest-xdist`. \nThread here https://meltano.slack.com/archives/CKHP6G5V4/p1653508630941889\n\nI have a version of pytest-xdist that should work with our version of pytest so I'm good, but we do have an old version of Pytest and I told @WillDaSilva I'd throw an issue in :D \n\n```\nPS E:\\code\\meltano> poetry add -D pytest-xdist\nUsing version ^2.5.0 for pytest-xdist\n\nUpdating dependencies\nResolving dependencies...\n\n SolverProblemError\n\n Because no versions of pytest-xdist match >2.5.0,<3.0.0\n and pytest-xdist (2.5.0) depends on pytest (>=6.2.0), pytest-xdist (>=2.5.0,<3.0.0) requires pytest (>=6.2.0).\n So, because meltano depends on both pytest (^4.3.1) and pytest-xdist (^2.5.0), version solving failed.\n\n at ~\\.poetry\\lib\\poetry\\puzzle\\solver.py:241 in _solve\n 237\u2502 packages = result.packages\n 238\u2502 except OverrideNeeded as e:\n 239\u2502 return self.solve_in_compatibility_mode(e.overrides, use_latest=use_latest)\n 240\u2502 except SolveFailure as e:\n \u2192 241\u2502 raise SolverProblemError(e)\n 242\u2502\n 243\u2502 results = dict(\n 244\u2502 depth_first_search(\n 245\u2502 PackageNode(self._package, packages), aggregate_package_nodes\n```\n", "before_files": [{"content": "\"\"\"Plugin glue code for Superset.\"\"\"\nimport logging\nimport subprocess\nfrom typing import List\n\nimport structlog\n\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.plugin.error import PluginExecutionError\nfrom meltano.core.plugin_invoker import PluginInvoker\nfrom meltano.core.setting_definition import SettingDefinition\n\nfrom . 
import BasePlugin, PluginType\n\nlogger = structlog.getLogger(__name__)\n\n\nclass SupersetInvoker(PluginInvoker):\n \"\"\"Invoker that prepares env for Superset.\"\"\"\n\n def env(self):\n \"\"\"Environment variables for Superset.\n\n Returns:\n Dictionary of environment variables.\n \"\"\"\n env = super().env()\n\n env[\"SUPERSET_HOME\"] = str(self.plugin_config_service.run_dir)\n env[\"SUPERSET_CONFIG_PATH\"] = str(self.files[\"config\"])\n env[\"FLASK_APP\"] = \"superset\"\n\n return env\n\n\nclass Superset(BasePlugin):\n \"\"\"Plugin glue code for Superset.\"\"\"\n\n __plugin_type__ = PluginType.UTILITIES\n\n invoker_class = SupersetInvoker\n\n EXTRA_SETTINGS = [SettingDefinition(name=\"_config_path\")]\n\n @property\n def config_files(self):\n \"\"\"Return the configuration files required by the plugin.\n\n Returns:\n Dictionary of config file identifiers and filenames\n \"\"\"\n return {\"config\": \"superset_config.py\"}\n\n @hook(\"before_configure\")\n async def before_configure(self, invoker: SupersetInvoker, session): # noqa: WPS217\n \"\"\"Write plugin configuration to superset_config.py.\n\n Args:\n invoker: the active PluginInvoker\n session: metadata database session\n\n Raises:\n PluginExecutionError: if config file couldn't be found\n \"\"\"\n config = invoker.plugin_config_processed\n\n config_script_lines = [\n \"import sys\",\n \"module = sys.modules[__name__]\",\n f\"config = {str(config)}\",\n \"for key, value in config.items():\",\n \" if key.isupper():\",\n \" setattr(module, key, value)\",\n ]\n\n custom_config_filename = invoker.plugin_config_extras[\"_config_path\"]\n if custom_config_filename:\n custom_config_path = invoker.project.root.joinpath(custom_config_filename)\n\n if custom_config_path.exists():\n config_script_lines.extend(\n [\n \"from importlib.util import module_from_spec, spec_from_file_location\",\n f'spec = spec_from_file_location(\"superset_config\", {str(custom_config_path)!r})',\n \"custom_config = module_from_spec(spec)\",\n 'sys.modules[\"superset_config\"] = custom_config',\n \"spec.loader.exec_module(custom_config)\",\n \"for key in dir(custom_config):\",\n \" if key.isupper():\",\n \" setattr(module, key, getattr(custom_config, key))\",\n ]\n )\n\n logger.info(f\"Merged in config from {custom_config_path}\")\n else:\n raise PluginExecutionError(\n f\"Could not find config file {custom_config_path}\"\n )\n\n config_path = invoker.files[\"config\"]\n with open(config_path, \"w\") as config_file:\n config_file.write(\"\\n\".join(config_script_lines))\n logging.debug(f\"Created configuration at {config_path}\")\n\n @hook(\"before_invoke\")\n async def db_upgrade_hook(self, invoker: PluginInvoker, exec_args: List[str]):\n \"\"\"Create or upgrade metadata database.\n\n Args:\n invoker: the active PluginInvoker\n exec_args: the args being passed\n\n Raises:\n AsyncSubprocessError: if command failed to run\n \"\"\"\n handle = await invoker.invoke_async(\n \"db\",\n \"upgrade\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Superset metadata database could not be initialized: `superset db upgrade` failed\",\n handle,\n )\n\n logging.debug(\"Completed `superset db upgrade`\")\n\n @hook(\"before_invoke\")\n async def init_hook(self, invoker: PluginInvoker, exec_args: List[str]):\n \"\"\"Create default roles and permissions.\n\n Args:\n invoker: the active PluginInvoker\n exec_args: the args being passed\n\n Raises:\n AsyncSubprocessError: if command 
failed to run\n \"\"\"\n handle = await invoker.invoke_async(\n \"init\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Superset default roles and permissions could not be created: `superset init` failed\",\n handle,\n )\n\n logging.debug(\"Completed `superset init`\")\n\n @hook(\"before_cleanup\")\n async def before_cleanup(self, invoker: PluginInvoker):\n \"\"\"Delete the config file.\n\n Args:\n invoker: the active PluginInvoker\n \"\"\"\n config_file = invoker.files[\"config\"]\n try:\n config_file.unlink()\n logging.debug(f\"Deleted configuration at {config_file}\")\n except FileNotFoundError:\n pass\n", "path": "src/meltano/core/plugin/superset.py"}], "after_files": [{"content": "\"\"\"Plugin glue code for Superset.\"\"\"\nimport logging\nimport subprocess\nfrom typing import List\n\nimport structlog\n\nfrom meltano.core.behavior.hookable import hook\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.plugin.error import PluginExecutionError\nfrom meltano.core.plugin_invoker import PluginInvoker\nfrom meltano.core.setting_definition import SettingDefinition\n\nfrom . import BasePlugin, PluginType\n\nlogger = structlog.getLogger(__name__)\n\n\nclass SupersetInvoker(PluginInvoker):\n \"\"\"Invoker that prepares env for Superset.\"\"\"\n\n def env(self):\n \"\"\"Environment variables for Superset.\n\n Returns:\n Dictionary of environment variables.\n \"\"\"\n env = super().env()\n\n env[\"SUPERSET_HOME\"] = str(self.plugin_config_service.run_dir)\n env[\"SUPERSET_CONFIG_PATH\"] = str(self.files[\"config\"])\n env[\"FLASK_APP\"] = \"superset\"\n\n return env\n\n\nclass Superset(BasePlugin):\n \"\"\"Plugin glue code for Superset.\"\"\"\n\n __plugin_type__ = PluginType.UTILITIES\n\n invoker_class = SupersetInvoker\n\n EXTRA_SETTINGS = [SettingDefinition(name=\"_config_path\")]\n\n @property\n def config_files(self):\n \"\"\"Return the configuration files required by the plugin.\n\n Returns:\n Dictionary of config file identifiers and filenames\n \"\"\"\n return {\"config\": \"superset_config.py\"}\n\n @hook(\"before_configure\")\n async def before_configure(self, invoker: SupersetInvoker, session): # noqa: WPS217\n \"\"\"Write plugin configuration to superset_config.py.\n\n Args:\n invoker: the active PluginInvoker\n session: metadata database session\n\n Raises:\n PluginExecutionError: if config file couldn't be found\n \"\"\"\n config = invoker.plugin_config_processed\n\n config_script_lines = [\n \"import sys\",\n \"module = sys.modules[__name__]\",\n f\"config = {config!r}\",\n \"for key, value in config.items():\",\n \" if key.isupper():\",\n \" setattr(module, key, value)\",\n ]\n\n custom_config_filename = invoker.plugin_config_extras[\"_config_path\"]\n if custom_config_filename:\n custom_config_path = invoker.project.root.joinpath(custom_config_filename)\n\n if custom_config_path.exists():\n config_script_lines.extend(\n [\n \"from importlib.util import module_from_spec, spec_from_file_location\",\n f'spec = spec_from_file_location(\"superset_config\", {str(custom_config_path)!r})',\n \"custom_config = module_from_spec(spec)\",\n 'sys.modules[\"superset_config\"] = custom_config',\n \"spec.loader.exec_module(custom_config)\",\n \"for key in dir(custom_config):\",\n \" if key.isupper():\",\n \" setattr(module, key, getattr(custom_config, key))\",\n ]\n )\n\n logger.info(f\"Merged in config from {custom_config_path}\")\n else:\n raise PluginExecutionError(\n f\"Could 
not find config file {custom_config_path}\"\n )\n\n config_path = invoker.files[\"config\"]\n with open(config_path, \"w\") as config_file:\n config_file.write(\"\\n\".join(config_script_lines))\n logging.debug(f\"Created configuration at {config_path}\")\n\n @hook(\"before_invoke\")\n async def db_upgrade_hook(self, invoker: PluginInvoker, exec_args: List[str]):\n \"\"\"Create or upgrade metadata database.\n\n Args:\n invoker: the active PluginInvoker\n exec_args: the args being passed\n\n Raises:\n AsyncSubprocessError: if command failed to run\n \"\"\"\n handle = await invoker.invoke_async(\n \"db\",\n \"upgrade\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Superset metadata database could not be initialized: `superset db upgrade` failed\",\n handle,\n )\n\n logging.debug(\"Completed `superset db upgrade`\")\n\n @hook(\"before_invoke\")\n async def init_hook(self, invoker: PluginInvoker, exec_args: List[str]):\n \"\"\"Create default roles and permissions.\n\n Args:\n invoker: the active PluginInvoker\n exec_args: the args being passed\n\n Raises:\n AsyncSubprocessError: if command failed to run\n \"\"\"\n handle = await invoker.invoke_async(\n \"init\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n exit_code = await handle.wait()\n\n if exit_code:\n raise AsyncSubprocessError(\n \"Superset default roles and permissions could not be created: `superset init` failed\",\n handle,\n )\n\n logging.debug(\"Completed `superset init`\")\n\n @hook(\"before_cleanup\")\n async def before_cleanup(self, invoker: PluginInvoker):\n \"\"\"Delete the config file.\n\n Args:\n invoker: the active PluginInvoker\n \"\"\"\n config_file = invoker.files[\"config\"]\n try:\n config_file.unlink()\n logging.debug(f\"Deleted configuration at {config_file}\")\n except FileNotFoundError:\n pass\n", "path": "src/meltano/core/plugin/superset.py"}]}
2,251
137
gh_patches_debug_6160
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2741
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG]: Error during running chatgpt train_rm.sh ### πŸ› Describe the bug I run into a runtime error when running `train_rm.sh` under examples of chatgpt. ``` Traceback (most recent call last): File "/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py", line 53, in <module> train(args) File "/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py", line 36, in train trainer.fit(use_lora=args.lora_rank) File "/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/chatgpt/trainer/rm.py", line 57, in fit loss.backward() File "/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/_tensor.py", line 488, in backward torch.autograd.backward( File "/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/autograd/__init__.py", line 197, in backward Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn ``` The model is downloaded from Huggingface `bigscience/bloom-560m`. `lora_rank` is `16`. Is there any hints on this? ### Environment Environment: torch 1.13.1+cu113 ------------ Environment ------------ Colossal-AI version: 0.2.4 PyTorch version: 1.13.1 CUDA version: 11.3 CUDA version required by PyTorch: 11.7 Note: 1. The table above checks the versions of the libraries/tools in the current environment 2. If the CUDA version is N/A, you can set the CUDA_HOME environment variable to locate it ------------ CUDA Extensions AOT Compilation ------------ Found AOT CUDA Extension: x PyTorch version used for AOT compilation: N/A CUDA version used for AOT compilation: N/A Note: 1. AOT (ahead-of-time) compilation of the CUDA kernels occurs during installation when the environment varialbe CUDA_EXT=1 is set 2. If AOT compilation is not enabled, stay calm as the CUDA kernels can still be built during runtime ------------ Compatibility ------------ PyTorch version match: N/A System and PyTorch CUDA version match: x System and Colossal-AI CUDA version match: N/A Note: 1. The table above checks the version compatibility of the libraries/tools in the current environment - PyTorch version mistach: whether the PyTorch version in the current environment is compatible with the PyTorch version used for AOT compilation - System and PyTorch CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version required by PyTorch - System and Colossal-AI CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version used for AOT compilation [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `applications/ChatGPT/chatgpt/trainer/rm.py` Content: ``` 1 from abc import ABC 2 3 import loralib as lora 4 from chatgpt.dataset import RewardDataset 5 from chatgpt.nn import PairWiseLoss 6 from torch.optim import Adam 7 from torch.utils.data import DataLoader 8 from tqdm import tqdm 9 10 11 class RewardModelTrainer(ABC): 12 """ 13 Trainer to use while training reward model. 
14 15 Args: 16 model (torch.nn.Module): the model to train 17 train_dataset (RewardDataset): the dataset to use for training 18 eval_dataset (RewardDataset): the dataset to use for evaluation 19 batch_size (int, defaults to 1): the batch size while training 20 num_epochs (int, defaults to 2): the number of epochs to train 21 optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer 22 """ 23 24 def __init__(self, 25 model, 26 train_dataset: RewardDataset, 27 eval_dataset: RewardDataset, 28 batch_size: int = 1, 29 num_epochs: int = 2, 30 optim_kwargs: dict = {'lr': 1e-4}) -> None: 31 super().__init__() 32 self.model = model 33 self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size) 34 self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size) 35 self.loss_fn = PairWiseLoss() 36 self.optimizer = Adam(self.model.parameters(), **optim_kwargs) 37 self.epochs = num_epochs 38 39 def fit(self, use_lora): 40 epoch_bar = tqdm(range(self.epochs), desc='Train epoch') 41 for epoch in range(self.epochs): 42 step_bar = tqdm(range(self.train_dataloader.__len__()), desc='Train step of epoch %d' % epoch) 43 # train 44 if use_lora > 0: 45 print("Using Lora") 46 lora.mark_only_lora_as_trainable(self.model) 47 else: 48 self.model.train() 49 for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader: 50 chosen_ids = chosen_ids.squeeze(1).cuda() 51 c_mask = c_mask.squeeze(1).cuda() 52 reject_ids = reject_ids.squeeze(1).cuda() 53 r_mask = r_mask.squeeze(1).cuda() 54 chosen_reward = self.model(chosen_ids, attention_mask=c_mask) 55 reject_reward = self.model(reject_ids, attention_mask=r_mask) 56 loss = self.loss_fn(chosen_reward, reject_reward) 57 loss.backward() 58 self.optimizer.step() 59 self.optimizer.zero_grad() 60 step_bar.update() 61 step_bar.set_postfix({'loss': loss.item()}) 62 63 # eval 64 self.model.eval() 65 for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader: 66 dist = 0 67 chosen_ids = chosen_ids.squeeze(1).cuda() 68 c_mask = c_mask.squeeze(1).cuda() 69 reject_ids = reject_ids.squeeze(1).cuda() 70 r_mask = r_mask.squeeze(1).cuda() 71 chosen_reward = self.model(chosen_ids, attention_mask=c_mask) 72 reject_reward = self.model(reject_ids, attention_mask=r_mask) 73 dist += (chosen_reward - reject_reward) 74 dist_mean = dist / self.eval_dataloader.__len__() 75 epoch_bar.update() 76 step_bar.set_postfix({'loss': loss.item(), 'dist_mean': dist_mean.item()}) 77 step_bar.close() 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/applications/ChatGPT/chatgpt/trainer/rm.py b/applications/ChatGPT/chatgpt/trainer/rm.py --- a/applications/ChatGPT/chatgpt/trainer/rm.py +++ b/applications/ChatGPT/chatgpt/trainer/rm.py @@ -43,7 +43,7 @@ # train if use_lora > 0: print("Using Lora") - lora.mark_only_lora_as_trainable(self.model) + lora.mark_only_lora_as_trainable(self.model.model) else: self.model.train() for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:
{"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/trainer/rm.py b/applications/ChatGPT/chatgpt/trainer/rm.py\n--- a/applications/ChatGPT/chatgpt/trainer/rm.py\n+++ b/applications/ChatGPT/chatgpt/trainer/rm.py\n@@ -43,7 +43,7 @@\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n- lora.mark_only_lora_as_trainable(self.model)\n+ lora.mark_only_lora_as_trainable(self.model.model)\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n", "issue": "[BUG]: Error during running chatgpt train_rm.sh \n### \ud83d\udc1b Describe the bug\n\nI run into a runtime error when running `train_rm.sh` under examples of chatgpt.\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py\", line 53, in <module>\r\n train(args)\r\n File \"/root/ColossalAI/applications/ChatGPT/examples/train_reward_model.py\", line 36, in train\r\n trainer.fit(use_lora=args.lora_rank)\r\n File \"/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/chatgpt/trainer/rm.py\", line 57, in fit\r\n loss.backward()\r\n File \"/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/_tensor.py\", line 488, in backward\r\n torch.autograd.backward(\r\n File \"/root/miniconda3/envs/chatgpt/lib/python3.10/site-packages/torch/autograd/__init__.py\", line 197, in backward\r\n Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\r\nRuntimeError: element 0 of tensors does not require grad and does not have a grad_fn\r\n```\r\n\r\n\r\nThe model is downloaded from Huggingface `bigscience/bloom-560m`. `lora_rank` is `16`. Is there any hints on this?\r\n\n\n### Environment\n\nEnvironment:\r\ntorch 1.13.1+cu113\r\n\r\n------------ Environment ------------\r\nColossal-AI version: 0.2.4\r\nPyTorch version: 1.13.1\r\nCUDA version: 11.3\r\nCUDA version required by PyTorch: 11.7\r\n\r\nNote:\r\n1. The table above checks the versions of the libraries/tools in the current environment\r\n2. If the CUDA version is N/A, you can set the CUDA_HOME environment variable to locate it\r\n\r\n------------ CUDA Extensions AOT Compilation ------------\r\nFound AOT CUDA Extension: x\r\nPyTorch version used for AOT compilation: N/A\r\nCUDA version used for AOT compilation: N/A\r\n\r\nNote:\r\n1. AOT (ahead-of-time) compilation of the CUDA kernels occurs during installation when the environment varialbe CUDA_EXT=1 is set\r\n2. If AOT compilation is not enabled, stay calm as the CUDA kernels can still be built during runtime\r\n\r\n------------ Compatibility ------------\r\nPyTorch version match: N/A\r\nSystem and PyTorch CUDA version match: x\r\nSystem and Colossal-AI CUDA version match: N/A\r\n\r\nNote:\r\n1. 
The table above checks the version compatibility of the libraries/tools in the current environment\r\n - PyTorch version mistach: whether the PyTorch version in the current environment is compatible with the PyTorch version used for AOT compilation\r\n - System and PyTorch CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version required by PyTorch\r\n - System and Colossal-AI CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version used for AOT compilation\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from abc import ABC\n\nimport loralib as lora\nfrom chatgpt.dataset import RewardDataset\nfrom chatgpt.nn import PairWiseLoss\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\nclass RewardModelTrainer(ABC):\n \"\"\"\n Trainer to use while training reward model.\n\n Args:\n model (torch.nn.Module): the model to train\n train_dataset (RewardDataset): the dataset to use for training\n eval_dataset (RewardDataset): the dataset to use for evaluation\n batch_size (int, defaults to 1): the batch size while training\n num_epochs (int, defaults to 2): the number of epochs to train\n optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer\n \"\"\"\n\n def __init__(self,\n model,\n train_dataset: RewardDataset,\n eval_dataset: RewardDataset,\n batch_size: int = 1,\n num_epochs: int = 2,\n optim_kwargs: dict = {'lr': 1e-4}) -> None:\n super().__init__()\n self.model = model\n self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)\n self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)\n self.loss_fn = PairWiseLoss()\n self.optimizer = Adam(self.model.parameters(), **optim_kwargs)\n self.epochs = num_epochs\n\n def fit(self, use_lora):\n epoch_bar = tqdm(range(self.epochs), desc='Train epoch')\n for epoch in range(self.epochs):\n step_bar = tqdm(range(self.train_dataloader.__len__()), desc='Train step of epoch %d' % epoch)\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n lora.mark_only_lora_as_trainable(self.model)\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n loss = self.loss_fn(chosen_reward, reject_reward)\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n step_bar.update()\n step_bar.set_postfix({'loss': loss.item()})\n\n # eval\n self.model.eval()\n for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:\n dist = 0\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n dist += (chosen_reward - reject_reward)\n dist_mean = dist / self.eval_dataloader.__len__()\n epoch_bar.update()\n step_bar.set_postfix({'loss': loss.item(), 'dist_mean': dist_mean.item()})\n step_bar.close()\n", "path": "applications/ChatGPT/chatgpt/trainer/rm.py"}], "after_files": [{"content": "from abc import ABC\n\nimport loralib as lora\nfrom 
chatgpt.dataset import RewardDataset\nfrom chatgpt.nn import PairWiseLoss\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\nclass RewardModelTrainer(ABC):\n \"\"\"\n Trainer to use while training reward model.\n\n Args:\n model (torch.nn.Module): the model to train\n train_dataset (RewardDataset): the dataset to use for training\n eval_dataset (RewardDataset): the dataset to use for evaluation\n batch_size (int, defaults to 1): the batch size while training\n num_epochs (int, defaults to 2): the number of epochs to train\n optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer\n \"\"\"\n\n def __init__(self,\n model,\n train_dataset: RewardDataset,\n eval_dataset: RewardDataset,\n batch_size: int = 1,\n num_epochs: int = 2,\n optim_kwargs: dict = {'lr': 1e-4}) -> None:\n super().__init__()\n self.model = model\n self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)\n self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)\n self.loss_fn = PairWiseLoss()\n self.optimizer = Adam(self.model.parameters(), **optim_kwargs)\n self.epochs = num_epochs\n\n def fit(self, use_lora):\n epoch_bar = tqdm(range(self.epochs), desc='Train epoch')\n for epoch in range(self.epochs):\n step_bar = tqdm(range(self.train_dataloader.__len__()), desc='Train step of epoch %d' % epoch)\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n lora.mark_only_lora_as_trainable(self.model.model)\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n loss = self.loss_fn(chosen_reward, reject_reward)\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n step_bar.update()\n step_bar.set_postfix({'loss': loss.item()})\n\n # eval\n self.model.eval()\n for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:\n dist = 0\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n dist += (chosen_reward - reject_reward)\n dist_mean = dist / self.eval_dataloader.__len__()\n epoch_bar.update()\n step_bar.set_postfix({'loss': loss.item(), 'dist_mean': dist_mean.item()})\n step_bar.close()\n", "path": "applications/ChatGPT/chatgpt/trainer/rm.py"}]}
num_tokens: 1,842
num_tokens_diff: 156
problem_id: gh_patches_debug_5515
source: rasdani/github-patches
task_type: git_diff
in_source_id: OpenNMT__OpenNMT-py-1949
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 0: invalid continuation byt with the current V2.0.0rc2, on Windows 10, Python 3.7.7. onmt_build_vocab doesn't save utf-8 src/tgt files. I build a data set and it's fine, but when I try to train I get ```PS C:\Users\Admin\Documents\pytorch\bugrepo\utf-8-bug-report-for-opennmt> onmt_train -config .\config.yaml [2020-11-26 11:08:30,632 INFO] Missing transforms field for corpus_1 data, set to default: []. [2020-11-26 11:08:30,633 WARNING] Corpus corpus_1's weight should be given. We default it to 1 for you. [2020-11-26 11:08:30,633 INFO] Missing transforms field for valid data, set to default: []. [2020-11-26 11:08:30,633 INFO] Parsed 2 corpora from -data. [2020-11-26 11:08:30,634 INFO] Get special vocabs from Transforms: {'src': set(), 'tgt': set()}. [2020-11-26 11:08:30,640 INFO] Loading vocab from text file... [2020-11-26 11:08:30,641 INFO] Loading src vocabulary from ./out.vocab.src Traceback (most recent call last): File "c:\users\admin\appdata\local\programs\python\python37\lib\runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "c:\users\admin\appdata\local\programs\python\python37\lib\runpy.py", line 85, in _run_code exec(code, run_globals) File "C:\Users\Admin\AppData\Local\Programs\Python\Python37\Scripts\onmt_train.exe\__main__.py", line 7, in <module> File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\bin\train.py", line 169, in main train(opt) File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\bin\train.py", line 103, in train checkpoint, fields, transforms_cls = _init_train(opt) File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\bin\train.py", line 80, in _init_train fields, transforms_cls = prepare_fields_transforms(opt) File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\bin\train.py", line 34, in prepare_fields_transforms opt, src_specials=specials['src'], tgt_specials=specials['tgt']) File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\inputters\fields.py", line 33, in build_dynamic_fields min_freq=opts.src_words_min_frequency) File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\inputters\inputter.py", line 272, in _load_vocab vocab, has_count = _read_vocab_file(vocab_path, name) File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\inputters\inputter.py", line 475, in _read_vocab_file lines = [line.strip() for line in f if line.strip()] File "c:\users\admin\appdata\local\programs\python\python37\lib\site-packages\onmt\inputters\inputter.py", line 475, in <listcomp> lines = [line.strip() for line in f if line.strip()] File "c:\users\admin\appdata\local\programs\python\python37\lib\codecs.py", line 714, in __next__ return next(self.reader) File "c:\users\admin\appdata\local\programs\python\python37\lib\codecs.py", line 645, in __next__ line = self.readline() File "c:\users\admin\appdata\local\programs\python\python37\lib\codecs.py", line 558, in readline data = self.read(readsize, firstline=True) File "c:\users\admin\appdata\local\programs\python\python37\lib\codecs.py", line 504, in read newchars, decodedbytes = self.decode(data, self.errors) UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 0: invalid continuation byte ``` Repro case here: 
https://github.com/increpare/utf-8-bug-report-for-opennmt Adding in utf8 encoding to save_counter in build_vocab fixes it (about to make pull request). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `onmt/bin/build_vocab.py` Content: ``` 1 #!/usr/bin/env python 2 """Get vocabulary coutings from transformed corpora samples.""" 3 from onmt.utils.logging import init_logger 4 from onmt.utils.misc import set_random_seed, check_path 5 from onmt.utils.parse import ArgumentParser 6 from onmt.opts import dynamic_prepare_opts 7 from onmt.inputters.corpus import build_vocab 8 from onmt.transforms import make_transforms, get_transforms_cls 9 10 11 def build_vocab_main(opts): 12 """Apply transforms to samples of specified data and build vocab from it. 13 14 Transforms that need vocab will be disabled in this. 15 Built vocab is saved in plain text format as following and can be pass as 16 `-src_vocab` (and `-tgt_vocab`) when training: 17 ``` 18 <tok_0>\t<count_0> 19 <tok_1>\t<count_1> 20 ``` 21 """ 22 23 ArgumentParser.validate_prepare_opts(opts, build_vocab_only=True) 24 assert opts.n_sample == -1 or opts.n_sample > 1, \ 25 f"Illegal argument n_sample={opts.n_sample}." 26 27 logger = init_logger() 28 set_random_seed(opts.seed, False) 29 transforms_cls = get_transforms_cls(opts._all_transform) 30 fields = None 31 32 transforms = make_transforms(opts, transforms_cls, fields) 33 34 logger.info(f"Counter vocab from {opts.n_sample} samples.") 35 src_counter, tgt_counter = build_vocab( 36 opts, transforms, n_sample=opts.n_sample) 37 38 logger.info(f"Counters src:{len(src_counter)}") 39 logger.info(f"Counters tgt:{len(tgt_counter)}") 40 41 def save_counter(counter, save_path): 42 check_path(save_path, exist_ok=opts.overwrite, log=logger.warning) 43 with open(save_path, "w") as fo: 44 for tok, count in counter.most_common(): 45 fo.write(tok + "\t" + str(count) + "\n") 46 47 if opts.share_vocab: 48 src_counter += tgt_counter 49 tgt_counter = src_counter 50 logger.info(f"Counters after share:{len(src_counter)}") 51 save_counter(src_counter, opts.src_vocab) 52 else: 53 save_counter(src_counter, opts.src_vocab) 54 save_counter(tgt_counter, opts.tgt_vocab) 55 56 57 def _get_parser(): 58 parser = ArgumentParser(description='build_vocab.py') 59 dynamic_prepare_opts(parser, build_vocab_only=True) 60 return parser 61 62 63 def main(): 64 parser = _get_parser() 65 opts, unknown = parser.parse_known_args() 66 build_vocab_main(opts) 67 68 69 if __name__ == '__main__': 70 main() 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/onmt/bin/build_vocab.py b/onmt/bin/build_vocab.py --- a/onmt/bin/build_vocab.py +++ b/onmt/bin/build_vocab.py @@ -40,7 +40,7 @@ def save_counter(counter, save_path): check_path(save_path, exist_ok=opts.overwrite, log=logger.warning) - with open(save_path, "w") as fo: + with open(save_path, "w",encoding="utf8") as fo: for tok, count in counter.most_common(): fo.write(tok + "\t" + str(count) + "\n")
{"golden_diff": "diff --git a/onmt/bin/build_vocab.py b/onmt/bin/build_vocab.py\n--- a/onmt/bin/build_vocab.py\n+++ b/onmt/bin/build_vocab.py\n@@ -40,7 +40,7 @@\n \n def save_counter(counter, save_path):\n check_path(save_path, exist_ok=opts.overwrite, log=logger.warning)\n- with open(save_path, \"w\") as fo:\n+ with open(save_path, \"w\",encoding=\"utf8\") as fo:\n for tok, count in counter.most_common():\n fo.write(tok + \"\\t\" + str(count) + \"\\n\")\n", "issue": "UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 0: invalid continuation byt\nwith the current V2.0.0rc2, on Windows 10, Python 3.7.7. onmt_build_vocab doesn't save utf-8 src/tgt files. \r\n\r\nI build a data set and it's fine, but when I try to train I get\r\n\r\n```PS C:\\Users\\Admin\\Documents\\pytorch\\bugrepo\\utf-8-bug-report-for-opennmt> onmt_train -config .\\config.yaml \r\n[2020-11-26 11:08:30,632 INFO] Missing transforms field for corpus_1 data, set to default: [].\r\n[2020-11-26 11:08:30,633 WARNING] Corpus corpus_1's weight should be given. We default it to 1 for you.\r\n[2020-11-26 11:08:30,633 INFO] Missing transforms field for valid data, set to default: [].\r\n[2020-11-26 11:08:30,633 INFO] Parsed 2 corpora from -data.\r\n[2020-11-26 11:08:30,634 INFO] Get special vocabs from Transforms: {'src': set(), 'tgt': set()}.\r\n[2020-11-26 11:08:30,640 INFO] Loading vocab from text file...\r\n[2020-11-26 11:08:30,641 INFO] Loading src vocabulary from ./out.vocab.src\r\nTraceback (most recent call last):\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\Admin\\AppData\\Local\\Programs\\Python\\Python37\\Scripts\\onmt_train.exe\\__main__.py\", line 7, in <module>\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\bin\\train.py\", line 169, in main\r\n train(opt)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\bin\\train.py\", line 103, in train\r\n checkpoint, fields, transforms_cls = _init_train(opt)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\bin\\train.py\", line 80, in _init_train\r\n fields, transforms_cls = prepare_fields_transforms(opt)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\bin\\train.py\", line 34, in prepare_fields_transforms\r\n opt, src_specials=specials['src'], tgt_specials=specials['tgt'])\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\inputters\\fields.py\", line 33, in build_dynamic_fields\r\n min_freq=opts.src_words_min_frequency)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\inputters\\inputter.py\", line 272, in _load_vocab \r\n vocab, has_count = _read_vocab_file(vocab_path, name)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\inputters\\inputter.py\", line 475, in _read_vocab_file\r\n lines = [line.strip() for line in f if line.strip()]\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\onmt\\inputters\\inputter.py\", line 475, in <listcomp> \r\n lines = [line.strip() for line in f if line.strip()]\r\n File 
\"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\codecs.py\", line 714, in __next__\r\n return next(self.reader)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\codecs.py\", line 645, in __next__\r\n line = self.readline()\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\codecs.py\", line 558, in readline\r\n data = self.read(readsize, firstline=True)\r\n File \"c:\\users\\admin\\appdata\\local\\programs\\python\\python37\\lib\\codecs.py\", line 504, in read\r\n newchars, decodedbytes = self.decode(data, self.errors)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xe3 in position 0: invalid continuation byte\r\n```\r\n\r\nRepro case here:\r\n\r\nhttps://github.com/increpare/utf-8-bug-report-for-opennmt\r\n\r\nAdding in utf8 encoding to save_counter in build_vocab fixes it (about to make pull request).\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Get vocabulary coutings from transformed corpora samples.\"\"\"\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import set_random_seed, check_path\nfrom onmt.utils.parse import ArgumentParser\nfrom onmt.opts import dynamic_prepare_opts\nfrom onmt.inputters.corpus import build_vocab\nfrom onmt.transforms import make_transforms, get_transforms_cls\n\n\ndef build_vocab_main(opts):\n \"\"\"Apply transforms to samples of specified data and build vocab from it.\n\n Transforms that need vocab will be disabled in this.\n Built vocab is saved in plain text format as following and can be pass as\n `-src_vocab` (and `-tgt_vocab`) when training:\n ```\n <tok_0>\\t<count_0>\n <tok_1>\\t<count_1>\n ```\n \"\"\"\n\n ArgumentParser.validate_prepare_opts(opts, build_vocab_only=True)\n assert opts.n_sample == -1 or opts.n_sample > 1, \\\n f\"Illegal argument n_sample={opts.n_sample}.\"\n\n logger = init_logger()\n set_random_seed(opts.seed, False)\n transforms_cls = get_transforms_cls(opts._all_transform)\n fields = None\n\n transforms = make_transforms(opts, transforms_cls, fields)\n\n logger.info(f\"Counter vocab from {opts.n_sample} samples.\")\n src_counter, tgt_counter = build_vocab(\n opts, transforms, n_sample=opts.n_sample)\n\n logger.info(f\"Counters src:{len(src_counter)}\")\n logger.info(f\"Counters tgt:{len(tgt_counter)}\")\n\n def save_counter(counter, save_path):\n check_path(save_path, exist_ok=opts.overwrite, log=logger.warning)\n with open(save_path, \"w\") as fo:\n for tok, count in counter.most_common():\n fo.write(tok + \"\\t\" + str(count) + \"\\n\")\n\n if opts.share_vocab:\n src_counter += tgt_counter\n tgt_counter = src_counter\n logger.info(f\"Counters after share:{len(src_counter)}\")\n save_counter(src_counter, opts.src_vocab)\n else:\n save_counter(src_counter, opts.src_vocab)\n save_counter(tgt_counter, opts.tgt_vocab)\n\n\ndef _get_parser():\n parser = ArgumentParser(description='build_vocab.py')\n dynamic_prepare_opts(parser, build_vocab_only=True)\n return parser\n\n\ndef main():\n parser = _get_parser()\n opts, unknown = parser.parse_known_args()\n build_vocab_main(opts)\n\n\nif __name__ == '__main__':\n main()\n", "path": "onmt/bin/build_vocab.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Get vocabulary coutings from transformed corpora samples.\"\"\"\nfrom onmt.utils.logging import init_logger\nfrom onmt.utils.misc import set_random_seed, check_path\nfrom onmt.utils.parse import ArgumentParser\nfrom onmt.opts import dynamic_prepare_opts\nfrom onmt.inputters.corpus import build_vocab\nfrom 
onmt.transforms import make_transforms, get_transforms_cls\n\n\ndef build_vocab_main(opts):\n \"\"\"Apply transforms to samples of specified data and build vocab from it.\n\n Transforms that need vocab will be disabled in this.\n Built vocab is saved in plain text format as following and can be pass as\n `-src_vocab` (and `-tgt_vocab`) when training:\n ```\n <tok_0>\\t<count_0>\n <tok_1>\\t<count_1>\n ```\n \"\"\"\n\n ArgumentParser.validate_prepare_opts(opts, build_vocab_only=True)\n assert opts.n_sample == -1 or opts.n_sample > 1, \\\n f\"Illegal argument n_sample={opts.n_sample}.\"\n\n logger = init_logger()\n set_random_seed(opts.seed, False)\n transforms_cls = get_transforms_cls(opts._all_transform)\n fields = None\n\n transforms = make_transforms(opts, transforms_cls, fields)\n\n logger.info(f\"Counter vocab from {opts.n_sample} samples.\")\n src_counter, tgt_counter = build_vocab(\n opts, transforms, n_sample=opts.n_sample)\n\n logger.info(f\"Counters src:{len(src_counter)}\")\n logger.info(f\"Counters tgt:{len(tgt_counter)}\")\n\n def save_counter(counter, save_path):\n check_path(save_path, exist_ok=opts.overwrite, log=logger.warning)\n with open(save_path, \"w\",encoding=\"utf8\") as fo:\n for tok, count in counter.most_common():\n fo.write(tok + \"\\t\" + str(count) + \"\\n\")\n\n if opts.share_vocab:\n src_counter += tgt_counter\n tgt_counter = src_counter\n logger.info(f\"Counters after share:{len(src_counter)}\")\n save_counter(src_counter, opts.src_vocab)\n else:\n save_counter(src_counter, opts.src_vocab)\n save_counter(tgt_counter, opts.tgt_vocab)\n\n\ndef _get_parser():\n parser = ArgumentParser(description='build_vocab.py')\n dynamic_prepare_opts(parser, build_vocab_only=True)\n return parser\n\n\ndef main():\n parser = _get_parser()\n opts, unknown = parser.parse_known_args()\n build_vocab_main(opts)\n\n\nif __name__ == '__main__':\n main()\n", "path": "onmt/bin/build_vocab.py"}]}
num_tokens: 2,211
num_tokens_diff: 134
problem_id: gh_patches_debug_12134
source: rasdani/github-patches
task_type: git_diff
in_source_id: dotkom__onlineweb4-268
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Registering user messes up values of address and zip code For example trying to register with the zip code 1234 will add the value (u'1234',) to the database. Exact same thing with address. This also causes the zip code to go over the 4 character limit which again causes the database (if it's not SQLite) to throw an error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/authentication/views.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import uuid 4 import re 5 6 from django.contrib import auth 7 from django.contrib import messages 8 from django.core.mail import send_mail 9 from django.shortcuts import render, redirect, get_object_or_404 10 from django.http import HttpResponseRedirect 11 from django.utils.translation import ugettext as _ 12 13 from django.conf import settings 14 from apps.authentication.forms import (LoginForm, RegisterForm, 15 RecoveryForm, ChangePasswordForm) 16 from apps.authentication.models import OnlineUser, RegisterToken, Email 17 18 def login(request): 19 redirect_url = request.REQUEST.get('next', '') 20 if request.method == 'POST': 21 form = LoginForm(request.POST) 22 if form.login(request): 23 messages.success(request, _(u'Du er nΓ₯ logget inn.')) 24 if redirect_url: 25 return HttpResponseRedirect(redirect_url) 26 return HttpResponseRedirect('/') 27 else: form = LoginForm(request.POST, auto_id=True) 28 else: 29 form = LoginForm() 30 31 response_dict = { 'form' : form, 'next' : redirect_url} 32 return render(request, 'auth/login.html', response_dict) 33 34 def logout(request): 35 auth.logout(request) 36 messages.success(request, _(u'Du er nΓ₯ logget ut.')) 37 return HttpResponseRedirect('/') 38 39 def register(request): 40 if request.user.is_authenticated(): 41 messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.')) 42 return HttpResponseRedirect('/') 43 else: 44 if request.method == 'POST': 45 form = RegisterForm(request.POST) 46 if form.is_valid(): 47 cleaned = form.cleaned_data 48 49 # Create user 50 user = OnlineUser( 51 username=cleaned['username'].lower(), 52 first_name=cleaned['first_name'].title(), 53 last_name=cleaned['last_name'].title(), 54 email=cleaned['email'].lower(), 55 ) 56 # Set remaining fields 57 user.phone=cleaned['phone'], 58 user.address=cleaned['address'].title(), 59 user.zip_code=cleaned['zip_code'], 60 # Store password properly 61 user.set_password(cleaned['password']) 62 # Users need to be manually activated 63 user.is_active = False 64 65 user.save() 66 67 # Create the registration token 68 token = uuid.uuid4().hex 69 rt = RegisterToken(user=user, email=cleaned['email'], token=token) 70 rt.save() 71 72 email_message = _(u""" 73 En konto har blitt registrert pΓ₯ online.ntnu.no med denne epostadressen. Dersom du ikke 74 har utfΓΈrt denne handlingen ber vi deg se bort fra denne eposten. 75 76 For Γ₯ bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gjΓΈre 77 dette ved Γ₯ besΓΈke linken under. 78 79 http://%s/auth/verify/%s/ 80 81 Denne lenken vil vΓ¦re gyldig i 24 timer. Dersom du behΓΈver Γ₯ fΓ₯ tilsendt en ny lenke 82 kan dette gjΓΈres med funksjonen for Γ₯ gjenopprette passord. 
83 """) % (request.META['HTTP_HOST'], token) 84 85 send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [user.email,]) 86 87 messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.')) 88 89 return HttpResponseRedirect('/') 90 else: 91 form = RegisterForm(request.POST, auto_id=True) 92 else: 93 form = RegisterForm() 94 95 return render(request, 'auth/register.html', {'form': form, }) 96 97 def verify(request, token): 98 rt = get_object_or_404(RegisterToken, token=token) 99 100 if rt.is_valid: 101 email = get_object_or_404(Email, email=rt.email) 102 email.verified = True 103 email.save() 104 105 user = getattr(rt, 'user') 106 107 # If it is a stud email, set the ntnu_username for user 108 if re.match(r'[^@][email protected]', rt.email): 109 user.ntnu_username = rt.email.split("@")[0] 110 111 user_activated = False 112 if not user.is_active: 113 user.is_active = True 114 user_activated = True 115 116 user.save() 117 rt.delete() 118 119 if user_activated: 120 messages.success(request, _(u'Bruker %s ble aktivert. Du kan nΓ₯ logge inn.') % user.username) 121 return redirect('auth_login') 122 else: 123 messages.success(request, _(u'Eposten %s er nΓ₯ verifisert.') % email) 124 return redirect('profiles') 125 else: 126 messages.error(request, _(u'Denne lenken er utlΓΈpt. Bruk gjenopprett passord for Γ₯ fΓ₯ tilsendt en ny lenke.')) 127 return HttpResponseRedirect('/') 128 129 130 def recover(request): 131 if request.user.is_authenticated(): 132 messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.')) 133 return HttpResponseRedirect('/') 134 else: 135 if request.method == 'POST': 136 form = RecoveryForm(request.POST) 137 if form.is_valid(): 138 email = form.cleaned_data['email'] 139 users = OnlineUser.objects.filter(email=email) 140 141 if len(users) == 0: 142 messages.error(request, _(u'Denne eposten er ikke registrert i vΓ₯re systemer.')) 143 return HttpResponseRedirect('/') 144 145 user = users[0] 146 user.save() 147 148 # Create the registration token 149 token = uuid.uuid4().hex 150 rt = RegisterToken(user=user, token=token) 151 rt.save() 152 153 email_message = _(u""" 154 Vi har mottat forespΓΈrsel om Γ₯ gjenopprette passordet for kontoen bundet til %s. 155 Dersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten. 156 157 Brukernavn: %s 158 159 Hvis du ΓΈnsker Γ₯ gjennomfΓΈre en gjenoppretning av passord, bruk lenken under. 160 161 http://%s/auth/set_password/%s/ 162 163 Denne lenken vil vΓ¦re gyldig i 24 timer. Dersom du behΓΈver Γ₯ fΓ₯ tilsendt en ny lenke 164 kan dette gjΓΈres med funksjonen for Γ₯ gjenopprette passord. 
165 """) % (email, user.username, request.META['HTTP_HOST'], token) 166 167 send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email,]) 168 169 messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email) 170 171 return HttpResponseRedirect('/') 172 else: 173 form = RecoveryForm(request.POST, auto_id=True) 174 else: 175 form = RecoveryForm() 176 177 return render(request, 'auth/recover.html', {'form': form}) 178 179 def set_password(request, token=None): 180 if request.user.is_authenticated(): 181 return HttpResponseRedirect('/') 182 else: 183 rt = get_object_or_404(RegisterToken, token=token) 184 185 if rt.is_valid: 186 if request.method == 'POST': 187 form = ChangePasswordForm(request.POST, auto_id=True) 188 if form.is_valid(): 189 user = getattr(rt, 'user') 190 191 user.is_active = True 192 user.set_password(form.cleaned_data['new_password']) 193 user.save() 194 195 rt.delete() 196 197 messages.success(request, _(u'Bruker %s har gjennomfΓΈrt vellykket gjenoppretning av passord. Du kan nΓ₯ logge inn.') % user) 198 199 return HttpResponseRedirect('/') 200 else: 201 202 form = ChangePasswordForm() 203 204 messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn ΓΈnsket passord.')) 205 206 return render(request, 'auth/set_password.html', {'form': form, 'token': token}) 207 208 else: 209 messages.error(request, _(u'Lenken er utlΓΈpt. Vennligst bruk gjenoppretning av passord for Γ₯ fΓ₯ tilsendt en ny lenke.')) 210 return HttpResponseRedirect('/') 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/authentication/views.py b/apps/authentication/views.py --- a/apps/authentication/views.py +++ b/apps/authentication/views.py @@ -54,9 +54,9 @@ email=cleaned['email'].lower(), ) # Set remaining fields - user.phone=cleaned['phone'], - user.address=cleaned['address'].title(), - user.zip_code=cleaned['zip_code'], + user.phone=cleaned['phone'] + user.address=cleaned['address'].title() + user.zip_code=cleaned['zip_code'] # Store password properly user.set_password(cleaned['password']) # Users need to be manually activated
{"golden_diff": "diff --git a/apps/authentication/views.py b/apps/authentication/views.py\n--- a/apps/authentication/views.py\n+++ b/apps/authentication/views.py\n@@ -54,9 +54,9 @@\n email=cleaned['email'].lower(),\n )\n # Set remaining fields\n- user.phone=cleaned['phone'],\n- user.address=cleaned['address'].title(),\n- user.zip_code=cleaned['zip_code'],\n+ user.phone=cleaned['phone']\n+ user.address=cleaned['address'].title()\n+ user.zip_code=cleaned['zip_code']\n # Store password properly\n user.set_password(cleaned['password'])\n # Users need to be manually activated\n", "issue": "Registering user messes up values of address and zip code\nFor example trying to register with the zip code 1234 will add the value (u'1234',) to the database. Exact same thing with address. \n\nThis also causes the zip code to go over the 4 character limit which again causes the database (if it's not SQLite) to throw an error. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport uuid\nimport re\n\nfrom django.contrib import auth\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\n\nfrom django.conf import settings\nfrom apps.authentication.forms import (LoginForm, RegisterForm, \n RecoveryForm, ChangePasswordForm)\nfrom apps.authentication.models import OnlineUser, RegisterToken, Email\n\ndef login(request):\n redirect_url = request.REQUEST.get('next', '')\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.login(request):\n messages.success(request, _(u'Du er n\u00e5 logget inn.'))\n if redirect_url:\n return HttpResponseRedirect(redirect_url)\n return HttpResponseRedirect('/')\n else: form = LoginForm(request.POST, auto_id=True)\n else:\n form = LoginForm()\n\n response_dict = { 'form' : form, 'next' : redirect_url}\n return render(request, 'auth/login.html', response_dict)\n\ndef logout(request):\n auth.logout(request)\n messages.success(request, _(u'Du er n\u00e5 logget ut.'))\n return HttpResponseRedirect('/')\n\ndef register(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n # Create user\n user = OnlineUser(\n username=cleaned['username'].lower(), \n first_name=cleaned['first_name'].title(), \n last_name=cleaned['last_name'].title(),\n email=cleaned['email'].lower(),\n )\n # Set remaining fields\n user.phone=cleaned['phone'],\n user.address=cleaned['address'].title(),\n user.zip_code=cleaned['zip_code'],\n # Store password properly\n user.set_password(cleaned['password'])\n # Users need to be manually activated\n user.is_active = False\n\n user.save()\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, email=cleaned['email'], token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nEn konto har blitt registrert p\u00e5 online.ntnu.no med denne epostadressen. Dersom du ikke\nhar utf\u00f8rt denne handlingen ber vi deg se bort fra denne eposten.\n\nFor \u00e5 bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gj\u00f8re\ndette ved \u00e5 bes\u00f8ke linken under.\n\nhttp://%s/auth/verify/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. 
Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [user.email,])\n\n messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))\n\n return HttpResponseRedirect('/') \n else:\n form = RegisterForm(request.POST, auto_id=True)\n else:\n form = RegisterForm()\n\n return render(request, 'auth/register.html', {'form': form, })\n\ndef verify(request, token):\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n email = get_object_or_404(Email, email=rt.email)\n email.verified = True\n email.save()\n \n user = getattr(rt, 'user')\n\n # If it is a stud email, set the ntnu_username for user\n if re.match(r'[^@][email protected]', rt.email):\n user.ntnu_username = rt.email.split(\"@\")[0]\n\n user_activated = False\n if not user.is_active:\n user.is_active = True\n user_activated = True\n\n user.save()\n rt.delete()\n\n if user_activated:\n messages.success(request, _(u'Bruker %s ble aktivert. Du kan n\u00e5 logge inn.') % user.username)\n return redirect('auth_login')\n else:\n messages.success(request, _(u'Eposten %s er n\u00e5 verifisert.') % email)\n return redirect('profiles')\n else:\n messages.error(request, _(u'Denne lenken er utl\u00f8pt. Bruk gjenopprett passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n \n\ndef recover(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RecoveryForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n users = OnlineUser.objects.filter(email=email)\n\n if len(users) == 0:\n messages.error(request, _(u'Denne eposten er ikke registrert i v\u00e5re systemer.'))\n return HttpResponseRedirect('/') \n\n user = users[0]\n user.save()\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nVi har mottat foresp\u00f8rsel om \u00e5 gjenopprette passordet for kontoen bundet til %s.\nDersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.\n\nBrukernavn: %s\n\nHvis du \u00f8nsker \u00e5 gjennomf\u00f8re en gjenoppretning av passord, bruk lenken under.\n\nhttp://%s/auth/set_password/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. 
Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (email, user.username, request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email,])\n\n messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email)\n\n return HttpResponseRedirect('/') \n else:\n form = RecoveryForm(request.POST, auto_id=True)\n else:\n form = RecoveryForm()\n\n return render(request, 'auth/recover.html', {'form': form})\n\ndef set_password(request, token=None): \n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n else:\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n if request.method == 'POST':\n form = ChangePasswordForm(request.POST, auto_id=True)\n if form.is_valid():\n user = getattr(rt, 'user')\n\n user.is_active = True\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n \n rt.delete()\n\n messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. Du kan n\u00e5 logge inn.') % user)\n \n return HttpResponseRedirect('/') \n else:\n \n form = ChangePasswordForm()\n\n messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n\n return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n\n else:\n messages.error(request, _(u'Lenken er utl\u00f8pt. Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n", "path": "apps/authentication/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport uuid\nimport re\n\nfrom django.contrib import auth\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\n\nfrom django.conf import settings\nfrom apps.authentication.forms import (LoginForm, RegisterForm, \n RecoveryForm, ChangePasswordForm)\nfrom apps.authentication.models import OnlineUser, RegisterToken, Email\n\ndef login(request):\n redirect_url = request.REQUEST.get('next', '')\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.login(request):\n messages.success(request, _(u'Du er n\u00e5 logget inn.'))\n if redirect_url:\n return HttpResponseRedirect(redirect_url)\n return HttpResponseRedirect('/')\n else: form = LoginForm(request.POST, auto_id=True)\n else:\n form = LoginForm()\n\n response_dict = { 'form' : form, 'next' : redirect_url}\n return render(request, 'auth/login.html', response_dict)\n\ndef logout(request):\n auth.logout(request)\n messages.success(request, _(u'Du er n\u00e5 logget ut.'))\n return HttpResponseRedirect('/')\n\ndef register(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n # Create user\n user = OnlineUser(\n username=cleaned['username'].lower(), \n first_name=cleaned['first_name'].title(), \n last_name=cleaned['last_name'].title(),\n email=cleaned['email'].lower(),\n )\n # Set remaining fields\n user.phone=cleaned['phone']\n user.address=cleaned['address'].title()\n user.zip_code=cleaned['zip_code']\n # 
Store password properly\n user.set_password(cleaned['password'])\n # Users need to be manually activated\n user.is_active = False\n\n user.save()\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, email=cleaned['email'], token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nEn konto har blitt registrert p\u00e5 online.ntnu.no med denne epostadressen. Dersom du ikke\nhar utf\u00f8rt denne handlingen ber vi deg se bort fra denne eposten.\n\nFor \u00e5 bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gj\u00f8re\ndette ved \u00e5 bes\u00f8ke linken under.\n\nhttp://%s/auth/verify/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [user.email,])\n\n messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))\n\n return HttpResponseRedirect('/') \n else:\n form = RegisterForm(request.POST, auto_id=True)\n else:\n form = RegisterForm()\n\n return render(request, 'auth/register.html', {'form': form, })\n\ndef verify(request, token):\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n email = get_object_or_404(Email, email=rt.email)\n email.verified = True\n email.save()\n \n user = getattr(rt, 'user')\n\n # If it is a stud email, set the ntnu_username for user\n if re.match(r'[^@][email protected]', rt.email):\n user.ntnu_username = rt.email.split(\"@\")[0]\n\n user_activated = False\n if not user.is_active:\n user.is_active = True\n user_activated = True\n\n user.save()\n rt.delete()\n\n if user_activated:\n messages.success(request, _(u'Bruker %s ble aktivert. Du kan n\u00e5 logge inn.') % user.username)\n return redirect('auth_login')\n else:\n messages.success(request, _(u'Eposten %s er n\u00e5 verifisert.') % email)\n return redirect('profiles')\n else:\n messages.error(request, _(u'Denne lenken er utl\u00f8pt. Bruk gjenopprett passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n \n\ndef recover(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RecoveryForm(request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n users = OnlineUser.objects.filter(email=email)\n\n if len(users) == 0:\n messages.error(request, _(u'Denne eposten er ikke registrert i v\u00e5re systemer.'))\n return HttpResponseRedirect('/') \n\n user = users[0]\n user.save()\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nVi har mottat foresp\u00f8rsel om \u00e5 gjenopprette passordet for kontoen bundet til %s.\nDersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.\n\nBrukernavn: %s\n\nHvis du \u00f8nsker \u00e5 gjennomf\u00f8re en gjenoppretning av passord, bruk lenken under.\n\nhttp://%s/auth/set_password/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. 
Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (email, user.username, request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email,])\n\n messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email)\n\n return HttpResponseRedirect('/') \n else:\n form = RecoveryForm(request.POST, auto_id=True)\n else:\n form = RecoveryForm()\n\n return render(request, 'auth/recover.html', {'form': form})\n\ndef set_password(request, token=None): \n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n else:\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n if request.method == 'POST':\n form = ChangePasswordForm(request.POST, auto_id=True)\n if form.is_valid():\n user = getattr(rt, 'user')\n\n user.is_active = True\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n \n rt.delete()\n\n messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. Du kan n\u00e5 logge inn.') % user)\n \n return HttpResponseRedirect('/') \n else:\n \n form = ChangePasswordForm()\n\n messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n\n return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n\n else:\n messages.error(request, _(u'Lenken er utl\u00f8pt. Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n", "path": "apps/authentication/views.py"}]}
num_tokens: 2,653
num_tokens_diff: 150
problem_id: gh_patches_debug_730
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__pytorch-116517
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing packaging dependency in torch 2.1.x ### πŸ› Describe the bug Hi, [torch.utils.tensorboard requires "packaging"](https://github.com/pytorch/pytorch/blob/fa1ccc34c4f65756bc50c3e3ab135c88b175b18c/torch/utils/tensorboard/__init__.py#L2C1-L3C1) to be installed but that dependency is [missing on torch 2.1.x](https://github.com/pytorch/pytorch/blob/v2.1.2-rc1/requirements.txt). Here's some example code: ```python from torch.utils.tensorboard import SummaryWriter ``` The links above point to a RC version of 2.1.2 but this is also the case for 2.1.1. Would it be possible to make a patch release to add the dependency? ### Versions Python version: 3.9.16 (main, Dec 7 2022, 10:16:11) [Clang 14.0.0 (clang-140[0.0.29.202](http://0.0.29.202/) )] (64-bit runtime) Python platform: macOS-13.6.1-x86_64-i386-64bit Is CUDA available: False CUDA runtime version: No CUDA CUDA_MODULE_LOADING set to: N/A GPU models and configuration: No CUDA Nvidia driver version: No CUDA cuDNN version: No CUDA HIP runtime version: N/A MIOpen runtime version: N/A Is XNNPACK available: True CPU: Intel(R) Core(TM) i5-7287U CPU @ 3.30GHz Versions of relevant libraries: [pip3] numpy==1.23.5 [pip3] torch==2.1.1 [pip3] torchvision==0.16.1 [conda] Could not collect --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torch/utils/tensorboard/__init__.py` Content: ``` 1 import tensorboard 2 from packaging.version import Version 3 4 if not hasattr(tensorboard, "__version__") or Version( 5 tensorboard.__version__ 6 ) < Version("1.15"): 7 raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above") 8 9 del Version 10 del tensorboard 11 12 from .writer import FileWriter, SummaryWriter # noqa: F401 13 from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401 14 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torch/utils/tensorboard/__init__.py b/torch/utils/tensorboard/__init__.py --- a/torch/utils/tensorboard/__init__.py +++ b/torch/utils/tensorboard/__init__.py @@ -1,5 +1,5 @@ import tensorboard -from packaging.version import Version +from torch._vendor.packaging.version import Version if not hasattr(tensorboard, "__version__") or Version( tensorboard.__version__
{"golden_diff": "diff --git a/torch/utils/tensorboard/__init__.py b/torch/utils/tensorboard/__init__.py\n--- a/torch/utils/tensorboard/__init__.py\n+++ b/torch/utils/tensorboard/__init__.py\n@@ -1,5 +1,5 @@\n import tensorboard\n-from packaging.version import Version\n+from torch._vendor.packaging.version import Version\n \n if not hasattr(tensorboard, \"__version__\") or Version(\n tensorboard.__version__\n", "issue": "Missing packaging dependency in torch 2.1.x\n### \ud83d\udc1b Describe the bug\n\nHi,\r\n[torch.utils.tensorboard requires \"packaging\"](https://github.com/pytorch/pytorch/blob/fa1ccc34c4f65756bc50c3e3ab135c88b175b18c/torch/utils/tensorboard/__init__.py#L2C1-L3C1) to be installed but that dependency is [missing on torch 2.1.x](https://github.com/pytorch/pytorch/blob/v2.1.2-rc1/requirements.txt).\r\n\r\nHere's some example code:\r\n```python\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n```\r\n\r\nThe links above point to a RC version of 2.1.2 but this is also the case for 2.1.1. Would it be possible to make a patch release to add the dependency?\n\n### Versions\n\nPython version: 3.9.16 (main, Dec 7 2022, 10:16:11) [Clang 14.0.0 (clang-140[0.0.29.202](http://0.0.29.202/)\r\n)] (64-bit runtime)\r\nPython platform: macOS-13.6.1-x86_64-i386-64bit\r\nIs CUDA available: False\r\nCUDA runtime version: No CUDA\r\nCUDA_MODULE_LOADING set to: N/A\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nIntel(R) Core(TM) i5-7287U CPU @ 3.30GHz\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.23.5\r\n[pip3] torch==2.1.1\r\n[pip3] torchvision==0.16.1\r\n[conda] Could not collect\n", "before_files": [{"content": "import tensorboard\nfrom packaging.version import Version\n\nif not hasattr(tensorboard, \"__version__\") or Version(\n tensorboard.__version__\n) < Version(\"1.15\"):\n raise ImportError(\"TensorBoard logging requires TensorBoard version 1.15 or above\")\n\ndel Version\ndel tensorboard\n\nfrom .writer import FileWriter, SummaryWriter # noqa: F401\nfrom tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401\n", "path": "torch/utils/tensorboard/__init__.py"}], "after_files": [{"content": "import tensorboard\nfrom torch._vendor.packaging.version import Version\n\nif not hasattr(tensorboard, \"__version__\") or Version(\n tensorboard.__version__\n) < Version(\"1.15\"):\n raise ImportError(\"TensorBoard logging requires TensorBoard version 1.15 or above\")\n\ndel Version\ndel tensorboard\n\nfrom .writer import FileWriter, SummaryWriter # noqa: F401\nfrom tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401\n", "path": "torch/utils/tensorboard/__init__.py"}]}
num_tokens: 840
num_tokens_diff: 102
problem_id: gh_patches_debug_21259
source: rasdani/github-patches
task_type: git_diff
in_source_id: archlinux__archinstall-914
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- desktop profiles, replace pipewire-media-session with wireplumber, wireplumber seembs to be the newest replacement media-session replaceing pipewire-media-session hi, would it be possible to replace pipewire-media-session with wireplumber? or, to make things flexable, add a question in asking, "would you like to use wireplumber or pipewire-media-session? p.s i'm using wireplumber currently. wireplumber will remove pipewire-media-session and the systemd units. a total replacement. thank you for considdering my request! Majid Hussain --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `profiles/applications/pipewire.py` Content: ``` 1 import archinstall 2 import logging 3 4 # Define the package list in order for lib to source 5 # which packages will be installed by this profile 6 __packages__ = ["pipewire", "pipewire-alsa", "pipewire-jack", "pipewire-media-session", "pipewire-pulse", "gst-plugin-pipewire", "libpulse"] 7 8 archinstall.log('Installing pipewire', level=logging.INFO) 9 archinstall.storage['installation_session'].add_additional_packages(__packages__) 10 11 @archinstall.plugin 12 def on_user_created(installation :archinstall.Installer, user :str): 13 archinstall.log(f"Enabling pipewire-pulse for {user}", level=logging.INFO) 14 installation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user) ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/profiles/applications/pipewire.py b/profiles/applications/pipewire.py --- a/profiles/applications/pipewire.py +++ b/profiles/applications/pipewire.py @@ -3,7 +3,7 @@ # Define the package list in order for lib to source # which packages will be installed by this profile -__packages__ = ["pipewire", "pipewire-alsa", "pipewire-jack", "pipewire-media-session", "pipewire-pulse", "gst-plugin-pipewire", "libpulse"] +__packages__ = ["pipewire", "pipewire-alsa", "pipewire-jack", "pipewire-pulse", "gst-plugin-pipewire", "libpulse", "wireplumber"] archinstall.log('Installing pipewire', level=logging.INFO) archinstall.storage['installation_session'].add_additional_packages(__packages__) @@ -11,4 +11,4 @@ @archinstall.plugin def on_user_created(installation :archinstall.Installer, user :str): archinstall.log(f"Enabling pipewire-pulse for {user}", level=logging.INFO) - installation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user) \ No newline at end of file + installation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)
{"golden_diff": "diff --git a/profiles/applications/pipewire.py b/profiles/applications/pipewire.py\n--- a/profiles/applications/pipewire.py\n+++ b/profiles/applications/pipewire.py\n@@ -3,7 +3,7 @@\n \n # Define the package list in order for lib to source\n # which packages will be installed by this profile\n-__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-media-session\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\"]\n+__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\", \"wireplumber\"]\n \n archinstall.log('Installing pipewire', level=logging.INFO)\n archinstall.storage['installation_session'].add_additional_packages(__packages__)\n@@ -11,4 +11,4 @@\n @archinstall.plugin\n def on_user_created(installation :archinstall.Installer, user :str):\n \tarchinstall.log(f\"Enabling pipewire-pulse for {user}\", level=logging.INFO)\n-\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)\n\\ No newline at end of file\n+\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)\n", "issue": "desktop profiles, replace pipewire-media-session with wireplumber, wireplumber seembs to be the newest replacement media-session replaceing pipewire-media-session\nhi,\r\nwould it be possible to replace pipewire-media-session with wireplumber?\r\nor, to make things flexable,\r\nadd a question in asking,\r\n\"would you like to use wireplumber or pipewire-media-session?\r\np.s\r\ni'm using wireplumber currently.\r\nwireplumber will remove pipewire-media-session and the systemd units.\r\na total replacement.\r\nthank you for considdering my request!\r\nMajid Hussain\n", "before_files": [{"content": "import archinstall\nimport logging\n\n# Define the package list in order for lib to source\n# which packages will be installed by this profile\n__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-media-session\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\"]\n\narchinstall.log('Installing pipewire', level=logging.INFO)\narchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\[email protected]\ndef on_user_created(installation :archinstall.Installer, user :str):\n\tarchinstall.log(f\"Enabling pipewire-pulse for {user}\", level=logging.INFO)\n\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)", "path": "profiles/applications/pipewire.py"}], "after_files": [{"content": "import archinstall\nimport logging\n\n# Define the package list in order for lib to source\n# which packages will be installed by this profile\n__packages__ = [\"pipewire\", \"pipewire-alsa\", \"pipewire-jack\", \"pipewire-pulse\", \"gst-plugin-pipewire\", \"libpulse\", \"wireplumber\"]\n\narchinstall.log('Installing pipewire', level=logging.INFO)\narchinstall.storage['installation_session'].add_additional_packages(__packages__)\n\[email protected]\ndef on_user_created(installation :archinstall.Installer, user :str):\n\tarchinstall.log(f\"Enabling pipewire-pulse for {user}\", level=logging.INFO)\n\tinstallation.chroot('systemctl enable --user pipewire-pulse.service', run_as=user)\n", "path": "profiles/applications/pipewire.py"}]}
num_tokens: 565
num_tokens_diff: 295
problem_id: gh_patches_debug_33401
source: rasdani/github-patches
task_type: git_diff
in_source_id: paperless-ngx__paperless-ngx-3198
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Tags user has no permissions for show as "Private" after file upload ### Description When a user uploads a file that triggers Tag-Rules of other users the "detected" Tags show up as Private as the uploading user does not have the permissions to view these Tags. (The image is german so Privat = Private) ![s8u2Hd0b07ZqJ5n5](https://user-images.githubusercontent.com/46486021/234629541-7e37e389-c3b6-4ac6-bb4a-f201ecd6f2af.png) I don't know if that is a feature or not ... for me its confusing. ("Neu" is the Inbox-Tag ... the only one that should show here) ### Steps to reproduce 1. Login as User A 2. Create Tag "Test" with rule to add when "Test" is detected as a Word in the Document 3. Set Permission of this Tag to User A only 4. Login as User B 5. Upload a file with the word "Test" in it 6. See a Private-Tag added by paperless ### Webserver logs ```bash - ``` ### Browser logs ```bash - ``` ### Paperless-ngx version 1.14.0 ### Host OS Synology DSM 7.1.1-42962 Update 5 ### Installation method Docker - official image ### Browser Chrome ### Configuration changes - ### Other - --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/documents/matching.py` Content: ``` 1 import logging 2 import re 3 4 from documents.models import Correspondent 5 from documents.models import DocumentType 6 from documents.models import MatchingModel 7 from documents.models import StoragePath 8 from documents.models import Tag 9 from documents.permissions import get_objects_for_user_owner_aware 10 11 logger = logging.getLogger("paperless.matching") 12 13 14 def log_reason(matching_model, document, reason): 15 class_name = type(matching_model).__name__ 16 logger.debug( 17 f"{class_name} {matching_model.name} matched on document " 18 f"{document} because {reason}", 19 ) 20 21 22 def match_correspondents(document, classifier, user=None): 23 pred_id = classifier.predict_correspondent(document.content) if classifier else None 24 25 if user is not None: 26 correspondents = get_objects_for_user_owner_aware( 27 user, 28 "documents.view_correspondent", 29 Correspondent, 30 ) 31 else: 32 correspondents = Correspondent.objects.all() 33 34 return list( 35 filter(lambda o: matches(o, document) or o.pk == pred_id, correspondents), 36 ) 37 38 39 def match_document_types(document, classifier, user=None): 40 pred_id = classifier.predict_document_type(document.content) if classifier else None 41 42 if user is not None: 43 document_types = get_objects_for_user_owner_aware( 44 user, 45 "documents.view_documenttype", 46 DocumentType, 47 ) 48 else: 49 document_types = DocumentType.objects.all() 50 51 return list( 52 filter(lambda o: matches(o, document) or o.pk == pred_id, document_types), 53 ) 54 55 56 def match_tags(document, classifier, user=None): 57 predicted_tag_ids = classifier.predict_tags(document.content) if classifier else [] 58 59 if user is not None: 60 tags = get_objects_for_user_owner_aware(user, "documents.view_tag", Tag) 61 else: 62 tags = Tag.objects.all() 63 64 return list( 65 filter(lambda o: matches(o, document) or o.pk in predicted_tag_ids, tags), 66 ) 67 68 69 def match_storage_paths(document, classifier, user=None): 70 pred_id = classifier.predict_storage_path(document.content) if classifier else None 71 72 if user is not None: 73 storage_paths = get_objects_for_user_owner_aware( 74 user, 75 
"documents.view_storagepath", 76 StoragePath, 77 ) 78 else: 79 storage_paths = StoragePath.objects.all() 80 81 return list( 82 filter( 83 lambda o: matches(o, document) or o.pk == pred_id, 84 storage_paths, 85 ), 86 ) 87 88 89 def matches(matching_model, document): 90 search_kwargs = {} 91 92 document_content = document.content 93 94 # Check that match is not empty 95 if not matching_model.match.strip(): 96 return False 97 98 if matching_model.is_insensitive: 99 search_kwargs = {"flags": re.IGNORECASE} 100 101 if matching_model.matching_algorithm == MatchingModel.MATCH_NONE: 102 return False 103 104 elif matching_model.matching_algorithm == MatchingModel.MATCH_ALL: 105 for word in _split_match(matching_model): 106 search_result = re.search(rf"\b{word}\b", document_content, **search_kwargs) 107 if not search_result: 108 return False 109 log_reason( 110 matching_model, 111 document, 112 f"it contains all of these words: {matching_model.match}", 113 ) 114 return True 115 116 elif matching_model.matching_algorithm == MatchingModel.MATCH_ANY: 117 for word in _split_match(matching_model): 118 if re.search(rf"\b{word}\b", document_content, **search_kwargs): 119 log_reason(matching_model, document, f"it contains this word: {word}") 120 return True 121 return False 122 123 elif matching_model.matching_algorithm == MatchingModel.MATCH_LITERAL: 124 result = bool( 125 re.search( 126 rf"\b{re.escape(matching_model.match)}\b", 127 document_content, 128 **search_kwargs, 129 ), 130 ) 131 if result: 132 log_reason( 133 matching_model, 134 document, 135 f'it contains this string: "{matching_model.match}"', 136 ) 137 return result 138 139 elif matching_model.matching_algorithm == MatchingModel.MATCH_REGEX: 140 try: 141 match = re.search( 142 re.compile(matching_model.match, **search_kwargs), 143 document_content, 144 ) 145 except re.error: 146 logger.error( 147 f"Error while processing regular expression {matching_model.match}", 148 ) 149 return False 150 if match: 151 log_reason( 152 matching_model, 153 document, 154 f"the string {match.group()} matches the regular expression " 155 f"{matching_model.match}", 156 ) 157 return bool(match) 158 159 elif matching_model.matching_algorithm == MatchingModel.MATCH_FUZZY: 160 from rapidfuzz import fuzz 161 162 match = re.sub(r"[^\w\s]", "", matching_model.match) 163 text = re.sub(r"[^\w\s]", "", document_content) 164 if matching_model.is_insensitive: 165 match = match.lower() 166 text = text.lower() 167 if fuzz.partial_ratio(match, text, score_cutoff=90): 168 # TODO: make this better 169 log_reason( 170 matching_model, 171 document, 172 f"parts of the document content somehow match the string " 173 f"{matching_model.match}", 174 ) 175 return True 176 else: 177 return False 178 179 elif matching_model.matching_algorithm == MatchingModel.MATCH_AUTO: 180 # this is done elsewhere. 181 return False 182 183 else: 184 raise NotImplementedError("Unsupported matching algorithm") 185 186 187 def _split_match(matching_model): 188 """ 189 Splits the match to individual keywords, getting rid of unnecessary 190 spaces and grouping quoted words together. 
191 192 Example: 193 ' some random words "with quotes " and spaces' 194 ==> 195 ["some", "random", "words", "with+quotes", "and", "spaces"] 196 """ 197 findterms = re.compile(r'"([^"]+)"|(\S+)').findall 198 normspace = re.compile(r"\s+").sub 199 return [ 200 # normspace(" ", (t[0] or t[1]).strip()).replace(" ", r"\s+") 201 re.escape(normspace(" ", (t[0] or t[1]).strip())).replace(r"\ ", r"\s+") 202 for t in findterms(matching_model.match) 203 ] 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/documents/matching.py b/src/documents/matching.py --- a/src/documents/matching.py +++ b/src/documents/matching.py @@ -22,6 +22,9 @@ def match_correspondents(document, classifier, user=None): pred_id = classifier.predict_correspondent(document.content) if classifier else None + if user is None and document.owner is not None: + user = document.owner + if user is not None: correspondents = get_objects_for_user_owner_aware( user, @@ -39,6 +42,9 @@ def match_document_types(document, classifier, user=None): pred_id = classifier.predict_document_type(document.content) if classifier else None + if user is None and document.owner is not None: + user = document.owner + if user is not None: document_types = get_objects_for_user_owner_aware( user, @@ -56,6 +62,9 @@ def match_tags(document, classifier, user=None): predicted_tag_ids = classifier.predict_tags(document.content) if classifier else [] + if user is None and document.owner is not None: + user = document.owner + if user is not None: tags = get_objects_for_user_owner_aware(user, "documents.view_tag", Tag) else: @@ -69,6 +78,9 @@ def match_storage_paths(document, classifier, user=None): pred_id = classifier.predict_storage_path(document.content) if classifier else None + if user is None and document.owner is not None: + user = document.owner + if user is not None: storage_paths = get_objects_for_user_owner_aware( user,
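The patch above works because each matcher now falls back to the document's owner whenever no explicit user is passed, so rule-based matching only ever proposes objects that owner is allowed to see. Below is a minimal standalone sketch of that owner-fallback idea; the `Tag`/`Document` classes and helper names are illustrative stand-ins, not paperless-ngx models.

```python
# Standalone sketch of the owner-fallback fix (illustrative stand-ins, not paperless models).
from dataclasses import dataclass
from typing import List, Optional


@dataclass(frozen=True)
class Tag:
    name: str
    owner: Optional[str] = None  # None = visible to everyone


@dataclass
class Document:
    content: str
    owner: Optional[str] = None


def visible_tags(all_tags: List[Tag], user: Optional[str]) -> List[Tag]:
    """Owner-aware view: a user sees unowned tags plus their own."""
    return [t for t in all_tags if t.owner is None or t.owner == user]


def match_tags(document: Document, all_tags: List[Tag], user: Optional[str] = None) -> List[Tag]:
    if user is None and document.owner is not None:
        user = document.owner  # the fix: evaluate tag rules with the owner's permissions
    candidates = visible_tags(all_tags, user) if user is not None else all_tags
    return [t for t in candidates if t.name.lower() in document.content.lower()]


tags = [Tag("Test", owner="user_a"), Tag("Neu")]           # "Test" is private to user A
doc = Document("Ein neues Test Dokument", owner="user_b")  # uploaded by user B

# With the fallback the private "Test" tag is never proposed; without it (user stays
# None) it would match as well, which is the tag that showed up as "Privat" in the UI.
print([t.name for t in match_tags(doc, tags)])  # ['Neu']
```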
{"golden_diff": "diff --git a/src/documents/matching.py b/src/documents/matching.py\n--- a/src/documents/matching.py\n+++ b/src/documents/matching.py\n@@ -22,6 +22,9 @@\n def match_correspondents(document, classifier, user=None):\n pred_id = classifier.predict_correspondent(document.content) if classifier else None\n \n+ if user is None and document.owner is not None:\n+ user = document.owner\n+\n if user is not None:\n correspondents = get_objects_for_user_owner_aware(\n user,\n@@ -39,6 +42,9 @@\n def match_document_types(document, classifier, user=None):\n pred_id = classifier.predict_document_type(document.content) if classifier else None\n \n+ if user is None and document.owner is not None:\n+ user = document.owner\n+\n if user is not None:\n document_types = get_objects_for_user_owner_aware(\n user,\n@@ -56,6 +62,9 @@\n def match_tags(document, classifier, user=None):\n predicted_tag_ids = classifier.predict_tags(document.content) if classifier else []\n \n+ if user is None and document.owner is not None:\n+ user = document.owner\n+\n if user is not None:\n tags = get_objects_for_user_owner_aware(user, \"documents.view_tag\", Tag)\n else:\n@@ -69,6 +78,9 @@\n def match_storage_paths(document, classifier, user=None):\n pred_id = classifier.predict_storage_path(document.content) if classifier else None\n \n+ if user is None and document.owner is not None:\n+ user = document.owner\n+\n if user is not None:\n storage_paths = get_objects_for_user_owner_aware(\n user,\n", "issue": "[BUG] Tags user has no permissions for show as \"Private\" after file upload\n### Description\n\nWhen a user uploads a file that triggers Tag-Rules of other users the \"detected\" Tags show up as Private as the uploading user does not have the permissions to view these Tags.\r\n\r\n(The image is german so Privat = Private)\r\n![s8u2Hd0b07ZqJ5n5](https://user-images.githubusercontent.com/46486021/234629541-7e37e389-c3b6-4ac6-bb4a-f201ecd6f2af.png)\r\n\r\nI don't know if that is a feature or not ... for me its confusing.\r\n\r\n(\"Neu\" is the Inbox-Tag ... the only one that should show here)\n\n### Steps to reproduce\n\n1. Login as User A\r\n2. Create Tag \"Test\" with rule to add when \"Test\" is detected as a Word in the Document\r\n3. Set Permission of this Tag to User A only\r\n4. Login as User B\r\n5. Upload a file with the word \"Test\" in it\r\n6. 
See a Private-Tag added by paperless\n\n### Webserver logs\n\n```bash\n-\n```\n\n\n### Browser logs\n\n```bash\n-\n```\n\n\n### Paperless-ngx version\n\n1.14.0\n\n### Host OS\n\nSynology DSM 7.1.1-42962 Update 5\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\n-\n\n### Other\n\n-\n", "before_files": [{"content": "import logging\nimport re\n\nfrom documents.models import Correspondent\nfrom documents.models import DocumentType\nfrom documents.models import MatchingModel\nfrom documents.models import StoragePath\nfrom documents.models import Tag\nfrom documents.permissions import get_objects_for_user_owner_aware\n\nlogger = logging.getLogger(\"paperless.matching\")\n\n\ndef log_reason(matching_model, document, reason):\n class_name = type(matching_model).__name__\n logger.debug(\n f\"{class_name} {matching_model.name} matched on document \"\n f\"{document} because {reason}\",\n )\n\n\ndef match_correspondents(document, classifier, user=None):\n pred_id = classifier.predict_correspondent(document.content) if classifier else None\n\n if user is not None:\n correspondents = get_objects_for_user_owner_aware(\n user,\n \"documents.view_correspondent\",\n Correspondent,\n )\n else:\n correspondents = Correspondent.objects.all()\n\n return list(\n filter(lambda o: matches(o, document) or o.pk == pred_id, correspondents),\n )\n\n\ndef match_document_types(document, classifier, user=None):\n pred_id = classifier.predict_document_type(document.content) if classifier else None\n\n if user is not None:\n document_types = get_objects_for_user_owner_aware(\n user,\n \"documents.view_documenttype\",\n DocumentType,\n )\n else:\n document_types = DocumentType.objects.all()\n\n return list(\n filter(lambda o: matches(o, document) or o.pk == pred_id, document_types),\n )\n\n\ndef match_tags(document, classifier, user=None):\n predicted_tag_ids = classifier.predict_tags(document.content) if classifier else []\n\n if user is not None:\n tags = get_objects_for_user_owner_aware(user, \"documents.view_tag\", Tag)\n else:\n tags = Tag.objects.all()\n\n return list(\n filter(lambda o: matches(o, document) or o.pk in predicted_tag_ids, tags),\n )\n\n\ndef match_storage_paths(document, classifier, user=None):\n pred_id = classifier.predict_storage_path(document.content) if classifier else None\n\n if user is not None:\n storage_paths = get_objects_for_user_owner_aware(\n user,\n \"documents.view_storagepath\",\n StoragePath,\n )\n else:\n storage_paths = StoragePath.objects.all()\n\n return list(\n filter(\n lambda o: matches(o, document) or o.pk == pred_id,\n storage_paths,\n ),\n )\n\n\ndef matches(matching_model, document):\n search_kwargs = {}\n\n document_content = document.content\n\n # Check that match is not empty\n if not matching_model.match.strip():\n return False\n\n if matching_model.is_insensitive:\n search_kwargs = {\"flags\": re.IGNORECASE}\n\n if matching_model.matching_algorithm == MatchingModel.MATCH_NONE:\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_ALL:\n for word in _split_match(matching_model):\n search_result = re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs)\n if not search_result:\n return False\n log_reason(\n matching_model,\n document,\n f\"it contains all of these words: {matching_model.match}\",\n )\n return True\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_ANY:\n for word in _split_match(matching_model):\n if re.search(rf\"\\b{word}\\b\", 
document_content, **search_kwargs):\n log_reason(matching_model, document, f\"it contains this word: {word}\")\n return True\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_LITERAL:\n result = bool(\n re.search(\n rf\"\\b{re.escape(matching_model.match)}\\b\",\n document_content,\n **search_kwargs,\n ),\n )\n if result:\n log_reason(\n matching_model,\n document,\n f'it contains this string: \"{matching_model.match}\"',\n )\n return result\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_REGEX:\n try:\n match = re.search(\n re.compile(matching_model.match, **search_kwargs),\n document_content,\n )\n except re.error:\n logger.error(\n f\"Error while processing regular expression {matching_model.match}\",\n )\n return False\n if match:\n log_reason(\n matching_model,\n document,\n f\"the string {match.group()} matches the regular expression \"\n f\"{matching_model.match}\",\n )\n return bool(match)\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_FUZZY:\n from rapidfuzz import fuzz\n\n match = re.sub(r\"[^\\w\\s]\", \"\", matching_model.match)\n text = re.sub(r\"[^\\w\\s]\", \"\", document_content)\n if matching_model.is_insensitive:\n match = match.lower()\n text = text.lower()\n if fuzz.partial_ratio(match, text, score_cutoff=90):\n # TODO: make this better\n log_reason(\n matching_model,\n document,\n f\"parts of the document content somehow match the string \"\n f\"{matching_model.match}\",\n )\n return True\n else:\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_AUTO:\n # this is done elsewhere.\n return False\n\n else:\n raise NotImplementedError(\"Unsupported matching algorithm\")\n\n\ndef _split_match(matching_model):\n \"\"\"\n Splits the match to individual keywords, getting rid of unnecessary\n spaces and grouping quoted words together.\n\n Example:\n ' some random words \"with quotes \" and spaces'\n ==>\n [\"some\", \"random\", \"words\", \"with+quotes\", \"and\", \"spaces\"]\n \"\"\"\n findterms = re.compile(r'\"([^\"]+)\"|(\\S+)').findall\n normspace = re.compile(r\"\\s+\").sub\n return [\n # normspace(\" \", (t[0] or t[1]).strip()).replace(\" \", r\"\\s+\")\n re.escape(normspace(\" \", (t[0] or t[1]).strip())).replace(r\"\\ \", r\"\\s+\")\n for t in findterms(matching_model.match)\n ]\n", "path": "src/documents/matching.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom documents.models import Correspondent\nfrom documents.models import DocumentType\nfrom documents.models import MatchingModel\nfrom documents.models import StoragePath\nfrom documents.models import Tag\nfrom documents.permissions import get_objects_for_user_owner_aware\n\nlogger = logging.getLogger(\"paperless.matching\")\n\n\ndef log_reason(matching_model, document, reason):\n class_name = type(matching_model).__name__\n logger.debug(\n f\"{class_name} {matching_model.name} matched on document \"\n f\"{document} because {reason}\",\n )\n\n\ndef match_correspondents(document, classifier, user=None):\n pred_id = classifier.predict_correspondent(document.content) if classifier else None\n\n if user is None and document.owner is not None:\n user = document.owner\n\n if user is not None:\n correspondents = get_objects_for_user_owner_aware(\n user,\n \"documents.view_correspondent\",\n Correspondent,\n )\n else:\n correspondents = Correspondent.objects.all()\n\n return list(\n filter(lambda o: matches(o, document) or o.pk == pred_id, correspondents),\n )\n\n\ndef match_document_types(document, 
classifier, user=None):\n pred_id = classifier.predict_document_type(document.content) if classifier else None\n\n if user is None and document.owner is not None:\n user = document.owner\n\n if user is not None:\n document_types = get_objects_for_user_owner_aware(\n user,\n \"documents.view_documenttype\",\n DocumentType,\n )\n else:\n document_types = DocumentType.objects.all()\n\n return list(\n filter(lambda o: matches(o, document) or o.pk == pred_id, document_types),\n )\n\n\ndef match_tags(document, classifier, user=None):\n predicted_tag_ids = classifier.predict_tags(document.content) if classifier else []\n\n if user is None and document.owner is not None:\n user = document.owner\n\n if user is not None:\n tags = get_objects_for_user_owner_aware(user, \"documents.view_tag\", Tag)\n else:\n tags = Tag.objects.all()\n\n return list(\n filter(lambda o: matches(o, document) or o.pk in predicted_tag_ids, tags),\n )\n\n\ndef match_storage_paths(document, classifier, user=None):\n pred_id = classifier.predict_storage_path(document.content) if classifier else None\n\n if user is None and document.owner is not None:\n user = document.owner\n\n if user is not None:\n storage_paths = get_objects_for_user_owner_aware(\n user,\n \"documents.view_storagepath\",\n StoragePath,\n )\n else:\n storage_paths = StoragePath.objects.all()\n\n return list(\n filter(\n lambda o: matches(o, document) or o.pk == pred_id,\n storage_paths,\n ),\n )\n\n\ndef matches(matching_model, document):\n search_kwargs = {}\n\n document_content = document.content\n\n # Check that match is not empty\n if not matching_model.match.strip():\n return False\n\n if matching_model.is_insensitive:\n search_kwargs = {\"flags\": re.IGNORECASE}\n\n if matching_model.matching_algorithm == MatchingModel.MATCH_NONE:\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_ALL:\n for word in _split_match(matching_model):\n search_result = re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs)\n if not search_result:\n return False\n log_reason(\n matching_model,\n document,\n f\"it contains all of these words: {matching_model.match}\",\n )\n return True\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_ANY:\n for word in _split_match(matching_model):\n if re.search(rf\"\\b{word}\\b\", document_content, **search_kwargs):\n log_reason(matching_model, document, f\"it contains this word: {word}\")\n return True\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_LITERAL:\n result = bool(\n re.search(\n rf\"\\b{re.escape(matching_model.match)}\\b\",\n document_content,\n **search_kwargs,\n ),\n )\n if result:\n log_reason(\n matching_model,\n document,\n f'it contains this string: \"{matching_model.match}\"',\n )\n return result\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_REGEX:\n try:\n match = re.search(\n re.compile(matching_model.match, **search_kwargs),\n document_content,\n )\n except re.error:\n logger.error(\n f\"Error while processing regular expression {matching_model.match}\",\n )\n return False\n if match:\n log_reason(\n matching_model,\n document,\n f\"the string {match.group()} matches the regular expression \"\n f\"{matching_model.match}\",\n )\n return bool(match)\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_FUZZY:\n from rapidfuzz import fuzz\n\n match = re.sub(r\"[^\\w\\s]\", \"\", matching_model.match)\n text = re.sub(r\"[^\\w\\s]\", \"\", document_content)\n if matching_model.is_insensitive:\n match = 
match.lower()\n text = text.lower()\n if fuzz.partial_ratio(match, text, score_cutoff=90):\n # TODO: make this better\n log_reason(\n matching_model,\n document,\n f\"parts of the document content somehow match the string \"\n f\"{matching_model.match}\",\n )\n return True\n else:\n return False\n\n elif matching_model.matching_algorithm == MatchingModel.MATCH_AUTO:\n # this is done elsewhere.\n return False\n\n else:\n raise NotImplementedError(\"Unsupported matching algorithm\")\n\n\ndef _split_match(matching_model):\n \"\"\"\n Splits the match to individual keywords, getting rid of unnecessary\n spaces and grouping quoted words together.\n\n Example:\n ' some random words \"with quotes \" and spaces'\n ==>\n [\"some\", \"random\", \"words\", \"with+quotes\", \"and\", \"spaces\"]\n \"\"\"\n findterms = re.compile(r'\"([^\"]+)\"|(\\S+)').findall\n normspace = re.compile(r\"\\s+\").sub\n return [\n # normspace(\" \", (t[0] or t[1]).strip()).replace(\" \", r\"\\s+\")\n re.escape(normspace(\" \", (t[0] or t[1]).strip())).replace(r\"\\ \", r\"\\s+\")\n for t in findterms(matching_model.match)\n ]\n", "path": "src/documents/matching.py"}]}
2441
368
gh_patches_debug_14876
rasdani/github-patches
git_diff
pyca__cryptography-2445
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cryptography.x509.Name implements __eq__ but not __hash__ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/x509/name.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import six 8 9 from cryptography import utils 10 from cryptography.x509.oid import ObjectIdentifier 11 12 13 class NameAttribute(object): 14 def __init__(self, oid, value): 15 if not isinstance(oid, ObjectIdentifier): 16 raise TypeError( 17 "oid argument must be an ObjectIdentifier instance." 18 ) 19 20 if not isinstance(value, six.text_type): 21 raise TypeError( 22 "value argument must be a text type." 23 ) 24 25 self._oid = oid 26 self._value = value 27 28 oid = utils.read_only_property("_oid") 29 value = utils.read_only_property("_value") 30 31 def __eq__(self, other): 32 if not isinstance(other, NameAttribute): 33 return NotImplemented 34 35 return ( 36 self.oid == other.oid and 37 self.value == other.value 38 ) 39 40 def __ne__(self, other): 41 return not self == other 42 43 def __repr__(self): 44 return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self) 45 46 47 class Name(object): 48 def __init__(self, attributes): 49 self._attributes = attributes 50 51 def get_attributes_for_oid(self, oid): 52 return [i for i in self if i.oid == oid] 53 54 def __eq__(self, other): 55 if not isinstance(other, Name): 56 return NotImplemented 57 58 return self._attributes == other._attributes 59 60 def __ne__(self, other): 61 return not self == other 62 63 def __iter__(self): 64 return iter(self._attributes) 65 66 def __len__(self): 67 return len(self._attributes) 68 69 def __repr__(self): 70 return "<Name({0!r})>".format(self._attributes) 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py --- a/src/cryptography/x509/name.py +++ b/src/cryptography/x509/name.py @@ -40,6 +40,9 @@ def __ne__(self, other): return not self == other + def __hash__(self): + return hash((self.oid, self.value)) + def __repr__(self): return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self) @@ -60,6 +63,11 @@ def __ne__(self, other): return not self == other + def __hash__(self): + # TODO: this is relatively expensive, if this looks like a bottleneck + # for you, consider optimizing! + return hash(tuple(self._attributes)) + def __iter__(self): return iter(self._attributes)
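A note on why the added `__hash__` methods matter: on Python 3 a class that defines `__eq__` without `__hash__` gets `__hash__` set to `None`, so `Name` and `NameAttribute` instances could not be placed in sets or used as dict keys (and on Python 2 the surviving identity hash would disagree with value equality). The patch hashes exactly the fields that `__eq__` compares, so equal objects hash equally. The toy classes below only demonstrate that language rule; they are not cryptography code.

```python
# Toy illustration of the __eq__/__hash__ contract (not cryptography code).
class WithoutHash:
    def __init__(self, oid, value):
        self.oid, self.value = oid, value

    def __eq__(self, other):
        return (self.oid, self.value) == (other.oid, other.value)


class WithHash(WithoutHash):
    def __hash__(self):
        # Hash the same fields __eq__ compares, so equal objects hash equally.
        return hash((self.oid, self.value))


print(WithoutHash.__hash__)      # None on Python 3: defining __eq__ removed the default hash
try:
    {WithoutHash("2.5.4.3", "CN")}
except TypeError as exc:
    print(exc)                   # unhashable type: 'WithoutHash'

names = {WithHash("2.5.4.3", "CN"), WithHash("2.5.4.3", "CN")}
print(len(names))                # 1: equal objects collapse to a single set entry
```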
{"golden_diff": "diff --git a/src/cryptography/x509/name.py b/src/cryptography/x509/name.py\n--- a/src/cryptography/x509/name.py\n+++ b/src/cryptography/x509/name.py\n@@ -40,6 +40,9 @@\n def __ne__(self, other):\n return not self == other\n \n+ def __hash__(self):\n+ return hash((self.oid, self.value))\n+\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n \n@@ -60,6 +63,11 @@\n def __ne__(self, other):\n return not self == other\n \n+ def __hash__(self):\n+ # TODO: this is relatively expensive, if this looks like a bottleneck\n+ # for you, consider optimizing!\n+ return hash(tuple(self._attributes))\n+\n def __iter__(self):\n return iter(self._attributes)\n", "issue": "cryptography.x509.Name implements __eq__ but not __hash__\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.x509.oid import ObjectIdentifier\n\n\nclass NameAttribute(object):\n def __init__(self, oid, value):\n if not isinstance(oid, ObjectIdentifier):\n raise TypeError(\n \"oid argument must be an ObjectIdentifier instance.\"\n )\n\n if not isinstance(value, six.text_type):\n raise TypeError(\n \"value argument must be a text type.\"\n )\n\n self._oid = oid\n self._value = value\n\n oid = utils.read_only_property(\"_oid\")\n value = utils.read_only_property(\"_value\")\n\n def __eq__(self, other):\n if not isinstance(other, NameAttribute):\n return NotImplemented\n\n return (\n self.oid == other.oid and\n self.value == other.value\n )\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((self.oid, self.value))\n\n def __repr__(self):\n return \"<NameAttribute(oid={0.oid}, value={0.value!r})>\".format(self)\n\n\nclass Name(object):\n def __init__(self, attributes):\n self._attributes = attributes\n\n def get_attributes_for_oid(self, oid):\n return [i for i in self if i.oid == oid]\n\n def __eq__(self, other):\n if not isinstance(other, Name):\n return NotImplemented\n\n return self._attributes == other._attributes\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n # TODO: this is relatively expensive, if this looks like a bottleneck\n # for you, consider optimizing!\n return hash(tuple(self._attributes))\n\n def __iter__(self):\n return iter(self._attributes)\n\n def __len__(self):\n return len(self._attributes)\n\n def __repr__(self):\n return \"<Name({0!r})>\".format(self._attributes)\n", "path": "src/cryptography/x509/name.py"}]}
845
215
gh_patches_debug_19283
rasdani/github-patches
git_diff
conda__conda-1735
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Race condition for root environment detection Periodically, when two conda processes are running at the same time, it is possible to see a race condition on determining whether the root environment is writable. Notice how the following produces two different configs from the same setup: ``` $ conda info & conda info Current conda install: platform : osx-64 conda version : 3.18.3 conda-build version : 1.18.0 python version : 2.7.10.final.0 requests version : 2.8.1 root environment : /Users/pelson/miniconda (read only) default environment : /Users/pelson/miniconda envs directories : /Users/pelson/.conda/envs /Users/pelson/envs /Users/pelson/miniconda/envs package cache : /Users/pelson/.conda/envs/.pkgs /Users/pelson/envs/.pkgs /Users/pelson/miniconda/pkgs ... Current conda install: platform : osx-64 conda version : 3.18.3 conda-build version : 1.18.0 python version : 2.7.10.final.0 requests version : 2.8.1 root environment : /Users/pelson/miniconda (writable) default environment : /Users/pelson/miniconda envs directories : /Users/pelson/miniconda/envs package cache : /Users/pelson/miniconda/pkgs ... ``` The offending line is in https://github.com/conda/conda/blob/master/conda/config.py#L135-L143 and https://github.com/conda/conda/blob/master/conda/utils.py#L43-L54. My assumption is that the `.conda-try-write` is being removed by the other process, and the exception is being raised in the `finally` block. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda/utils.py` Content: ``` 1 from __future__ import print_function, division, absolute_import 2 3 import logging 4 import sys 5 import hashlib 6 import collections 7 from functools import partial 8 from os.path import abspath, isdir, join 9 import os 10 11 log = logging.getLogger(__name__) 12 stderrlog = logging.getLogger('stderrlog') 13 14 def can_open(file): 15 """ 16 Return True if the given ``file`` can be opened for writing 17 """ 18 try: 19 fp = open(file, "ab") 20 fp.close() 21 return True 22 except IOError: 23 stderrlog.info("Unable to open %s\n" % file) 24 return False 25 26 27 def can_open_all(files): 28 """ 29 Return True if all of the provided ``files`` can be opened 30 """ 31 for f in files: 32 if not can_open(f): 33 return False 34 return True 35 36 37 def can_open_all_files_in_prefix(prefix, files): 38 """ 39 Returns True if all ``files`` at a given ``prefix`` can be opened 40 """ 41 return can_open_all((os.path.join(prefix, f) for f in files)) 42 43 def try_write(dir_path): 44 assert isdir(dir_path) 45 try: 46 try: 47 with open(join(dir_path, '.conda-try-write'), mode='wb') as fo: 48 fo.write(b'This is a test file.\n') 49 return True 50 finally: 51 # XXX: If this raises an exception it will also return False 52 os.unlink(join(dir_path, '.conda-try-write')) 53 except (IOError, OSError): 54 return False 55 56 57 def hashsum_file(path, mode='md5'): 58 h = hashlib.new(mode) 59 with open(path, 'rb') as fi: 60 while True: 61 chunk = fi.read(262144) # process chunks of 256KB 62 if not chunk: 63 break 64 h.update(chunk) 65 return h.hexdigest() 66 67 68 def md5_file(path): 69 return hashsum_file(path, 'md5') 70 71 72 def url_path(path): 73 path = abspath(path) 74 if sys.platform == 'win32': 75 path = '/' + path.replace(':', '|').replace('\\', '/') 76 return 'file://%s' % path 77 78 79 def human_bytes(n): 80 """ 81 Return the 
number of bytes n in more human readable form. 82 """ 83 if n < 1024: 84 return '%d B' % n 85 k = n/1024 86 if k < 1024: 87 return '%d KB' % round(k) 88 m = k/1024 89 if m < 1024: 90 return '%.1f MB' % m 91 g = m/1024 92 return '%.2f GB' % g 93 94 95 class memoized(object): 96 """Decorator. Caches a function's return value each time it is called. 97 If called later with the same arguments, the cached value is returned 98 (not reevaluated). 99 """ 100 def __init__(self, func): 101 self.func = func 102 self.cache = {} 103 def __call__(self, *args, **kw): 104 newargs = [] 105 for arg in args: 106 if isinstance(arg, list): 107 newargs.append(tuple(arg)) 108 elif not isinstance(arg, collections.Hashable): 109 # uncacheable. a list, for instance. 110 # better to not cache than blow up. 111 return self.func(*args, **kw) 112 else: 113 newargs.append(arg) 114 newargs = tuple(newargs) 115 key = (newargs, frozenset(kw.items())) 116 if key in self.cache: 117 return self.cache[key] 118 else: 119 value = self.func(*args, **kw) 120 self.cache[key] = value 121 return value 122 123 124 # For instance methods only 125 class memoize(object): # 577452 126 def __init__(self, func): 127 self.func = func 128 def __get__(self, obj, objtype=None): 129 if obj is None: 130 return self.func 131 return partial(self, obj) 132 def __call__(self, *args, **kw): 133 obj = args[0] 134 try: 135 cache = obj.__cache 136 except AttributeError: 137 cache = obj.__cache = {} 138 key = (self.func, args[1:], frozenset(kw.items())) 139 try: 140 res = cache[key] 141 except KeyError: 142 res = cache[key] = self.func(*args, **kw) 143 return res 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda/utils.py b/conda/utils.py --- a/conda/utils.py +++ b/conda/utils.py @@ -5,8 +5,10 @@ import hashlib import collections from functools import partial -from os.path import abspath, isdir, join +from os.path import abspath, isdir import os +import tempfile + log = logging.getLogger(__name__) stderrlog = logging.getLogger('stderrlog') @@ -43,13 +45,10 @@ def try_write(dir_path): assert isdir(dir_path) try: - try: - with open(join(dir_path, '.conda-try-write'), mode='wb') as fo: - fo.write(b'This is a test file.\n') - return True - finally: - # XXX: If this raises an exception it will also return False - os.unlink(join(dir_path, '.conda-try-write')) + with tempfile.TemporaryFile(prefix='.conda-try-write', + dir=dir_path) as fo: + fo.write(b'This is a test file.\n') + return True except (IOError, OSError): return False
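The crux of the change above: both conda processes probed writability with the same fixed file name `.conda-try-write`, so one process could unlink it before the other reached its `finally` block, and the resulting `OSError` made `try_write` report a read-only root environment. `tempfile.TemporaryFile` gives every caller its own uniquely created, self-deleting file, so concurrent probes cannot interfere. A minimal self-contained sketch of the patched probe:

```python
# Minimal sketch of the race-free writability probe used in the patch above.
import os
import tempfile


def try_write(dir_path):
    """Return True if dir_path is writable; safe to run from several processes at once."""
    assert os.path.isdir(dir_path)
    try:
        # Each call creates its own unique temporary file, removed automatically on
        # close, so two concurrent probes can no longer unlink each other's test file.
        with tempfile.TemporaryFile(prefix='.conda-try-write', dir=dir_path) as fo:
            fo.write(b'This is a test file.\n')
            return True
    except (IOError, OSError):
        return False


print(try_write(tempfile.gettempdir()))  # expected True
print(try_write('/'))                    # expected False unless running with root rights
```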
{"golden_diff": "diff --git a/conda/utils.py b/conda/utils.py\n--- a/conda/utils.py\n+++ b/conda/utils.py\n@@ -5,8 +5,10 @@\n import hashlib\n import collections\n from functools import partial\n-from os.path import abspath, isdir, join\n+from os.path import abspath, isdir\n import os\n+import tempfile\n+\n \n log = logging.getLogger(__name__)\n stderrlog = logging.getLogger('stderrlog')\n@@ -43,13 +45,10 @@\n def try_write(dir_path):\n assert isdir(dir_path)\n try:\n- try:\n- with open(join(dir_path, '.conda-try-write'), mode='wb') as fo:\n- fo.write(b'This is a test file.\\n')\n- return True\n- finally:\n- # XXX: If this raises an exception it will also return False\n- os.unlink(join(dir_path, '.conda-try-write'))\n+ with tempfile.TemporaryFile(prefix='.conda-try-write',\n+ dir=dir_path) as fo:\n+ fo.write(b'This is a test file.\\n')\n+ return True\n except (IOError, OSError):\n return False\n", "issue": "Race condition for root environment detection\nPeriodically, when two conda processes are running at the same time, it is possible to see a race condition on determining whether the root environment is writable. Notice how the following produces two different configs from the same setup:\n\n```\n$ conda info & conda info \n\nCurrent conda install:\n\n platform : osx-64\n conda version : 3.18.3\n conda-build version : 1.18.0\n python version : 2.7.10.final.0\n requests version : 2.8.1\n root environment : /Users/pelson/miniconda (read only)\n default environment : /Users/pelson/miniconda\n envs directories : /Users/pelson/.conda/envs\n /Users/pelson/envs\n /Users/pelson/miniconda/envs\n package cache : /Users/pelson/.conda/envs/.pkgs\n /Users/pelson/envs/.pkgs\n /Users/pelson/miniconda/pkgs\n...\n\nCurrent conda install:\n\n platform : osx-64\n conda version : 3.18.3\n conda-build version : 1.18.0\n python version : 2.7.10.final.0\n requests version : 2.8.1\n root environment : /Users/pelson/miniconda (writable)\n default environment : /Users/pelson/miniconda\n envs directories : /Users/pelson/miniconda/envs\n package cache : /Users/pelson/miniconda/pkgs\n...\n```\n\nThe offending line is in https://github.com/conda/conda/blob/master/conda/config.py#L135-L143 and https://github.com/conda/conda/blob/master/conda/utils.py#L43-L54. 
My assumption is that the `.conda-try-write` is being removed by the other process, and the exception is being raised in the `finally` block.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport sys\nimport hashlib\nimport collections\nfrom functools import partial\nfrom os.path import abspath, isdir, join\nimport os\n\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\ndef can_open(file):\n \"\"\"\n Return True if the given ``file`` can be opened for writing\n \"\"\"\n try:\n fp = open(file, \"ab\")\n fp.close()\n return True\n except IOError:\n stderrlog.info(\"Unable to open %s\\n\" % file)\n return False\n\n\ndef can_open_all(files):\n \"\"\"\n Return True if all of the provided ``files`` can be opened\n \"\"\"\n for f in files:\n if not can_open(f):\n return False\n return True\n\n\ndef can_open_all_files_in_prefix(prefix, files):\n \"\"\"\n Returns True if all ``files`` at a given ``prefix`` can be opened\n \"\"\"\n return can_open_all((os.path.join(prefix, f) for f in files))\n\ndef try_write(dir_path):\n assert isdir(dir_path)\n try:\n try:\n with open(join(dir_path, '.conda-try-write'), mode='wb') as fo:\n fo.write(b'This is a test file.\\n')\n return True\n finally:\n # XXX: If this raises an exception it will also return False\n os.unlink(join(dir_path, '.conda-try-write'))\n except (IOError, OSError):\n return False\n\n\ndef hashsum_file(path, mode='md5'):\n h = hashlib.new(mode)\n with open(path, 'rb') as fi:\n while True:\n chunk = fi.read(262144) # process chunks of 256KB\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef md5_file(path):\n return hashsum_file(path, 'md5')\n\n\ndef url_path(path):\n path = abspath(path)\n if sys.platform == 'win32':\n path = '/' + path.replace(':', '|').replace('\\\\', '/')\n return 'file://%s' % path\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g\n\n\nclass memoized(object):\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, collections.Hashable):\n # uncacheable. 
a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(kw.items()))\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\n# For instance methods only\nclass memoize(object): # 577452\n def __init__(self, func):\n self.func = func\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(kw.items()))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n", "path": "conda/utils.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport sys\nimport hashlib\nimport collections\nfrom functools import partial\nfrom os.path import abspath, isdir\nimport os\nimport tempfile\n\n\nlog = logging.getLogger(__name__)\nstderrlog = logging.getLogger('stderrlog')\n\ndef can_open(file):\n \"\"\"\n Return True if the given ``file`` can be opened for writing\n \"\"\"\n try:\n fp = open(file, \"ab\")\n fp.close()\n return True\n except IOError:\n stderrlog.info(\"Unable to open %s\\n\" % file)\n return False\n\n\ndef can_open_all(files):\n \"\"\"\n Return True if all of the provided ``files`` can be opened\n \"\"\"\n for f in files:\n if not can_open(f):\n return False\n return True\n\n\ndef can_open_all_files_in_prefix(prefix, files):\n \"\"\"\n Returns True if all ``files`` at a given ``prefix`` can be opened\n \"\"\"\n return can_open_all((os.path.join(prefix, f) for f in files))\n\ndef try_write(dir_path):\n assert isdir(dir_path)\n try:\n with tempfile.TemporaryFile(prefix='.conda-try-write',\n dir=dir_path) as fo:\n fo.write(b'This is a test file.\\n')\n return True\n except (IOError, OSError):\n return False\n\n\ndef hashsum_file(path, mode='md5'):\n h = hashlib.new(mode)\n with open(path, 'rb') as fi:\n while True:\n chunk = fi.read(262144) # process chunks of 256KB\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef md5_file(path):\n return hashsum_file(path, 'md5')\n\n\ndef url_path(path):\n path = abspath(path)\n if sys.platform == 'win32':\n path = '/' + path.replace(':', '|').replace('\\\\', '/')\n return 'file://%s' % path\n\n\ndef human_bytes(n):\n \"\"\"\n Return the number of bytes n in more human readable form.\n \"\"\"\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g\n\n\nclass memoized(object):\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, collections.Hashable):\n # uncacheable. 
a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(kw.items()))\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\n# For instance methods only\nclass memoize(object): # 577452\n def __init__(self, func):\n self.func = func\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self.func\n return partial(self, obj)\n def __call__(self, *args, **kw):\n obj = args[0]\n try:\n cache = obj.__cache\n except AttributeError:\n cache = obj.__cache = {}\n key = (self.func, args[1:], frozenset(kw.items()))\n try:\n res = cache[key]\n except KeyError:\n res = cache[key] = self.func(*args, **kw)\n return res\n", "path": "conda/utils.py"}]}
2003
261
gh_patches_debug_14991
rasdani/github-patches
git_diff
gammapy__gammapy-3759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error while printing catalogs ``` from gammapy.catalog import * catalog = SourceCatalog4FGL() print(catalog) ``` gives an error ``` --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) /var/folders/v4/62d6cvbj72qbyw23mq2pwxl00000gn/T/ipykernel_17380/3277138734.py in <module> 1 catalog = SourceCatalog4FGL() ----> 2 print(catalog) ~/Gammapy-dev/gammapy/gammapy/catalog/core.py in __str__(self) 110 def __str__(self): 111 return ( --> 112 f"{self.__class__.__name__}:\n" 113 f" name: {self.name}\n" 114 f" description: {self.description}\n" AttributeError: 'SourceCatalog4FGL' object has no attribute 'name' ``` This happens for all catalogs --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gammapy/catalog/core.py` Content: ``` 1 # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 """Source catalog and object base classes.""" 3 import abc 4 import numbers 5 from copy import deepcopy 6 import numpy as np 7 from astropy.coordinates import SkyCoord 8 from astropy.utils import lazyproperty 9 from gammapy.maps import TimeMapAxis 10 from gammapy.modeling.models import Models 11 from gammapy.utils.table import table_from_row_data, table_row_to_dict 12 13 __all__ = ["SourceCatalog", "SourceCatalogObject"] 14 15 16 # https://pydanny.blogspot.com/2011/11/loving-bunch-class.html 17 class Bunch(dict): 18 def __init__(self, **kw): 19 dict.__init__(self, kw) 20 self.__dict__.update(kw) 21 22 23 def format_flux_points_table(table): 24 for column in table.colnames: 25 if column.startswith(("dnde", "eflux", "flux", "e2dnde", "ref")): 26 table[column].format = ".3e" 27 elif column.startswith( 28 ("e_min", "e_max", "e_ref", "sqrt_ts", "norm", "ts", "stat") 29 ): 30 table[column].format = ".3f" 31 32 return table 33 34 35 class SourceCatalogObject: 36 """Source catalog object. 37 38 This class can be used directly, but it is mostly used as a 39 base class for the other source catalog classes. 40 41 The catalog data on this source is stored in the `source.data` 42 attribute as a dict. 43 44 The source catalog object is decoupled from the source catalog, 45 it doesn't hold a reference back to it, except for a key 46 ``_row_index`` of type ``int`` that links to the catalog table 47 row the source information comes from. 48 """ 49 50 _source_name_key = "Source_Name" 51 _row_index_key = "_row_index" 52 53 def __init__(self, data, data_extended=None): 54 self.data = Bunch(**data) 55 if data_extended: 56 self.data_extended = Bunch(**data_extended) 57 58 @property 59 def name(self): 60 """Source name (str)""" 61 name = self.data[self._source_name_key] 62 return name.strip() 63 64 @property 65 def row_index(self): 66 """Row index of source in catalog (int)""" 67 return self.data[self._row_index_key] 68 69 @property 70 def position(self): 71 """Source position (`~astropy.coordinates.SkyCoord`).""" 72 table = table_from_row_data([self.data]) 73 return _skycoord_from_table(table)[0] 74 75 76 class SourceCatalog(abc.ABC): 77 """Generic source catalog. 78 79 This class can be used directly, but it is mostly used as a 80 base class for the other source catalog classes. 81 82 This is a thin wrapper around `~astropy.table.Table`, 83 which is stored in the ``catalog.table`` attribute. 
84 85 Parameters 86 ---------- 87 table : `~astropy.table.Table` 88 Table with catalog data. 89 source_name_key : str 90 Column with source name information 91 source_name_alias : tuple of str 92 Columns with source name aliases. This will allow accessing the source 93 row by alias names as well. 94 """ 95 96 @classmethod 97 @abc.abstractmethod 98 def description(cls): 99 """Catalog description (str).""" 100 pass 101 102 source_object_class = SourceCatalogObject 103 """Source class (`SourceCatalogObject`).""" 104 105 def __init__(self, table, source_name_key="Source_Name", source_name_alias=()): 106 self.table = table 107 self._source_name_key = source_name_key 108 self._source_name_alias = source_name_alias 109 110 def __str__(self): 111 return ( 112 f"{self.__class__.__name__}:\n" 113 f" name: {self.name}\n" 114 f" description: {self.description}\n" 115 f" sources: {len(self.table)}\n" 116 ) 117 118 @lazyproperty 119 def _name_to_index_cache(self): 120 # Make a dict for quick lookup: source name -> row index 121 names = {} 122 for idx, row in enumerate(self.table): 123 name = row[self._source_name_key] 124 names[name.strip()] = idx 125 for alias_column in self._source_name_alias: 126 for alias in str(row[alias_column]).split(","): 127 if not alias == "": 128 names[alias.strip()] = idx 129 return names 130 131 def row_index(self, name): 132 """Look up row index of source by name. 133 134 Parameters 135 ---------- 136 name : str 137 Source name 138 139 Returns 140 ------- 141 index : int 142 Row index of source in table 143 """ 144 index = self._name_to_index_cache[name] 145 row = self.table[index] 146 # check if name lookup is correct other wise recompute _name_to_index_cache 147 148 possible_names = [row[self._source_name_key]] 149 for alias_column in self._source_name_alias: 150 possible_names += str(row[alias_column]).split(",") 151 152 if name not in possible_names: 153 self.__dict__.pop("_name_to_index_cache") 154 index = self._name_to_index_cache[name] 155 156 return index 157 158 def source_name(self, index): 159 """Look up source name by row index. 160 161 Parameters 162 ---------- 163 index : int 164 Row index of source in table 165 """ 166 source_name_col = self.table[self._source_name_key] 167 name = source_name_col[index] 168 return name.strip() 169 170 def __getitem__(self, key): 171 """Get source by name. 172 173 Parameters 174 ---------- 175 key : str or int 176 Source name or row index 177 178 Returns 179 ------- 180 source : `SourceCatalogObject` 181 An object representing one source 182 """ 183 if isinstance(key, str): 184 index = self.row_index(key) 185 elif isinstance(key, numbers.Integral): 186 index = key 187 elif isinstance(key, np.ndarray) and key.dtype == bool: 188 new = deepcopy(self) 189 new.table = self.table[key] 190 return new 191 else: 192 raise TypeError(f"Invalid key: {key!r}, {type(key)}\n") 193 194 return self._make_source_object(index) 195 196 def _make_source_object(self, index): 197 """Make one source object. 
198 199 Parameters 200 ---------- 201 index : int 202 Row index 203 204 Returns 205 ------- 206 source : `SourceCatalogObject` 207 Source object 208 """ 209 data = table_row_to_dict(self.table[index]) 210 data[SourceCatalogObject._row_index_key] = index 211 212 hist_table = getattr(self, "hist_table", None) 213 hist2_table = getattr(self, "hist2_table", None) 214 215 if hist_table: 216 data["time_axis"] = TimeMapAxis.from_table(hist_table, format="fermi-fgl") 217 218 if hist2_table: 219 data["time_axis_2"] = TimeMapAxis.from_table( 220 hist2_table, format="fermi-fgl" 221 ) 222 223 if "Extended_Source_Name" in data: 224 name_extended = data["Extended_Source_Name"].strip() 225 elif "Source_Name" in data: 226 name_extended = data["Source_Name"].strip() 227 else: 228 name_extended = None 229 try: 230 idx = self._lookup_extended_source_idx[name_extended] 231 data_extended = table_row_to_dict(self.extended_sources_table[idx]) 232 except (KeyError, AttributeError): 233 data_extended = None 234 235 source = self.source_object_class(data, data_extended) 236 return source 237 238 @lazyproperty 239 def _lookup_extended_source_idx(self): 240 names = [_.strip() for _ in self.extended_sources_table["Source_Name"]] 241 idx = range(len(names)) 242 return dict(zip(names, idx)) 243 244 @property 245 def positions(self): 246 """Source positions (`~astropy.coordinates.SkyCoord`).""" 247 return _skycoord_from_table(self.table) 248 249 def to_models(self, **kwargs): 250 """Create Models object from catalogue""" 251 return Models([_.sky_model(**kwargs) for _ in self]) 252 253 254 def _skycoord_from_table(table): 255 keys = table.colnames 256 257 if {"RAJ2000", "DEJ2000"}.issubset(keys): 258 lon, lat, frame = "RAJ2000", "DEJ2000", "icrs" 259 elif {"RA", "DEC"}.issubset(keys): 260 lon, lat, frame = "RA", "DEC", "icrs" 261 elif {"ra", "dec"}.issubset(keys): 262 lon, lat, frame = "ra", "dec", "icrs" 263 else: 264 raise KeyError("No column GLON / GLAT or RA / DEC or RAJ2000 / DEJ2000 found.") 265 266 unit = table[lon].unit.to_string() if table[lon].unit else "deg" 267 268 return SkyCoord(table[lon], table[lat], unit=unit, frame=frame) 269 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gammapy/catalog/core.py b/gammapy/catalog/core.py --- a/gammapy/catalog/core.py +++ b/gammapy/catalog/core.py @@ -99,6 +99,11 @@ """Catalog description (str).""" pass + @property + @abc.abstractmethod + def tag(self): + pass + source_object_class = SourceCatalogObject """Source class (`SourceCatalogObject`).""" @@ -110,7 +115,7 @@ def __str__(self): return ( f"{self.__class__.__name__}:\n" - f" name: {self.name}\n" + f" name: {self.tag}\n" f" description: {self.description}\n" f" sources: {len(self.table)}\n" )
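For context on the fix above: `SourceCatalog.__str__` referred to `self.name`, an attribute that only `SourceCatalogObject` defines, so printing any catalog raised `AttributeError`. The patch points `__str__` at `self.tag` and declares `tag` as an abstract property, so every concrete catalog class must supply it. The toy classes below sketch that abstract-property pattern in isolation; the names and strings are invented for the illustration and are not gammapy code.

```python
# Toy sketch of the abstract-property pattern introduced by the patch (not gammapy code).
import abc


class Catalog(abc.ABC):
    @property
    @abc.abstractmethod
    def tag(self):
        """Short identifier every concrete catalog must provide."""

    @property
    @abc.abstractmethod
    def description(self):
        """One-line description every concrete catalog must provide."""

    def __str__(self):
        # __str__ only touches attributes the ABC guarantees to exist.
        return (
            f"{self.__class__.__name__}:\n"
            f"  name: {self.tag}\n"
            f"  description: {self.description}\n"
        )


class DemoCatalog(Catalog):
    # A plain class attribute is enough to satisfy the abstract property.
    tag = "demo"
    description = "stand-in catalog used only for this illustration"


print(DemoCatalog())
```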
{"golden_diff": "diff --git a/gammapy/catalog/core.py b/gammapy/catalog/core.py\n--- a/gammapy/catalog/core.py\n+++ b/gammapy/catalog/core.py\n@@ -99,6 +99,11 @@\n \"\"\"Catalog description (str).\"\"\"\n pass\n \n+ @property\n+ @abc.abstractmethod\n+ def tag(self):\n+ pass\n+\n source_object_class = SourceCatalogObject\n \"\"\"Source class (`SourceCatalogObject`).\"\"\"\n \n@@ -110,7 +115,7 @@\n def __str__(self):\n return (\n f\"{self.__class__.__name__}:\\n\"\n- f\" name: {self.name}\\n\"\n+ f\" name: {self.tag}\\n\"\n f\" description: {self.description}\\n\"\n f\" sources: {len(self.table)}\\n\"\n )\n", "issue": "Error while printing catalogs\n```\r\nfrom gammapy.catalog import *\r\ncatalog = SourceCatalog4FGL()\r\nprint(catalog)\r\n```\r\n\r\ngives an error\r\n```\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n/var/folders/v4/62d6cvbj72qbyw23mq2pwxl00000gn/T/ipykernel_17380/3277138734.py in <module>\r\n 1 catalog = SourceCatalog4FGL()\r\n----> 2 print(catalog)\r\n\r\n~/Gammapy-dev/gammapy/gammapy/catalog/core.py in __str__(self)\r\n 110 def __str__(self):\r\n 111 return (\r\n--> 112 f\"{self.__class__.__name__}:\\n\"\r\n 113 f\" name: {self.name}\\n\"\r\n 114 f\" description: {self.description}\\n\"\r\n\r\nAttributeError: 'SourceCatalog4FGL' object has no attribute 'name'\r\n```\r\n\r\nThis happens for all catalogs\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Source catalog and object base classes.\"\"\"\nimport abc\nimport numbers\nfrom copy import deepcopy\nimport numpy as np\nfrom astropy.coordinates import SkyCoord\nfrom astropy.utils import lazyproperty\nfrom gammapy.maps import TimeMapAxis\nfrom gammapy.modeling.models import Models\nfrom gammapy.utils.table import table_from_row_data, table_row_to_dict\n\n__all__ = [\"SourceCatalog\", \"SourceCatalogObject\"]\n\n\n# https://pydanny.blogspot.com/2011/11/loving-bunch-class.html\nclass Bunch(dict):\n def __init__(self, **kw):\n dict.__init__(self, kw)\n self.__dict__.update(kw)\n\n\ndef format_flux_points_table(table):\n for column in table.colnames:\n if column.startswith((\"dnde\", \"eflux\", \"flux\", \"e2dnde\", \"ref\")):\n table[column].format = \".3e\"\n elif column.startswith(\n (\"e_min\", \"e_max\", \"e_ref\", \"sqrt_ts\", \"norm\", \"ts\", \"stat\")\n ):\n table[column].format = \".3f\"\n\n return table\n\n\nclass SourceCatalogObject:\n \"\"\"Source catalog object.\n\n This class can be used directly, but it is mostly used as a\n base class for the other source catalog classes.\n\n The catalog data on this source is stored in the `source.data`\n attribute as a dict.\n\n The source catalog object is decoupled from the source catalog,\n it doesn't hold a reference back to it, except for a key\n ``_row_index`` of type ``int`` that links to the catalog table\n row the source information comes from.\n \"\"\"\n\n _source_name_key = \"Source_Name\"\n _row_index_key = \"_row_index\"\n\n def __init__(self, data, data_extended=None):\n self.data = Bunch(**data)\n if data_extended:\n self.data_extended = Bunch(**data_extended)\n\n @property\n def name(self):\n \"\"\"Source name (str)\"\"\"\n name = self.data[self._source_name_key]\n return name.strip()\n\n @property\n def row_index(self):\n \"\"\"Row index of source in catalog (int)\"\"\"\n return self.data[self._row_index_key]\n\n @property\n def position(self):\n \"\"\"Source position (`~astropy.coordinates.SkyCoord`).\"\"\"\n table = 
table_from_row_data([self.data])\n return _skycoord_from_table(table)[0]\n\n\nclass SourceCatalog(abc.ABC):\n \"\"\"Generic source catalog.\n\n This class can be used directly, but it is mostly used as a\n base class for the other source catalog classes.\n\n This is a thin wrapper around `~astropy.table.Table`,\n which is stored in the ``catalog.table`` attribute.\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with catalog data.\n source_name_key : str\n Column with source name information\n source_name_alias : tuple of str\n Columns with source name aliases. This will allow accessing the source\n row by alias names as well.\n \"\"\"\n\n @classmethod\n @abc.abstractmethod\n def description(cls):\n \"\"\"Catalog description (str).\"\"\"\n pass\n\n source_object_class = SourceCatalogObject\n \"\"\"Source class (`SourceCatalogObject`).\"\"\"\n\n def __init__(self, table, source_name_key=\"Source_Name\", source_name_alias=()):\n self.table = table\n self._source_name_key = source_name_key\n self._source_name_alias = source_name_alias\n\n def __str__(self):\n return (\n f\"{self.__class__.__name__}:\\n\"\n f\" name: {self.name}\\n\"\n f\" description: {self.description}\\n\"\n f\" sources: {len(self.table)}\\n\"\n )\n\n @lazyproperty\n def _name_to_index_cache(self):\n # Make a dict for quick lookup: source name -> row index\n names = {}\n for idx, row in enumerate(self.table):\n name = row[self._source_name_key]\n names[name.strip()] = idx\n for alias_column in self._source_name_alias:\n for alias in str(row[alias_column]).split(\",\"):\n if not alias == \"\":\n names[alias.strip()] = idx\n return names\n\n def row_index(self, name):\n \"\"\"Look up row index of source by name.\n\n Parameters\n ----------\n name : str\n Source name\n\n Returns\n -------\n index : int\n Row index of source in table\n \"\"\"\n index = self._name_to_index_cache[name]\n row = self.table[index]\n # check if name lookup is correct other wise recompute _name_to_index_cache\n\n possible_names = [row[self._source_name_key]]\n for alias_column in self._source_name_alias:\n possible_names += str(row[alias_column]).split(\",\")\n\n if name not in possible_names:\n self.__dict__.pop(\"_name_to_index_cache\")\n index = self._name_to_index_cache[name]\n\n return index\n\n def source_name(self, index):\n \"\"\"Look up source name by row index.\n\n Parameters\n ----------\n index : int\n Row index of source in table\n \"\"\"\n source_name_col = self.table[self._source_name_key]\n name = source_name_col[index]\n return name.strip()\n\n def __getitem__(self, key):\n \"\"\"Get source by name.\n\n Parameters\n ----------\n key : str or int\n Source name or row index\n\n Returns\n -------\n source : `SourceCatalogObject`\n An object representing one source\n \"\"\"\n if isinstance(key, str):\n index = self.row_index(key)\n elif isinstance(key, numbers.Integral):\n index = key\n elif isinstance(key, np.ndarray) and key.dtype == bool:\n new = deepcopy(self)\n new.table = self.table[key]\n return new\n else:\n raise TypeError(f\"Invalid key: {key!r}, {type(key)}\\n\")\n\n return self._make_source_object(index)\n\n def _make_source_object(self, index):\n \"\"\"Make one source object.\n\n Parameters\n ----------\n index : int\n Row index\n\n Returns\n -------\n source : `SourceCatalogObject`\n Source object\n \"\"\"\n data = table_row_to_dict(self.table[index])\n data[SourceCatalogObject._row_index_key] = index\n\n hist_table = getattr(self, \"hist_table\", None)\n hist2_table = getattr(self, \"hist2_table\", 
None)\n\n if hist_table:\n data[\"time_axis\"] = TimeMapAxis.from_table(hist_table, format=\"fermi-fgl\")\n\n if hist2_table:\n data[\"time_axis_2\"] = TimeMapAxis.from_table(\n hist2_table, format=\"fermi-fgl\"\n )\n\n if \"Extended_Source_Name\" in data:\n name_extended = data[\"Extended_Source_Name\"].strip()\n elif \"Source_Name\" in data:\n name_extended = data[\"Source_Name\"].strip()\n else:\n name_extended = None\n try:\n idx = self._lookup_extended_source_idx[name_extended]\n data_extended = table_row_to_dict(self.extended_sources_table[idx])\n except (KeyError, AttributeError):\n data_extended = None\n\n source = self.source_object_class(data, data_extended)\n return source\n\n @lazyproperty\n def _lookup_extended_source_idx(self):\n names = [_.strip() for _ in self.extended_sources_table[\"Source_Name\"]]\n idx = range(len(names))\n return dict(zip(names, idx))\n\n @property\n def positions(self):\n \"\"\"Source positions (`~astropy.coordinates.SkyCoord`).\"\"\"\n return _skycoord_from_table(self.table)\n\n def to_models(self, **kwargs):\n \"\"\"Create Models object from catalogue\"\"\"\n return Models([_.sky_model(**kwargs) for _ in self])\n\n\ndef _skycoord_from_table(table):\n keys = table.colnames\n\n if {\"RAJ2000\", \"DEJ2000\"}.issubset(keys):\n lon, lat, frame = \"RAJ2000\", \"DEJ2000\", \"icrs\"\n elif {\"RA\", \"DEC\"}.issubset(keys):\n lon, lat, frame = \"RA\", \"DEC\", \"icrs\"\n elif {\"ra\", \"dec\"}.issubset(keys):\n lon, lat, frame = \"ra\", \"dec\", \"icrs\"\n else:\n raise KeyError(\"No column GLON / GLAT or RA / DEC or RAJ2000 / DEJ2000 found.\")\n\n unit = table[lon].unit.to_string() if table[lon].unit else \"deg\"\n\n return SkyCoord(table[lon], table[lat], unit=unit, frame=frame)\n", "path": "gammapy/catalog/core.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Source catalog and object base classes.\"\"\"\nimport abc\nimport numbers\nfrom copy import deepcopy\nimport numpy as np\nfrom astropy.coordinates import SkyCoord\nfrom astropy.utils import lazyproperty\nfrom gammapy.maps import TimeMapAxis\nfrom gammapy.modeling.models import Models\nfrom gammapy.utils.table import table_from_row_data, table_row_to_dict\n\n__all__ = [\"SourceCatalog\", \"SourceCatalogObject\"]\n\n\n# https://pydanny.blogspot.com/2011/11/loving-bunch-class.html\nclass Bunch(dict):\n def __init__(self, **kw):\n dict.__init__(self, kw)\n self.__dict__.update(kw)\n\n\ndef format_flux_points_table(table):\n for column in table.colnames:\n if column.startswith((\"dnde\", \"eflux\", \"flux\", \"e2dnde\", \"ref\")):\n table[column].format = \".3e\"\n elif column.startswith(\n (\"e_min\", \"e_max\", \"e_ref\", \"sqrt_ts\", \"norm\", \"ts\", \"stat\")\n ):\n table[column].format = \".3f\"\n\n return table\n\n\nclass SourceCatalogObject:\n \"\"\"Source catalog object.\n\n This class can be used directly, but it is mostly used as a\n base class for the other source catalog classes.\n\n The catalog data on this source is stored in the `source.data`\n attribute as a dict.\n\n The source catalog object is decoupled from the source catalog,\n it doesn't hold a reference back to it, except for a key\n ``_row_index`` of type ``int`` that links to the catalog table\n row the source information comes from.\n \"\"\"\n\n _source_name_key = \"Source_Name\"\n _row_index_key = \"_row_index\"\n\n def __init__(self, data, data_extended=None):\n self.data = Bunch(**data)\n if data_extended:\n self.data_extended = Bunch(**data_extended)\n\n @property\n 
def name(self):\n \"\"\"Source name (str)\"\"\"\n name = self.data[self._source_name_key]\n return name.strip()\n\n @property\n def row_index(self):\n \"\"\"Row index of source in catalog (int)\"\"\"\n return self.data[self._row_index_key]\n\n @property\n def position(self):\n \"\"\"Source position (`~astropy.coordinates.SkyCoord`).\"\"\"\n table = table_from_row_data([self.data])\n return _skycoord_from_table(table)[0]\n\n\nclass SourceCatalog(abc.ABC):\n \"\"\"Generic source catalog.\n\n This class can be used directly, but it is mostly used as a\n base class for the other source catalog classes.\n\n This is a thin wrapper around `~astropy.table.Table`,\n which is stored in the ``catalog.table`` attribute.\n\n Parameters\n ----------\n table : `~astropy.table.Table`\n Table with catalog data.\n source_name_key : str\n Column with source name information\n source_name_alias : tuple of str\n Columns with source name aliases. This will allow accessing the source\n row by alias names as well.\n \"\"\"\n\n @classmethod\n @abc.abstractmethod\n def description(cls):\n \"\"\"Catalog description (str).\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def tag(self):\n pass\n\n source_object_class = SourceCatalogObject\n \"\"\"Source class (`SourceCatalogObject`).\"\"\"\n\n def __init__(self, table, source_name_key=\"Source_Name\", source_name_alias=()):\n self.table = table\n self._source_name_key = source_name_key\n self._source_name_alias = source_name_alias\n\n def __str__(self):\n return (\n f\"{self.__class__.__name__}:\\n\"\n f\" name: {self.tag}\\n\"\n f\" description: {self.description}\\n\"\n f\" sources: {len(self.table)}\\n\"\n )\n\n @lazyproperty\n def _name_to_index_cache(self):\n # Make a dict for quick lookup: source name -> row index\n names = {}\n for idx, row in enumerate(self.table):\n name = row[self._source_name_key]\n names[name.strip()] = idx\n for alias_column in self._source_name_alias:\n for alias in str(row[alias_column]).split(\",\"):\n if not alias == \"\":\n names[alias.strip()] = idx\n return names\n\n def row_index(self, name):\n \"\"\"Look up row index of source by name.\n\n Parameters\n ----------\n name : str\n Source name\n\n Returns\n -------\n index : int\n Row index of source in table\n \"\"\"\n index = self._name_to_index_cache[name]\n row = self.table[index]\n # check if name lookup is correct other wise recompute _name_to_index_cache\n\n possible_names = [row[self._source_name_key]]\n for alias_column in self._source_name_alias:\n possible_names += str(row[alias_column]).split(\",\")\n\n if name not in possible_names:\n self.__dict__.pop(\"_name_to_index_cache\")\n index = self._name_to_index_cache[name]\n\n return index\n\n def source_name(self, index):\n \"\"\"Look up source name by row index.\n\n Parameters\n ----------\n index : int\n Row index of source in table\n \"\"\"\n source_name_col = self.table[self._source_name_key]\n name = source_name_col[index]\n return name.strip()\n\n def __getitem__(self, key):\n \"\"\"Get source by name.\n\n Parameters\n ----------\n key : str or int\n Source name or row index\n\n Returns\n -------\n source : `SourceCatalogObject`\n An object representing one source\n \"\"\"\n if isinstance(key, str):\n index = self.row_index(key)\n elif isinstance(key, numbers.Integral):\n index = key\n elif isinstance(key, np.ndarray) and key.dtype == bool:\n new = deepcopy(self)\n new.table = self.table[key]\n return new\n else:\n raise TypeError(f\"Invalid key: {key!r}, {type(key)}\\n\")\n\n return 
self._make_source_object(index)\n\n def _make_source_object(self, index):\n \"\"\"Make one source object.\n\n Parameters\n ----------\n index : int\n Row index\n\n Returns\n -------\n source : `SourceCatalogObject`\n Source object\n \"\"\"\n data = table_row_to_dict(self.table[index])\n data[SourceCatalogObject._row_index_key] = index\n\n hist_table = getattr(self, \"hist_table\", None)\n hist2_table = getattr(self, \"hist2_table\", None)\n\n if hist_table:\n data[\"time_axis\"] = TimeMapAxis.from_table(hist_table, format=\"fermi-fgl\")\n\n if hist2_table:\n data[\"time_axis_2\"] = TimeMapAxis.from_table(\n hist2_table, format=\"fermi-fgl\"\n )\n\n if \"Extended_Source_Name\" in data:\n name_extended = data[\"Extended_Source_Name\"].strip()\n elif \"Source_Name\" in data:\n name_extended = data[\"Source_Name\"].strip()\n else:\n name_extended = None\n try:\n idx = self._lookup_extended_source_idx[name_extended]\n data_extended = table_row_to_dict(self.extended_sources_table[idx])\n except (KeyError, AttributeError):\n data_extended = None\n\n source = self.source_object_class(data, data_extended)\n return source\n\n @lazyproperty\n def _lookup_extended_source_idx(self):\n names = [_.strip() for _ in self.extended_sources_table[\"Source_Name\"]]\n idx = range(len(names))\n return dict(zip(names, idx))\n\n @property\n def positions(self):\n \"\"\"Source positions (`~astropy.coordinates.SkyCoord`).\"\"\"\n return _skycoord_from_table(self.table)\n\n def to_models(self, **kwargs):\n \"\"\"Create Models object from catalogue\"\"\"\n return Models([_.sky_model(**kwargs) for _ in self])\n\n\ndef _skycoord_from_table(table):\n keys = table.colnames\n\n if {\"RAJ2000\", \"DEJ2000\"}.issubset(keys):\n lon, lat, frame = \"RAJ2000\", \"DEJ2000\", \"icrs\"\n elif {\"RA\", \"DEC\"}.issubset(keys):\n lon, lat, frame = \"RA\", \"DEC\", \"icrs\"\n elif {\"ra\", \"dec\"}.issubset(keys):\n lon, lat, frame = \"ra\", \"dec\", \"icrs\"\n else:\n raise KeyError(\"No column GLON / GLAT or RA / DEC or RAJ2000 / DEJ2000 found.\")\n\n unit = table[lon].unit.to_string() if table[lon].unit else \"deg\"\n\n return SkyCoord(table[lon], table[lat], unit=unit, frame=frame)\n", "path": "gammapy/catalog/core.py"}]}
num_tokens: 3,160
num_tokens_diff: 198
problem_id: gh_patches_debug_33670
source: rasdani/github-patches
task_type: git_diff
in_source_id: getredash__redash-1792
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Alert notifications fail (sometime) with a SQLAlchemy error ### Issue Summary After migrating to 1.0.1 alerts are notified only sporadically to Slack (I cannot pinpoint exactly the conditions in which it happens). ### Steps to Reproduce 1. Create a failing alert for an existing query 2. Add your email and a Slack destination to your alert (the order seems to matter: add first the email destination then the slack destination) 3. Refresh the query such that the alert would trigger 4. You will receive an alert email but no slack alert 5. In the back-end `celery_error.log` you will see an error message like ``` [2017-04-18 13:13:58,184: ERROR/Worker-4] redash.tasks.check_alerts_for_query[fb4fee06-3318-44f7-8aaf-7984d792a793]: Error with processing destination Traceback (most recent call last): File "/opt/redash/redash.1.0.1.b2833/redash/tasks/alerts.py", line 23, in notify_subscriptions subscription.notify(alert, alert.query_rel, subscription.user, new_state, current_app, host) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py", line 237, in __get__ return self.impl.get(instance_state(instance), dict_) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py", line 584, in get value = self.callable_(state, passive) File "/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/strategies.py", line 530, in _load_for_state (orm_util.state_str(state), self.key) DetachedInstanceError: Parent instance <AlertSubscription at 0x7fcd934f04d0> is not bound to a Session; lazy load operation of attribute 'user' cannot proceed ``` I think this error is caused by the `subscription.user` call. My guess is that it is eventually caused by [`models.db.session.commit()`](https://github.com/getredash/redash/blob/master/redash/tasks/alerts.py#L51). This "might" be connected to #1706 . ### Technical details: * Redash Version: 1.0.1+b2833 * Browser/OS: (not relevant, error is in the backend) * How did you install Redash: ec2 from ami (in 2015), recently upgraded from 0.12 to 1.0.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `redash/tasks/general.py` Content: ``` 1 import requests 2 from celery.utils.log import get_task_logger 3 from flask_mail import Message 4 from redash.worker import celery 5 from redash.version_check import run_version_check 6 from redash import models, mail, settings 7 8 logger = get_task_logger(__name__) 9 10 11 @celery.task(name="redash.tasks.record_event") 12 def record_event(raw_event): 13 event = models.Event.record(raw_event) 14 models.db.session.commit() 15 16 for hook in settings.EVENT_REPORTING_WEBHOOKS: 17 logger.debug("Forwarding event to: %s", hook) 18 try: 19 data = { 20 "schema": "iglu:io.redash.webhooks/event/jsonschema/1-0-0", 21 "data": event.to_dict() 22 } 23 response = requests.post(hook, json=data) 24 if response.status_code != 200: 25 logger.error("Failed posting to %s: %s", hook, response.content) 26 except Exception: 27 logger.exception("Failed posting to %s", hook) 28 29 30 @celery.task(name="redash.tasks.version_check") 31 def version_check(): 32 run_version_check() 33 34 35 @celery.task(name="redash.tasks.subscribe") 36 def subscribe(form): 37 logger.info("Subscribing to: [security notifications=%s], [newsletter=%s]", form['security_notifications'], form['newsletter']) 38 data = { 39 'admin_name': form['name'], 40 'admin_email': form['email'], 41 'org_name': form['org_name'], 42 'security_notifications': form['security_notifications'], 43 'newsletter': form['newsletter'] 44 } 45 requests.post('https://beacon.redash.io/subscribe', json=data) 46 47 48 @celery.task(name="redash.tasks.send_mail") 49 def send_mail(to, subject, html, text): 50 from redash.wsgi import app 51 52 try: 53 with app.app_context(): 54 message = Message(recipients=to, 55 subject=subject, 56 html=html, 57 body=text) 58 59 mail.send(message) 60 except Exception: 61 logger.exception('Failed sending message: %s', message.subject) 62 ``` Path: `redash/destinations/email.py` Content: ``` 1 import logging 2 3 from flask_mail import Message 4 from redash import mail, settings 5 from redash.destinations import * 6 7 8 class Email(BaseDestination): 9 10 @classmethod 11 def configuration_schema(cls): 12 return { 13 "type": "object", 14 "properties": { 15 "addresses": { 16 "type": "string" 17 }, 18 "subject_template": { 19 "type": "string", 20 "default": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE, 21 "title": "Subject Template" 22 } 23 }, 24 "required": ["addresses"] 25 } 26 27 @classmethod 28 def icon(cls): 29 return 'fa-envelope' 30 31 def notify(self, alert, query, user, new_state, app, host, options): 32 recipients = [email for email in options.get('addresses', '').split(',') if email] 33 34 if not recipients: 35 logging.warning("No emails given. Skipping send.") 36 37 html = """ 38 Check <a href="{host}/alerts/{alert_id}">alert</a> / check <a href="{host}/queries/{query_id}">query</a>. 
39 """.format(host=host, alert_id=alert.id, query_id=query.id) 40 logging.debug("Notifying: %s", recipients) 41 42 try: 43 with app.app_context(): 44 alert_name = alert.name.encode('utf-8', 'ignore') 45 state = new_state.upper() 46 subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE) 47 message = Message( 48 recipients=recipients, 49 subject=subject_template.format(alert_name=alert_name, state=state), 50 html=html 51 ) 52 mail.send(message) 53 except Exception: 54 logging.exception("Mail send error.") 55 56 register(Email) 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/destinations/email.py b/redash/destinations/email.py --- a/redash/destinations/email.py +++ b/redash/destinations/email.py @@ -40,16 +40,15 @@ logging.debug("Notifying: %s", recipients) try: - with app.app_context(): - alert_name = alert.name.encode('utf-8', 'ignore') - state = new_state.upper() - subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE) - message = Message( - recipients=recipients, - subject=subject_template.format(alert_name=alert_name, state=state), - html=html - ) - mail.send(message) + alert_name = alert.name.encode('utf-8', 'ignore') + state = new_state.upper() + subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE) + message = Message( + recipients=recipients, + subject=subject_template.format(alert_name=alert_name, state=state), + html=html + ) + mail.send(message) except Exception: logging.exception("Mail send error.") diff --git a/redash/tasks/general.py b/redash/tasks/general.py --- a/redash/tasks/general.py +++ b/redash/tasks/general.py @@ -1,9 +1,10 @@ import requests + from celery.utils.log import get_task_logger from flask_mail import Message -from redash.worker import celery +from redash import mail, models, settings from redash.version_check import run_version_check -from redash import models, mail, settings +from redash.worker import celery logger = get_task_logger(__name__) @@ -50,12 +51,11 @@ from redash.wsgi import app try: - with app.app_context(): - message = Message(recipients=to, - subject=subject, - html=html, - body=text) + message = Message(recipients=to, + subject=subject, + html=html, + body=text) - mail.send(message) + mail.send(message) except Exception: logger.exception('Failed sending message: %s', message.subject)
{"golden_diff": "diff --git a/redash/destinations/email.py b/redash/destinations/email.py\n--- a/redash/destinations/email.py\n+++ b/redash/destinations/email.py\n@@ -40,16 +40,15 @@\n logging.debug(\"Notifying: %s\", recipients)\n \n try:\n- with app.app_context():\n- alert_name = alert.name.encode('utf-8', 'ignore')\n- state = new_state.upper()\n- subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n- message = Message(\n- recipients=recipients,\n- subject=subject_template.format(alert_name=alert_name, state=state),\n- html=html\n- )\n- mail.send(message)\n+ alert_name = alert.name.encode('utf-8', 'ignore')\n+ state = new_state.upper()\n+ subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n+ message = Message(\n+ recipients=recipients,\n+ subject=subject_template.format(alert_name=alert_name, state=state),\n+ html=html\n+ )\n+ mail.send(message)\n except Exception:\n logging.exception(\"Mail send error.\")\n \ndiff --git a/redash/tasks/general.py b/redash/tasks/general.py\n--- a/redash/tasks/general.py\n+++ b/redash/tasks/general.py\n@@ -1,9 +1,10 @@\n import requests\n+\n from celery.utils.log import get_task_logger\n from flask_mail import Message\n-from redash.worker import celery\n+from redash import mail, models, settings\n from redash.version_check import run_version_check\n-from redash import models, mail, settings\n+from redash.worker import celery\n \n logger = get_task_logger(__name__)\n \n@@ -50,12 +51,11 @@\n from redash.wsgi import app\n \n try:\n- with app.app_context():\n- message = Message(recipients=to,\n- subject=subject,\n- html=html,\n- body=text)\n+ message = Message(recipients=to,\n+ subject=subject,\n+ html=html,\n+ body=text)\n \n- mail.send(message)\n+ mail.send(message)\n except Exception:\n logger.exception('Failed sending message: %s', message.subject)\n", "issue": "Alert notifications fail (sometime) with a SQLAlchemy error\n### Issue Summary\r\n\r\nAfter migrating to 1.0.1 alerts are notified only sporadically to Slack (I cannot pinpoint exactly the conditions in which it happens).\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a failing alert for an existing query\r\n2. Add your email and a Slack destination to your alert (the order seems to matter: add first the email destination then the slack destination)\r\n3. Refresh the query such that the alert would trigger\r\n4. You will receive an alert email but no slack alert\r\n5. 
In the back-end `celery_error.log` you will see an error message like\r\n```\r\n[2017-04-18 13:13:58,184: ERROR/Worker-4] redash.tasks.check_alerts_for_query[fb4fee06-3318-44f7-8aaf-7984d792a793]: Error with processing destination\r\nTraceback (most recent call last):\r\n File \"/opt/redash/redash.1.0.1.b2833/redash/tasks/alerts.py\", line 23, in notify_subscriptions\r\n subscription.notify(alert, alert.query_rel, subscription.user, new_state, current_app, host)\r\n File \"/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py\", line 237, in __get__\r\n return self.impl.get(instance_state(instance), dict_)\r\n File \"/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/attributes.py\", line 584, in get\r\n value = self.callable_(state, passive)\r\n File \"/usr/local/lib/python2.7/dist-packages/sqlalchemy/orm/strategies.py\", line 530, in _load_for_state\r\n (orm_util.state_str(state), self.key)\r\nDetachedInstanceError: Parent instance <AlertSubscription at 0x7fcd934f04d0> is not bound to a Session; lazy load operation of attribute 'user' cannot proceed\r\n```\r\n\r\nI think this error is caused by the `subscription.user` call.\r\nMy guess is that it is eventually caused by [`models.db.session.commit()`](https://github.com/getredash/redash/blob/master/redash/tasks/alerts.py#L51). \r\n\r\nThis \"might\" be connected to #1706 .\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.1+b2833\r\n* Browser/OS: (not relevant, error is in the backend)\r\n* How did you install Redash: ec2 from ami (in 2015), recently upgraded from 0.12 to 1.0.1\r\n\n", "before_files": [{"content": "import requests\nfrom celery.utils.log import get_task_logger\nfrom flask_mail import Message\nfrom redash.worker import celery\nfrom redash.version_check import run_version_check\nfrom redash import models, mail, settings\n\nlogger = get_task_logger(__name__)\n\n\[email protected](name=\"redash.tasks.record_event\")\ndef record_event(raw_event):\n event = models.Event.record(raw_event)\n models.db.session.commit()\n\n for hook in settings.EVENT_REPORTING_WEBHOOKS:\n logger.debug(\"Forwarding event to: %s\", hook)\n try:\n data = {\n \"schema\": \"iglu:io.redash.webhooks/event/jsonschema/1-0-0\",\n \"data\": event.to_dict()\n }\n response = requests.post(hook, json=data)\n if response.status_code != 200:\n logger.error(\"Failed posting to %s: %s\", hook, response.content)\n except Exception:\n logger.exception(\"Failed posting to %s\", hook)\n\n\[email protected](name=\"redash.tasks.version_check\")\ndef version_check():\n run_version_check()\n\n\[email protected](name=\"redash.tasks.subscribe\")\ndef subscribe(form):\n logger.info(\"Subscribing to: [security notifications=%s], [newsletter=%s]\", form['security_notifications'], form['newsletter'])\n data = {\n 'admin_name': form['name'],\n 'admin_email': form['email'],\n 'org_name': form['org_name'],\n 'security_notifications': form['security_notifications'],\n 'newsletter': form['newsletter']\n }\n requests.post('https://beacon.redash.io/subscribe', json=data)\n\n\[email protected](name=\"redash.tasks.send_mail\")\ndef send_mail(to, subject, html, text):\n from redash.wsgi import app\n\n try:\n with app.app_context():\n message = Message(recipients=to,\n subject=subject,\n html=html,\n body=text)\n\n mail.send(message)\n except Exception:\n logger.exception('Failed sending message: %s', message.subject)\n", "path": "redash/tasks/general.py"}, {"content": "import logging\n\nfrom flask_mail import Message\nfrom redash import mail, settings\nfrom 
redash.destinations import *\n\n\nclass Email(BaseDestination):\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"addresses\": {\n \"type\": \"string\"\n },\n \"subject_template\": {\n \"type\": \"string\",\n \"default\": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE,\n \"title\": \"Subject Template\"\n }\n },\n \"required\": [\"addresses\"]\n }\n\n @classmethod\n def icon(cls):\n return 'fa-envelope'\n\n def notify(self, alert, query, user, new_state, app, host, options):\n recipients = [email for email in options.get('addresses', '').split(',') if email]\n\n if not recipients:\n logging.warning(\"No emails given. Skipping send.\")\n\n html = \"\"\"\n Check <a href=\"{host}/alerts/{alert_id}\">alert</a> / check <a href=\"{host}/queries/{query_id}\">query</a>.\n \"\"\".format(host=host, alert_id=alert.id, query_id=query.id)\n logging.debug(\"Notifying: %s\", recipients)\n\n try:\n with app.app_context():\n alert_name = alert.name.encode('utf-8', 'ignore')\n state = new_state.upper()\n subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n message = Message(\n recipients=recipients,\n subject=subject_template.format(alert_name=alert_name, state=state),\n html=html\n )\n mail.send(message)\n except Exception:\n logging.exception(\"Mail send error.\")\n\nregister(Email)\n", "path": "redash/destinations/email.py"}], "after_files": [{"content": "import requests\n\nfrom celery.utils.log import get_task_logger\nfrom flask_mail import Message\nfrom redash import mail, models, settings\nfrom redash.version_check import run_version_check\nfrom redash.worker import celery\n\nlogger = get_task_logger(__name__)\n\n\[email protected](name=\"redash.tasks.record_event\")\ndef record_event(raw_event):\n event = models.Event.record(raw_event)\n models.db.session.commit()\n\n for hook in settings.EVENT_REPORTING_WEBHOOKS:\n logger.debug(\"Forwarding event to: %s\", hook)\n try:\n data = {\n \"schema\": \"iglu:io.redash.webhooks/event/jsonschema/1-0-0\",\n \"data\": event.to_dict()\n }\n response = requests.post(hook, json=data)\n if response.status_code != 200:\n logger.error(\"Failed posting to %s: %s\", hook, response.content)\n except Exception:\n logger.exception(\"Failed posting to %s\", hook)\n\n\[email protected](name=\"redash.tasks.version_check\")\ndef version_check():\n run_version_check()\n\n\[email protected](name=\"redash.tasks.subscribe\")\ndef subscribe(form):\n logger.info(\"Subscribing to: [security notifications=%s], [newsletter=%s]\", form['security_notifications'], form['newsletter'])\n data = {\n 'admin_name': form['name'],\n 'admin_email': form['email'],\n 'org_name': form['org_name'],\n 'security_notifications': form['security_notifications'],\n 'newsletter': form['newsletter']\n }\n requests.post('https://beacon.redash.io/subscribe', json=data)\n\n\[email protected](name=\"redash.tasks.send_mail\")\ndef send_mail(to, subject, html, text):\n from redash.wsgi import app\n\n try:\n message = Message(recipients=to,\n subject=subject,\n html=html,\n body=text)\n\n mail.send(message)\n except Exception:\n logger.exception('Failed sending message: %s', message.subject)\n", "path": "redash/tasks/general.py"}, {"content": "import logging\n\nfrom flask_mail import Message\nfrom redash import mail, settings\nfrom redash.destinations import *\n\n\nclass Email(BaseDestination):\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n 
\"addresses\": {\n \"type\": \"string\"\n },\n \"subject_template\": {\n \"type\": \"string\",\n \"default\": settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE,\n \"title\": \"Subject Template\"\n }\n },\n \"required\": [\"addresses\"]\n }\n\n @classmethod\n def icon(cls):\n return 'fa-envelope'\n\n def notify(self, alert, query, user, new_state, app, host, options):\n recipients = [email for email in options.get('addresses', '').split(',') if email]\n\n if not recipients:\n logging.warning(\"No emails given. Skipping send.\")\n\n html = \"\"\"\n Check <a href=\"{host}/alerts/{alert_id}\">alert</a> / check <a href=\"{host}/queries/{query_id}\">query</a>.\n \"\"\".format(host=host, alert_id=alert.id, query_id=query.id)\n logging.debug(\"Notifying: %s\", recipients)\n\n try:\n alert_name = alert.name.encode('utf-8', 'ignore')\n state = new_state.upper()\n subject_template = options.get('subject_template', settings.ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE)\n message = Message(\n recipients=recipients,\n subject=subject_template.format(alert_name=alert_name, state=state),\n html=html\n )\n mail.send(message)\n except Exception:\n logging.exception(\"Mail send error.\")\n\nregister(Email)\n", "path": "redash/destinations/email.py"}]}
num_tokens: 1,901
num_tokens_diff: 503
problem_id: gh_patches_debug_24412
source: rasdani/github-patches
task_type: git_diff
in_source_id: quantumlib__Cirq-3339
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Moment should have `operation_at` There is a method called this on `Circuit`. It would make sense to live on `Moment` and then the circuit method could dispatch to it --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/ops/moment.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """A simplified time-slice of operations within a sequenced circuit.""" 16 17 from typing import (Any, Callable, Dict, FrozenSet, Iterable, Iterator, 18 overload, Tuple, TYPE_CHECKING, TypeVar, Union) 19 from cirq import protocols 20 from cirq._compat import deprecated_parameter 21 from cirq.ops import raw_types 22 23 if TYPE_CHECKING: 24 import cirq 25 26 TSelf_Moment = TypeVar('TSelf_Moment', bound='Moment') 27 28 29 class Moment: 30 """A time-slice of operations within a circuit. 31 32 Grouping operations into moments is intended to be a strong suggestion to 33 whatever is scheduling operations on real hardware. Operations in the same 34 moment should execute at the same time (to the extent possible; not all 35 operations have the same duration) and it is expected that all operations 36 in a moment should be completed before beginning the next moment. 37 38 Moment can be indexed by qubit or list of qubits: 39 moment[qubit] returns the Operation in the moment which touches the 40 given qubit, or throws KeyError if there is no such operation. 41 moment[qubits] returns another Moment which consists only of those 42 operations which touch at least one of the given qubits. If there 43 are no such operations, returns an empty Moment. 44 """ 45 46 @deprecated_parameter( 47 deadline='v0.9', 48 fix='Don\'t specify a keyword.', 49 match=lambda _, kwargs: 'operations' in kwargs, 50 parameter_desc='operations', 51 rewrite=lambda args, kwargs: (args + (kwargs['operations'],), {})) 52 def __init__(self, *contents: 'cirq.OP_TREE') -> None: 53 """Constructs a moment with the given operations. 54 55 Args: 56 operations: The operations applied within the moment. 57 Will be flattened and frozen into a tuple before storing. 58 59 Raises: 60 ValueError: A qubit appears more than once. 61 """ 62 from cirq.ops import op_tree 63 self._operations = tuple(op_tree.flatten_to_ops(contents)) 64 65 # Check that operations don't overlap. 
66 affected_qubits = [q for op in self.operations for q in op.qubits] 67 self._qubits = frozenset(affected_qubits) 68 if len(affected_qubits) != len(self._qubits): 69 raise ValueError( 70 'Overlapping operations: {}'.format(self.operations)) 71 72 @property 73 def operations(self) -> Tuple['cirq.Operation', ...]: 74 return self._operations 75 76 @property 77 def qubits(self) -> FrozenSet['cirq.Qid']: 78 return self._qubits 79 80 def operates_on_single_qubit(self, qubit: 'cirq.Qid') -> bool: 81 """Determines if the moment has operations touching the given qubit. 82 Args: 83 qubit: The qubit that may or may not be touched by operations. 84 Returns: 85 Whether this moment has operations involving the qubit. 86 """ 87 return qubit in self.qubits 88 89 def operates_on(self, qubits: Iterable['cirq.Qid']) -> bool: 90 """Determines if the moment has operations touching the given qubits. 91 92 Args: 93 qubits: The qubits that may or may not be touched by operations. 94 95 Returns: 96 Whether this moment has operations involving the qubits. 97 """ 98 return bool(set(qubits) & self.qubits) 99 100 def with_operation(self, operation: 'cirq.Operation') -> 'cirq.Moment': 101 """Returns an equal moment, but with the given op added. 102 103 Args: 104 operation: The operation to append. 105 106 Returns: 107 The new moment. 108 """ 109 if any(q in self._qubits for q in operation.qubits): 110 raise ValueError('Overlapping operations: {}'.format(operation)) 111 112 # Use private variables to facilitate a quick copy 113 m = Moment() 114 m._operations = self.operations + (operation,) 115 m._qubits = frozenset(self._qubits.union(set(operation.qubits))) 116 117 return m 118 119 def with_operations(self, *contents: 'cirq.OP_TREE') -> 'cirq.Moment': 120 """Returns a new moment with the given contents added. 121 122 Args: 123 contents: New operations to add to this moment. 124 125 Returns: 126 The new moment. 127 """ 128 from cirq.ops import op_tree 129 130 operations = list(self._operations) 131 qubits = set(self._qubits) 132 for op in op_tree.flatten_to_ops(contents): 133 if any(q in qubits for q in op.qubits): 134 raise ValueError('Overlapping operations: {}'.format(op)) 135 operations.append(op) 136 qubits.update(op.qubits) 137 138 # Use private variables to facilitate a quick copy 139 m = Moment() 140 m._operations = tuple(operations) 141 m._qubits = frozenset(qubits) 142 143 return m 144 145 def without_operations_touching(self, qubits: Iterable['cirq.Qid'] 146 ) -> 'cirq.Moment': 147 """Returns an equal moment, but without ops on the given qubits. 148 149 Args: 150 qubits: Operations that touch these will be removed. 151 152 Returns: 153 The new moment. 154 """ 155 qubits = frozenset(qubits) 156 if not self.operates_on(qubits): 157 return self 158 return Moment( 159 operation for operation in self.operations 160 if qubits.isdisjoint(frozenset(operation.qubits))) 161 162 def _operation_touching(self, qubit: raw_types.Qid) -> 'cirq.Operation': 163 """Returns the operation touching given qubit. 164 Args: 165 qubit: Operations that touch this qubit will be returned. 166 Returns: 167 The operation which touches `qubit`. 
168 """ 169 for op in self.operations: 170 if qubit in op.qubits: 171 return op 172 raise KeyError("Moment doesn't act on given qubit") 173 174 def __copy__(self): 175 return type(self)(self.operations) 176 177 def __bool__(self) -> bool: 178 return bool(self.operations) 179 180 def __eq__(self, other) -> bool: 181 if not isinstance(other, type(self)): 182 return NotImplemented 183 184 return (sorted(self.operations, key=lambda op: op.qubits) == sorted( 185 other.operations, key=lambda op: op.qubits)) 186 187 def _approx_eq_(self, other: Any, atol: Union[int, float]) -> bool: 188 """See `cirq.protocols.SupportsApproximateEquality`.""" 189 if not isinstance(other, type(self)): 190 return NotImplemented 191 192 return protocols.approx_eq(sorted(self.operations, 193 key=lambda op: op.qubits), 194 sorted(other.operations, 195 key=lambda op: op.qubits), 196 atol=atol) 197 198 def __ne__(self, other) -> bool: 199 return not self == other 200 201 def __hash__(self): 202 return hash( 203 (Moment, tuple(sorted(self.operations, key=lambda op: op.qubits)))) 204 205 def __iter__(self) -> Iterator['cirq.Operation']: 206 return iter(self.operations) 207 208 def __pow__(self, power): 209 if power == 1: 210 return self 211 new_ops = [] 212 for op in self.operations: 213 new_op = protocols.pow(op, power, default=None) 214 if new_op is None: 215 return NotImplemented 216 new_ops.append(new_op) 217 return Moment(new_ops) 218 219 def __len__(self) -> int: 220 return len(self.operations) 221 222 def __repr__(self) -> str: 223 if not self.operations: 224 return 'cirq.Moment()' 225 226 block = '\n'.join([repr(op) + ',' for op in self.operations]) 227 indented = ' ' + '\n '.join(block.split('\n')) 228 229 return f'cirq.Moment(\n{indented}\n)' 230 231 def __str__(self) -> str: 232 return ' and '.join(str(op) for op in self.operations) 233 234 def transform_qubits(self: TSelf_Moment, 235 func: Callable[['cirq.Qid'], 'cirq.Qid'] 236 ) -> TSelf_Moment: 237 """Returns the same moment, but with different qubits. 238 239 Args: 240 func: The function to use to turn each current qubit into a desired 241 new qubit. 242 243 Returns: 244 The receiving moment but with qubits transformed by the given 245 function. 246 """ 247 return self.__class__(op.transform_qubits(func) 248 for op in self.operations) 249 250 def _json_dict_(self) -> Dict[str, Any]: 251 return protocols.obj_to_dict_helper(self, ['operations']) 252 253 @classmethod 254 def _from_json_dict_(cls, operations, **kwargs): 255 return Moment(operations) 256 257 def __add__(self, other: 'cirq.OP_TREE') -> 'cirq.Moment': 258 from cirq.circuits import circuit 259 if isinstance(other, circuit.Circuit): 260 return NotImplemented # Delegate to Circuit.__radd__. 
261 return self.with_operations(other) 262 263 def __sub__(self, other: 'cirq.OP_TREE') -> 'cirq.Moment': 264 from cirq.ops import op_tree 265 must_remove = set(op_tree.flatten_to_ops(other)) 266 new_ops = [] 267 for op in self.operations: 268 if op in must_remove: 269 must_remove.remove(op) 270 else: 271 new_ops.append(op) 272 if must_remove: 273 raise ValueError(f"Subtracted missing operations from a moment.\n" 274 f"Missing operations: {must_remove!r}\n" 275 f"Moment: {self!r}") 276 return Moment(new_ops) 277 278 # pylint: disable=function-redefined 279 @overload 280 def __getitem__(self, key: raw_types.Qid) -> 'cirq.Operation': 281 pass 282 283 @overload 284 def __getitem__(self, key: Iterable[raw_types.Qid]) -> 'cirq.Moment': 285 pass 286 287 def __getitem__(self, key): 288 if isinstance(key, raw_types.Qid): 289 return self._operation_touching(key) 290 elif isinstance(key, Iterable): 291 qubits_to_keep = frozenset(key) 292 ops_to_keep = tuple( 293 op for op in self.operations 294 if not qubits_to_keep.isdisjoint(frozenset(op.qubits))) 295 return Moment(ops_to_keep) 296 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/ops/moment.py b/cirq/ops/moment.py --- a/cirq/ops/moment.py +++ b/cirq/ops/moment.py @@ -15,7 +15,7 @@ """A simplified time-slice of operations within a sequenced circuit.""" from typing import (Any, Callable, Dict, FrozenSet, Iterable, Iterator, - overload, Tuple, TYPE_CHECKING, TypeVar, Union) + overload, Optional, Tuple, TYPE_CHECKING, TypeVar, Union) from cirq import protocols from cirq._compat import deprecated_parameter from cirq.ops import raw_types @@ -97,6 +97,21 @@ """ return bool(set(qubits) & self.qubits) + def operation_at(self, qubit: raw_types.Qid) -> Optional['cirq.Operation']: + """Returns the operation on a certain qubit for the moment. + + Args: + qubit: The qubit on which the returned Operation operates + on. + + Returns: + The operation that operates on the qubit for that moment. + """ + if self.operates_on([qubit]): + return self.__getitem__(qubit) + else: + return None + def with_operation(self, operation: 'cirq.Operation') -> 'cirq.Moment': """Returns an equal moment, but with the given op added.
{"golden_diff": "diff --git a/cirq/ops/moment.py b/cirq/ops/moment.py\n--- a/cirq/ops/moment.py\n+++ b/cirq/ops/moment.py\n@@ -15,7 +15,7 @@\n \"\"\"A simplified time-slice of operations within a sequenced circuit.\"\"\"\n \n from typing import (Any, Callable, Dict, FrozenSet, Iterable, Iterator,\n- overload, Tuple, TYPE_CHECKING, TypeVar, Union)\n+ overload, Optional, Tuple, TYPE_CHECKING, TypeVar, Union)\n from cirq import protocols\n from cirq._compat import deprecated_parameter\n from cirq.ops import raw_types\n@@ -97,6 +97,21 @@\n \"\"\"\n return bool(set(qubits) & self.qubits)\n \n+ def operation_at(self, qubit: raw_types.Qid) -> Optional['cirq.Operation']:\n+ \"\"\"Returns the operation on a certain qubit for the moment.\n+\n+ Args:\n+ qubit: The qubit on which the returned Operation operates\n+ on.\n+\n+ Returns:\n+ The operation that operates on the qubit for that moment.\n+ \"\"\"\n+ if self.operates_on([qubit]):\n+ return self.__getitem__(qubit)\n+ else:\n+ return None\n+\n def with_operation(self, operation: 'cirq.Operation') -> 'cirq.Moment':\n \"\"\"Returns an equal moment, but with the given op added.\n", "issue": "Moment should have `operation_at`\nThere is a method called this on `Circuit`. It would make sense to live on `Moment` and then the circuit method could dispatch to it\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simplified time-slice of operations within a sequenced circuit.\"\"\"\n\nfrom typing import (Any, Callable, Dict, FrozenSet, Iterable, Iterator,\n overload, Tuple, TYPE_CHECKING, TypeVar, Union)\nfrom cirq import protocols\nfrom cirq._compat import deprecated_parameter\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\nTSelf_Moment = TypeVar('TSelf_Moment', bound='Moment')\n\n\nclass Moment:\n \"\"\"A time-slice of operations within a circuit.\n\n Grouping operations into moments is intended to be a strong suggestion to\n whatever is scheduling operations on real hardware. Operations in the same\n moment should execute at the same time (to the extent possible; not all\n operations have the same duration) and it is expected that all operations\n in a moment should be completed before beginning the next moment.\n\n Moment can be indexed by qubit or list of qubits:\n moment[qubit] returns the Operation in the moment which touches the\n given qubit, or throws KeyError if there is no such operation.\n moment[qubits] returns another Moment which consists only of those\n operations which touch at least one of the given qubits. 
If there\n are no such operations, returns an empty Moment.\n \"\"\"\n\n @deprecated_parameter(\n deadline='v0.9',\n fix='Don\\'t specify a keyword.',\n match=lambda _, kwargs: 'operations' in kwargs,\n parameter_desc='operations',\n rewrite=lambda args, kwargs: (args + (kwargs['operations'],), {}))\n def __init__(self, *contents: 'cirq.OP_TREE') -> None:\n \"\"\"Constructs a moment with the given operations.\n\n Args:\n operations: The operations applied within the moment.\n Will be flattened and frozen into a tuple before storing.\n\n Raises:\n ValueError: A qubit appears more than once.\n \"\"\"\n from cirq.ops import op_tree\n self._operations = tuple(op_tree.flatten_to_ops(contents))\n\n # Check that operations don't overlap.\n affected_qubits = [q for op in self.operations for q in op.qubits]\n self._qubits = frozenset(affected_qubits)\n if len(affected_qubits) != len(self._qubits):\n raise ValueError(\n 'Overlapping operations: {}'.format(self.operations))\n\n @property\n def operations(self) -> Tuple['cirq.Operation', ...]:\n return self._operations\n\n @property\n def qubits(self) -> FrozenSet['cirq.Qid']:\n return self._qubits\n\n def operates_on_single_qubit(self, qubit: 'cirq.Qid') -> bool:\n \"\"\"Determines if the moment has operations touching the given qubit.\n Args:\n qubit: The qubit that may or may not be touched by operations.\n Returns:\n Whether this moment has operations involving the qubit.\n \"\"\"\n return qubit in self.qubits\n\n def operates_on(self, qubits: Iterable['cirq.Qid']) -> bool:\n \"\"\"Determines if the moment has operations touching the given qubits.\n\n Args:\n qubits: The qubits that may or may not be touched by operations.\n\n Returns:\n Whether this moment has operations involving the qubits.\n \"\"\"\n return bool(set(qubits) & self.qubits)\n\n def with_operation(self, operation: 'cirq.Operation') -> 'cirq.Moment':\n \"\"\"Returns an equal moment, but with the given op added.\n\n Args:\n operation: The operation to append.\n\n Returns:\n The new moment.\n \"\"\"\n if any(q in self._qubits for q in operation.qubits):\n raise ValueError('Overlapping operations: {}'.format(operation))\n\n # Use private variables to facilitate a quick copy\n m = Moment()\n m._operations = self.operations + (operation,)\n m._qubits = frozenset(self._qubits.union(set(operation.qubits)))\n\n return m\n\n def with_operations(self, *contents: 'cirq.OP_TREE') -> 'cirq.Moment':\n \"\"\"Returns a new moment with the given contents added.\n\n Args:\n contents: New operations to add to this moment.\n\n Returns:\n The new moment.\n \"\"\"\n from cirq.ops import op_tree\n\n operations = list(self._operations)\n qubits = set(self._qubits)\n for op in op_tree.flatten_to_ops(contents):\n if any(q in qubits for q in op.qubits):\n raise ValueError('Overlapping operations: {}'.format(op))\n operations.append(op)\n qubits.update(op.qubits)\n\n # Use private variables to facilitate a quick copy\n m = Moment()\n m._operations = tuple(operations)\n m._qubits = frozenset(qubits)\n\n return m\n\n def without_operations_touching(self, qubits: Iterable['cirq.Qid']\n ) -> 'cirq.Moment':\n \"\"\"Returns an equal moment, but without ops on the given qubits.\n\n Args:\n qubits: Operations that touch these will be removed.\n\n Returns:\n The new moment.\n \"\"\"\n qubits = frozenset(qubits)\n if not self.operates_on(qubits):\n return self\n return Moment(\n operation for operation in self.operations\n if qubits.isdisjoint(frozenset(operation.qubits)))\n\n def _operation_touching(self, qubit: 
raw_types.Qid) -> 'cirq.Operation':\n \"\"\"Returns the operation touching given qubit.\n Args:\n qubit: Operations that touch this qubit will be returned.\n Returns:\n The operation which touches `qubit`.\n \"\"\"\n for op in self.operations:\n if qubit in op.qubits:\n return op\n raise KeyError(\"Moment doesn't act on given qubit\")\n\n def __copy__(self):\n return type(self)(self.operations)\n\n def __bool__(self) -> bool:\n return bool(self.operations)\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n\n return (sorted(self.operations, key=lambda op: op.qubits) == sorted(\n other.operations, key=lambda op: op.qubits))\n\n def _approx_eq_(self, other: Any, atol: Union[int, float]) -> bool:\n \"\"\"See `cirq.protocols.SupportsApproximateEquality`.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n return protocols.approx_eq(sorted(self.operations,\n key=lambda op: op.qubits),\n sorted(other.operations,\n key=lambda op: op.qubits),\n atol=atol)\n\n def __ne__(self, other) -> bool:\n return not self == other\n\n def __hash__(self):\n return hash(\n (Moment, tuple(sorted(self.operations, key=lambda op: op.qubits))))\n\n def __iter__(self) -> Iterator['cirq.Operation']:\n return iter(self.operations)\n\n def __pow__(self, power):\n if power == 1:\n return self\n new_ops = []\n for op in self.operations:\n new_op = protocols.pow(op, power, default=None)\n if new_op is None:\n return NotImplemented\n new_ops.append(new_op)\n return Moment(new_ops)\n\n def __len__(self) -> int:\n return len(self.operations)\n\n def __repr__(self) -> str:\n if not self.operations:\n return 'cirq.Moment()'\n\n block = '\\n'.join([repr(op) + ',' for op in self.operations])\n indented = ' ' + '\\n '.join(block.split('\\n'))\n\n return f'cirq.Moment(\\n{indented}\\n)'\n\n def __str__(self) -> str:\n return ' and '.join(str(op) for op in self.operations)\n\n def transform_qubits(self: TSelf_Moment,\n func: Callable[['cirq.Qid'], 'cirq.Qid']\n ) -> TSelf_Moment:\n \"\"\"Returns the same moment, but with different qubits.\n\n Args:\n func: The function to use to turn each current qubit into a desired\n new qubit.\n\n Returns:\n The receiving moment but with qubits transformed by the given\n function.\n \"\"\"\n return self.__class__(op.transform_qubits(func)\n for op in self.operations)\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['operations'])\n\n @classmethod\n def _from_json_dict_(cls, operations, **kwargs):\n return Moment(operations)\n\n def __add__(self, other: 'cirq.OP_TREE') -> 'cirq.Moment':\n from cirq.circuits import circuit\n if isinstance(other, circuit.Circuit):\n return NotImplemented # Delegate to Circuit.__radd__.\n return self.with_operations(other)\n\n def __sub__(self, other: 'cirq.OP_TREE') -> 'cirq.Moment':\n from cirq.ops import op_tree\n must_remove = set(op_tree.flatten_to_ops(other))\n new_ops = []\n for op in self.operations:\n if op in must_remove:\n must_remove.remove(op)\n else:\n new_ops.append(op)\n if must_remove:\n raise ValueError(f\"Subtracted missing operations from a moment.\\n\"\n f\"Missing operations: {must_remove!r}\\n\"\n f\"Moment: {self!r}\")\n return Moment(new_ops)\n\n # pylint: disable=function-redefined\n @overload\n def __getitem__(self, key: raw_types.Qid) -> 'cirq.Operation':\n pass\n\n @overload\n def __getitem__(self, key: Iterable[raw_types.Qid]) -> 'cirq.Moment':\n pass\n\n def __getitem__(self, key):\n if isinstance(key, raw_types.Qid):\n 
return self._operation_touching(key)\n elif isinstance(key, Iterable):\n qubits_to_keep = frozenset(key)\n ops_to_keep = tuple(\n op for op in self.operations\n if not qubits_to_keep.isdisjoint(frozenset(op.qubits)))\n return Moment(ops_to_keep)\n", "path": "cirq/ops/moment.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simplified time-slice of operations within a sequenced circuit.\"\"\"\n\nfrom typing import (Any, Callable, Dict, FrozenSet, Iterable, Iterator,\n overload, Optional, Tuple, TYPE_CHECKING, TypeVar, Union)\nfrom cirq import protocols\nfrom cirq._compat import deprecated_parameter\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\nTSelf_Moment = TypeVar('TSelf_Moment', bound='Moment')\n\n\nclass Moment:\n \"\"\"A time-slice of operations within a circuit.\n\n Grouping operations into moments is intended to be a strong suggestion to\n whatever is scheduling operations on real hardware. Operations in the same\n moment should execute at the same time (to the extent possible; not all\n operations have the same duration) and it is expected that all operations\n in a moment should be completed before beginning the next moment.\n\n Moment can be indexed by qubit or list of qubits:\n moment[qubit] returns the Operation in the moment which touches the\n given qubit, or throws KeyError if there is no such operation.\n moment[qubits] returns another Moment which consists only of those\n operations which touch at least one of the given qubits. 
If there\n are no such operations, returns an empty Moment.\n \"\"\"\n\n @deprecated_parameter(\n deadline='v0.9',\n fix='Don\\'t specify a keyword.',\n match=lambda _, kwargs: 'operations' in kwargs,\n parameter_desc='operations',\n rewrite=lambda args, kwargs: (args + (kwargs['operations'],), {}))\n def __init__(self, *contents: 'cirq.OP_TREE') -> None:\n \"\"\"Constructs a moment with the given operations.\n\n Args:\n operations: The operations applied within the moment.\n Will be flattened and frozen into a tuple before storing.\n\n Raises:\n ValueError: A qubit appears more than once.\n \"\"\"\n from cirq.ops import op_tree\n self._operations = tuple(op_tree.flatten_to_ops(contents))\n\n # Check that operations don't overlap.\n affected_qubits = [q for op in self.operations for q in op.qubits]\n self._qubits = frozenset(affected_qubits)\n if len(affected_qubits) != len(self._qubits):\n raise ValueError(\n 'Overlapping operations: {}'.format(self.operations))\n\n @property\n def operations(self) -> Tuple['cirq.Operation', ...]:\n return self._operations\n\n @property\n def qubits(self) -> FrozenSet['cirq.Qid']:\n return self._qubits\n\n def operates_on_single_qubit(self, qubit: 'cirq.Qid') -> bool:\n \"\"\"Determines if the moment has operations touching the given qubit.\n Args:\n qubit: The qubit that may or may not be touched by operations.\n Returns:\n Whether this moment has operations involving the qubit.\n \"\"\"\n return qubit in self.qubits\n\n def operates_on(self, qubits: Iterable['cirq.Qid']) -> bool:\n \"\"\"Determines if the moment has operations touching the given qubits.\n\n Args:\n qubits: The qubits that may or may not be touched by operations.\n\n Returns:\n Whether this moment has operations involving the qubits.\n \"\"\"\n return bool(set(qubits) & self.qubits)\n\n def operation_at(self, qubit: raw_types.Qid) -> Optional['cirq.Operation']:\n \"\"\"Returns the operation on a certain qubit for the moment.\n\n Args:\n qubit: The qubit on which the returned Operation operates\n on.\n\n Returns:\n The operation that operates on the qubit for that moment.\n \"\"\"\n if self.operates_on([qubit]):\n return self.__getitem__(qubit)\n else:\n return None\n\n def with_operation(self, operation: 'cirq.Operation') -> 'cirq.Moment':\n \"\"\"Returns an equal moment, but with the given op added.\n\n Args:\n operation: The operation to append.\n\n Returns:\n The new moment.\n \"\"\"\n if any(q in self._qubits for q in operation.qubits):\n raise ValueError('Overlapping operations: {}'.format(operation))\n\n # Use private variables to facilitate a quick copy\n m = Moment()\n m._operations = self.operations + (operation,)\n m._qubits = frozenset(self._qubits.union(set(operation.qubits)))\n\n return m\n\n def with_operations(self, *contents: 'cirq.OP_TREE') -> 'cirq.Moment':\n \"\"\"Returns a new moment with the given contents added.\n\n Args:\n contents: New operations to add to this moment.\n\n Returns:\n The new moment.\n \"\"\"\n from cirq.ops import op_tree\n\n operations = list(self._operations)\n qubits = set(self._qubits)\n for op in op_tree.flatten_to_ops(contents):\n if any(q in qubits for q in op.qubits):\n raise ValueError('Overlapping operations: {}'.format(op))\n operations.append(op)\n qubits.update(op.qubits)\n\n # Use private variables to facilitate a quick copy\n m = Moment()\n m._operations = tuple(operations)\n m._qubits = frozenset(qubits)\n\n return m\n\n def without_operations_touching(self, qubits: Iterable['cirq.Qid']\n ) -> 'cirq.Moment':\n \"\"\"Returns an 
equal moment, but without ops on the given qubits.\n\n Args:\n qubits: Operations that touch these will be removed.\n\n Returns:\n The new moment.\n \"\"\"\n qubits = frozenset(qubits)\n if not self.operates_on(qubits):\n return self\n return Moment(\n operation for operation in self.operations\n if qubits.isdisjoint(frozenset(operation.qubits)))\n\n def _operation_touching(self, qubit: raw_types.Qid) -> 'cirq.Operation':\n \"\"\"Returns the operation touching given qubit.\n Args:\n qubit: Operations that touch this qubit will be returned.\n Returns:\n The operation which touches `qubit`.\n \"\"\"\n for op in self.operations:\n if qubit in op.qubits:\n return op\n raise KeyError(\"Moment doesn't act on given qubit\")\n\n def __copy__(self):\n return type(self)(self.operations)\n\n def __bool__(self) -> bool:\n return bool(self.operations)\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n\n return (sorted(self.operations, key=lambda op: op.qubits) == sorted(\n other.operations, key=lambda op: op.qubits))\n\n def _approx_eq_(self, other: Any, atol: Union[int, float]) -> bool:\n \"\"\"See `cirq.protocols.SupportsApproximateEquality`.\"\"\"\n if not isinstance(other, type(self)):\n return NotImplemented\n\n return protocols.approx_eq(sorted(self.operations,\n key=lambda op: op.qubits),\n sorted(other.operations,\n key=lambda op: op.qubits),\n atol=atol)\n\n def __ne__(self, other) -> bool:\n return not self == other\n\n def __hash__(self):\n return hash(\n (Moment, tuple(sorted(self.operations, key=lambda op: op.qubits))))\n\n def __iter__(self) -> Iterator['cirq.Operation']:\n return iter(self.operations)\n\n def __pow__(self, power):\n if power == 1:\n return self\n new_ops = []\n for op in self.operations:\n new_op = protocols.pow(op, power, default=None)\n if new_op is None:\n return NotImplemented\n new_ops.append(new_op)\n return Moment(new_ops)\n\n def __len__(self) -> int:\n return len(self.operations)\n\n def __repr__(self) -> str:\n if not self.operations:\n return 'cirq.Moment()'\n\n block = '\\n'.join([repr(op) + ',' for op in self.operations])\n indented = ' ' + '\\n '.join(block.split('\\n'))\n\n return f'cirq.Moment(\\n{indented}\\n)'\n\n def __str__(self) -> str:\n return ' and '.join(str(op) for op in self.operations)\n\n def transform_qubits(self: TSelf_Moment,\n func: Callable[['cirq.Qid'], 'cirq.Qid']\n ) -> TSelf_Moment:\n \"\"\"Returns the same moment, but with different qubits.\n\n Args:\n func: The function to use to turn each current qubit into a desired\n new qubit.\n\n Returns:\n The receiving moment but with qubits transformed by the given\n function.\n \"\"\"\n return self.__class__(op.transform_qubits(func)\n for op in self.operations)\n\n def _json_dict_(self) -> Dict[str, Any]:\n return protocols.obj_to_dict_helper(self, ['operations'])\n\n @classmethod\n def _from_json_dict_(cls, operations, **kwargs):\n return Moment(operations)\n\n def __add__(self, other: 'cirq.OP_TREE') -> 'cirq.Moment':\n from cirq.circuits import circuit\n if isinstance(other, circuit.Circuit):\n return NotImplemented # Delegate to Circuit.__radd__.\n return self.with_operations(other)\n\n def __sub__(self, other: 'cirq.OP_TREE') -> 'cirq.Moment':\n from cirq.ops import op_tree\n must_remove = set(op_tree.flatten_to_ops(other))\n new_ops = []\n for op in self.operations:\n if op in must_remove:\n must_remove.remove(op)\n else:\n new_ops.append(op)\n if must_remove:\n raise ValueError(f\"Subtracted missing operations from a 
moment.\\n\"\n f\"Missing operations: {must_remove!r}\\n\"\n f\"Moment: {self!r}\")\n return Moment(new_ops)\n\n # pylint: disable=function-redefined\n @overload\n def __getitem__(self, key: raw_types.Qid) -> 'cirq.Operation':\n pass\n\n @overload\n def __getitem__(self, key: Iterable[raw_types.Qid]) -> 'cirq.Moment':\n pass\n\n def __getitem__(self, key):\n if isinstance(key, raw_types.Qid):\n return self._operation_touching(key)\n elif isinstance(key, Iterable):\n qubits_to_keep = frozenset(key)\n ops_to_keep = tuple(\n op for op in self.operations\n if not qubits_to_keep.isdisjoint(frozenset(op.qubits)))\n return Moment(ops_to_keep)\n", "path": "cirq/ops/moment.py"}]}
3,437
311
gh_patches_debug_31457
rasdani/github-patches
git_diff
mozmeao__snippets-service-1206
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create Freq Capping Goals for Jobs Create Global Frequency Capping goals for Jobs (introduced in #1034). Goals: - Total impressions - Total clicks and button clicks (combined) - Total blocks A `Job` can have multiple goals (freq capping, datetime), first one reached will un-publish the `Job`. Requires #1036. Also `Job`s with Freq Cap goals must get data from Redash more often (TBD). ## Use case: - Publish a Snippet for 1M impressions. It's fine it snippet gets 5K-10K impressions more. ## Questions - [x] How do we handle Jobs with Freq Capping when no Telemetry data is available? - Jobs that have impression, block or click limits set and no metrics data updated for at least 24 hours will be marked Complete. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `snippets/base/management/commands/update_jobs.py` Content: ``` 1 from datetime import datetime 2 3 from django.contrib.auth import get_user_model 4 from django.core.management.base import BaseCommand 5 from django.db import transaction 6 from django.db.models import F, Q 7 8 from snippets.base.models import Job 9 10 11 class Command(BaseCommand): 12 args = "(no args)" 13 help = "Update Jobs" 14 15 @transaction.atomic 16 def handle(self, *args, **options): 17 now = datetime.utcnow() 18 user = get_user_model().objects.get_or_create(username='snippets_bot')[0] 19 count_total_completed = 0 20 21 # Publish Scheduled Jobs with `publish_start` before now or without 22 # publish_start. 23 jobs = Job.objects.filter(status=Job.SCHEDULED).filter( 24 Q(publish_start__lte=now) | Q(publish_start=None) 25 ) 26 count_published = jobs.count() 27 for job in jobs: 28 job.change_status( 29 status=Job.PUBLISHED, 30 user=user, 31 reason='Published start date reached.', 32 ) 33 34 # Disable Published Jobs with `publish_end` before now. 35 jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now) 36 count_publication_end = jobs.count() 37 count_total_completed += count_publication_end 38 39 for job in jobs: 40 job.change_status( 41 status=Job.COMPLETED, 42 user=user, 43 reason='Publication end date reached.', 44 ) 45 46 # Disable Jobs that reached Impression, Click or Block limits. 47 count_limit = {} 48 for limit in ['impressions', 'clicks', 'blocks']: 49 jobs = (Job.objects 50 .filter(status=Job.PUBLISHED) 51 .exclude(**{f'limit_{limit}': 0}) 52 .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')})) 53 for job in jobs: 54 job.change_status( 55 status=Job.COMPLETED, 56 user=user, 57 reason=f'Limit reached: {limit}.', 58 ) 59 60 count_limit[limit] = jobs.count() 61 count_total_completed += count_limit[limit] 62 63 count_running = Job.objects.filter(status=Job.PUBLISHED).count() 64 65 self.stdout.write( 66 f'Jobs Published: {count_published}\n' 67 f'Jobs Completed: {count_total_completed}\n' 68 f' - Reached Publication End Date: {count_publication_end}\n' 69 f' - Reached Impressions Limit: {count_limit["impressions"]}\n' 70 f' - Reached Clicks Limit: {count_limit["clicks"]}\n' 71 f' - Reached Blocks Limit: {count_limit["blocks"]}\n' 72 f'Total Jobs Running: {count_running}\n' 73 ) 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py --- a/snippets/base/management/commands/update_jobs.py +++ b/snippets/base/management/commands/update_jobs.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta from django.contrib.auth import get_user_model from django.core.management.base import BaseCommand @@ -60,6 +60,23 @@ count_limit[limit] = jobs.count() count_total_completed += count_limit[limit] + # Disable Jobs that have Impression, Click or Block limits but don't + # have metrics data for at least 24h. This is to handle cases where the + # Metrics Pipeline is broken. + yesterday = datetime.utcnow() - timedelta(days=1) + jobs = (Job.objects + .filter(status=Job.PUBLISHED) + .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0) + .filter(metric_last_update__lt=yesterday)) + for job in jobs: + job.change_status( + status=Job.COMPLETED, + user=user, + reason=f'Premature termination due to missing metrics.', + ) + count_premature_termination = jobs.count() + count_total_completed += count_premature_termination + count_running = Job.objects.filter(status=Job.PUBLISHED).count() self.stdout.write( @@ -69,5 +86,6 @@ f' - Reached Impressions Limit: {count_limit["impressions"]}\n' f' - Reached Clicks Limit: {count_limit["clicks"]}\n' f' - Reached Blocks Limit: {count_limit["blocks"]}\n' + f' - Premature Termination due to missing metrics: {count_premature_termination}\n' f'Total Jobs Running: {count_running}\n' )
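For illustration, the rule this patch introduces — complete any published job that has a frequency-capping limit set but whose metrics have not been refreshed within 24 hours — reduces to a small standalone predicate. The sketch below is not project code: plain datetimes and keyword arguments stand in for the Django model fields named in the diff (`limit_*`, `metric_last_update`).

```python
from datetime import datetime, timedelta

def stale_metrics_termination(limit_impressions, limit_clicks, limit_blocks,
                              metric_last_update, now=None,
                              max_age=timedelta(hours=24)):
    """True when at least one freq-capping limit is set (non-zero) but the
    metrics timestamp is older than max_age -- the premature-termination case."""
    now = now or datetime.utcnow()
    has_limits = any((limit_impressions, limit_clicks, limit_blocks))
    return has_limits and metric_last_update < now - max_age

# A job capped at 1M impressions whose metrics pipeline went quiet 30 hours ago:
print(stale_metrics_termination(1_000_000, 0, 0,
                                datetime.utcnow() - timedelta(hours=30)))  # True
# The same job with fresh metrics keeps running:
print(stale_metrics_termination(1_000_000, 0, 0,
                                datetime.utcnow() - timedelta(hours=2)))   # False
```

The ORM version in the diff expresses the same condition as an `exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)` followed by `filter(metric_last_update__lt=yesterday)`.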
{"golden_diff": "diff --git a/snippets/base/management/commands/update_jobs.py b/snippets/base/management/commands/update_jobs.py\n--- a/snippets/base/management/commands/update_jobs.py\n+++ b/snippets/base/management/commands/update_jobs.py\n@@ -1,4 +1,4 @@\n-from datetime import datetime\n+from datetime import datetime, timedelta\n \n from django.contrib.auth import get_user_model\n from django.core.management.base import BaseCommand\n@@ -60,6 +60,23 @@\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n \n+ # Disable Jobs that have Impression, Click or Block limits but don't\n+ # have metrics data for at least 24h. This is to handle cases where the\n+ # Metrics Pipeline is broken.\n+ yesterday = datetime.utcnow() - timedelta(days=1)\n+ jobs = (Job.objects\n+ .filter(status=Job.PUBLISHED)\n+ .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n+ .filter(metric_last_update__lt=yesterday))\n+ for job in jobs:\n+ job.change_status(\n+ status=Job.COMPLETED,\n+ user=user,\n+ reason=f'Premature termination due to missing metrics.',\n+ )\n+ count_premature_termination = jobs.count()\n+ count_total_completed += count_premature_termination\n+\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n \n self.stdout.write(\n@@ -69,5 +86,6 @@\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n+ f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "issue": "Create Freq Capping Goals for Jobs\nCreate Global Frequency Capping goals for Jobs (introduced in #1034). \r\n\r\nGoals:\r\n - Total impressions\r\n - Total clicks and button clicks (combined)\r\n - Total blocks\r\n\r\nA `Job` can have multiple goals (freq capping, datetime), first one reached will un-publish the `Job`.\r\n\r\nRequires #1036. Also `Job`s with Freq Cap goals must get data from Redash more often (TBD).\r\n\r\n## Use case:\r\n - Publish a Snippet for 1M impressions. It's fine it snippet gets 5K-10K impressions more.\r\n\r\n\r\n## Questions\r\n - [x] How do we handle Jobs with Freq Capping when no Telemetry data is available? 
\r\n - Jobs that have impression, block or click limits set and no metrics data updated for at least 24 hours will be marked Complete.\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.db.models import F, Q\n\nfrom snippets.base.models import Job\n\n\nclass Command(BaseCommand):\n args = \"(no args)\"\n help = \"Update Jobs\"\n\n @transaction.atomic\n def handle(self, *args, **options):\n now = datetime.utcnow()\n user = get_user_model().objects.get_or_create(username='snippets_bot')[0]\n count_total_completed = 0\n\n # Publish Scheduled Jobs with `publish_start` before now or without\n # publish_start.\n jobs = Job.objects.filter(status=Job.SCHEDULED).filter(\n Q(publish_start__lte=now) | Q(publish_start=None)\n )\n count_published = jobs.count()\n for job in jobs:\n job.change_status(\n status=Job.PUBLISHED,\n user=user,\n reason='Published start date reached.',\n )\n\n # Disable Published Jobs with `publish_end` before now.\n jobs = Job.objects.filter(status=Job.PUBLISHED, publish_end__lte=now)\n count_publication_end = jobs.count()\n count_total_completed += 
count_publication_end\n\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason='Publication end date reached.',\n )\n\n # Disable Jobs that reached Impression, Click or Block limits.\n count_limit = {}\n for limit in ['impressions', 'clicks', 'blocks']:\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(**{f'limit_{limit}': 0})\n .filter(**{f'limit_{limit}__lte': F(f'metric_{limit}')}))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Limit reached: {limit}.',\n )\n\n count_limit[limit] = jobs.count()\n count_total_completed += count_limit[limit]\n\n # Disable Jobs that have Impression, Click or Block limits but don't\n # have metrics data for at least 24h. This is to handle cases where the\n # Metrics Pipeline is broken.\n yesterday = datetime.utcnow() - timedelta(days=1)\n jobs = (Job.objects\n .filter(status=Job.PUBLISHED)\n .exclude(limit_impressions=0, limit_clicks=0, limit_blocks=0)\n .filter(metric_last_update__lt=yesterday))\n for job in jobs:\n job.change_status(\n status=Job.COMPLETED,\n user=user,\n reason=f'Premature termination due to missing metrics.',\n )\n count_premature_termination = jobs.count()\n count_total_completed += count_premature_termination\n\n count_running = Job.objects.filter(status=Job.PUBLISHED).count()\n\n self.stdout.write(\n f'Jobs Published: {count_published}\\n'\n f'Jobs Completed: {count_total_completed}\\n'\n f' - Reached Publication End Date: {count_publication_end}\\n'\n f' - Reached Impressions Limit: {count_limit[\"impressions\"]}\\n'\n f' - Reached Clicks Limit: {count_limit[\"clicks\"]}\\n'\n f' - Reached Blocks Limit: {count_limit[\"blocks\"]}\\n'\n f' - Premature Termination due to missing metrics: {count_premature_termination}\\n'\n f'Total Jobs Running: {count_running}\\n'\n )\n", "path": "snippets/base/management/commands/update_jobs.py"}]}
1,176
440
gh_patches_debug_10443
rasdani/github-patches
git_diff
conan-io__conan-4096
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Conan Server 1.10 is broken #### Description Hi! I'm trying to start conan server but seems like is broken. $ conan_server > Traceback (most recent call last): > File "/opt/miniconda3/bin/conan_server", line 7, in <module> > from conans.conan_server import run > File "/opt/miniconda3/lib/python3.6/site-packages/conans/conan_server.py", line 1, in <module> > from conans.server.server_launcher import main > File "/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py", line 53, in <module> > launcher = ServerLauncher() > File "/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py", line 47, in __init__ server_capabilities, only_v1=False) > TypeError: __init__() got an unexpected keyword argument 'only_v1' #### Environment Conan Versions: 1.10 Python version: 3.6, 3.7 OS: Linux Ubuntu 18.04 #### Steps to reproduce pip3 install conan==1.10.0 conan_server or using Conan Docker Tools: BUILD_CONAN_SERVER_IMAGE=1 python build.py docker run -t -p 9300:9300 --name conan_server conanio/conan_server:latest - [X] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md). - [X] I've specified the Conan version, operating system version and any tool that can be relevant. - [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/server/server_launcher.py` Content: ``` 1 #!/usr/bin/python 2 import os 3 from conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator 4 from conans.server.conf import get_server_store 5 from conans.server.rest.server import ConanServer 6 from conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager 7 from conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager 8 from conans.server.conf import MIN_CLIENT_COMPATIBLE_VERSION 9 from conans.server.plugin_loader import load_authentication_plugin 10 from conans.model.version import Version 11 from conans.server.migrate import migrate_and_get_server_config 12 from conans import __version__ as SERVER_VERSION, REVISIONS 13 from conans.paths import conan_expand_user 14 from conans import SERVER_CAPABILITIES 15 16 17 class ServerLauncher(object): 18 def __init__(self): 19 user_folder = conan_expand_user("~") 20 server_folder = os.path.join(user_folder, '.conan_server') 21 22 server_config = migrate_and_get_server_config(user_folder) 23 custom_auth = server_config.custom_authenticator 24 if custom_auth: 25 authenticator = load_authentication_plugin(server_folder, custom_auth) 26 else: 27 authenticator = BasicAuthenticator(dict(server_config.users)) 28 29 authorizer = BasicAuthorizer(server_config.read_permissions, 30 server_config.write_permissions) 31 credentials_manager = JWTCredentialsManager(server_config.jwt_secret, 32 server_config.jwt_expire_time) 33 34 updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret, 35 server_config.authorize_timeout) 36 37 server_store = get_server_store(server_config.disk_storage_path, 38 server_config.public_url, 39 updown_auth_manager=updown_auth_manager) 40 41 server_capabilities = SERVER_CAPABILITIES 42 server_capabilities.append(REVISIONS) 43 44 self.ra = ConanServer(server_config.port, credentials_manager, 
updown_auth_manager, 45 authorizer, authenticator, server_store, 46 Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION), 47 server_capabilities, only_v1=False) 48 49 def launch(self): 50 self.ra.run(host="0.0.0.0") 51 52 53 launcher = ServerLauncher() 54 app = launcher.ra.root_app 55 56 57 def main(*args): 58 launcher.launch() 59 60 61 if __name__ == "__main__": 62 main() 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/server/server_launcher.py b/conans/server/server_launcher.py --- a/conans/server/server_launcher.py +++ b/conans/server/server_launcher.py @@ -44,7 +44,7 @@ self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager, authorizer, authenticator, server_store, Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION), - server_capabilities, only_v1=False) + server_capabilities) def launch(self): self.ra.run(host="0.0.0.0")
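For illustration, the traceback is the generic "unexpected keyword argument" failure mode: a caller built against an older constructor signature. The actual fix above simply drops the stale `only_v1=False` argument. The stand-in class below (not the real `ConanServer`) reproduces the error and shows one defensive pattern — filtering kwargs against the current signature — that callers spanning several library versions sometimes use.

```python
import inspect

class FakeServer:
    # Stand-in constructor: imagine the 'only_v1' parameter was removed upstream.
    def __init__(self, port, capabilities):
        self.port = port
        self.capabilities = capabilities

kwargs = {"port": 9300, "capabilities": ["revisions"], "only_v1": False}

try:
    FakeServer(**kwargs)          # reproduces the launcher crash
except TypeError as exc:
    print(exc)                    # ... got an unexpected keyword argument 'only_v1'

accepted = set(inspect.signature(FakeServer).parameters)
server = FakeServer(**{k: v for k, v in kwargs.items() if k in accepted})
print(server.port)                # 9300
```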
{"golden_diff": "diff --git a/conans/server/server_launcher.py b/conans/server/server_launcher.py\n--- a/conans/server/server_launcher.py\n+++ b/conans/server/server_launcher.py\n@@ -44,7 +44,7 @@\n self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,\n authorizer, authenticator, server_store,\n Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),\n- server_capabilities, only_v1=False)\n+ server_capabilities)\n \n def launch(self):\n self.ra.run(host=\"0.0.0.0\")\n", "issue": "Conan Server 1.10 is broken\n#### Description\r\nHi!\r\n\r\nI'm trying to start conan server but seems like is broken.\r\n\r\n $ conan_server\r\n\r\n> Traceback (most recent call last):\r\n> File \"/opt/miniconda3/bin/conan_server\", line 7, in <module>\r\n> from conans.conan_server import run\r\n> File \"/opt/miniconda3/lib/python3.6/site-packages/conans/conan_server.py\", line 1, in <module>\r\n> from conans.server.server_launcher import main\r\n> File \"/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py\", line 53, in <module>\r\n> launcher = ServerLauncher()\r\n> File \"/opt/miniconda3/lib/python3.6/site-packages/conans/server/server_launcher.py\", line 47, in __init__\r\n server_capabilities, only_v1=False)\r\n> TypeError: __init__() got an unexpected keyword argument 'only_v1'\r\n\r\n#### Environment\r\nConan Versions: 1.10\r\nPython version: 3.6, 3.7\r\nOS: Linux Ubuntu 18.04\r\n\r\n#### Steps to reproduce\r\n pip3 install conan==1.10.0\r\n conan_server\r\n\r\nor using Conan Docker Tools:\r\n\r\n BUILD_CONAN_SERVER_IMAGE=1 python build.py\r\n docker run -t -p 9300:9300 --name conan_server conanio/conan_server:latest \r\n\r\n\r\n- [X] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [X] I've specified the Conan version, operating system version and any tool that can be relevant.\r\n- [X] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python\nimport os\nfrom conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator\nfrom conans.server.conf import get_server_store\nfrom conans.server.rest.server import ConanServer\nfrom conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager\nfrom conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager\nfrom conans.server.conf import MIN_CLIENT_COMPATIBLE_VERSION\nfrom conans.server.plugin_loader import load_authentication_plugin\nfrom conans.model.version import Version\nfrom conans.server.migrate import migrate_and_get_server_config\nfrom conans import __version__ as SERVER_VERSION, REVISIONS\nfrom conans.paths import conan_expand_user\nfrom conans import SERVER_CAPABILITIES\n\n\nclass ServerLauncher(object):\n def __init__(self):\n user_folder = conan_expand_user(\"~\")\n server_folder = os.path.join(user_folder, '.conan_server')\n\n server_config = migrate_and_get_server_config(user_folder)\n custom_auth = server_config.custom_authenticator\n if custom_auth:\n authenticator = load_authentication_plugin(server_folder, custom_auth)\n else:\n authenticator = BasicAuthenticator(dict(server_config.users))\n\n authorizer = BasicAuthorizer(server_config.read_permissions,\n server_config.write_permissions)\n credentials_manager = JWTCredentialsManager(server_config.jwt_secret,\n server_config.jwt_expire_time)\n\n updown_auth_manager = 
JWTUpDownAuthManager(server_config.updown_secret,\n server_config.authorize_timeout)\n\n server_store = get_server_store(server_config.disk_storage_path,\n server_config.public_url,\n updown_auth_manager=updown_auth_manager)\n\n server_capabilities = SERVER_CAPABILITIES\n server_capabilities.append(REVISIONS)\n\n self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,\n authorizer, authenticator, server_store,\n Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),\n server_capabilities, only_v1=False)\n\n def launch(self):\n self.ra.run(host=\"0.0.0.0\")\n\n\nlauncher = ServerLauncher()\napp = launcher.ra.root_app\n\n\ndef main(*args):\n launcher.launch()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "conans/server/server_launcher.py"}], "after_files": [{"content": "#!/usr/bin/python\nimport os\nfrom conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator\nfrom conans.server.conf import get_server_store\nfrom conans.server.rest.server import ConanServer\nfrom conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager\nfrom conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager\nfrom conans.server.conf import MIN_CLIENT_COMPATIBLE_VERSION\nfrom conans.server.plugin_loader import load_authentication_plugin\nfrom conans.model.version import Version\nfrom conans.server.migrate import migrate_and_get_server_config\nfrom conans import __version__ as SERVER_VERSION, REVISIONS\nfrom conans.paths import conan_expand_user\nfrom conans import SERVER_CAPABILITIES\n\n\nclass ServerLauncher(object):\n def __init__(self):\n user_folder = conan_expand_user(\"~\")\n server_folder = os.path.join(user_folder, '.conan_server')\n\n server_config = migrate_and_get_server_config(user_folder)\n custom_auth = server_config.custom_authenticator\n if custom_auth:\n authenticator = load_authentication_plugin(server_folder, custom_auth)\n else:\n authenticator = BasicAuthenticator(dict(server_config.users))\n\n authorizer = BasicAuthorizer(server_config.read_permissions,\n server_config.write_permissions)\n credentials_manager = JWTCredentialsManager(server_config.jwt_secret,\n server_config.jwt_expire_time)\n\n updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,\n server_config.authorize_timeout)\n\n server_store = get_server_store(server_config.disk_storage_path,\n server_config.public_url,\n updown_auth_manager=updown_auth_manager)\n\n server_capabilities = SERVER_CAPABILITIES\n server_capabilities.append(REVISIONS)\n\n self.ra = ConanServer(server_config.port, credentials_manager, updown_auth_manager,\n authorizer, authenticator, server_store,\n Version(SERVER_VERSION), Version(MIN_CLIENT_COMPATIBLE_VERSION),\n server_capabilities)\n\n def launch(self):\n self.ra.run(host=\"0.0.0.0\")\n\n\nlauncher = ServerLauncher()\napp = launcher.ra.root_app\n\n\ndef main(*args):\n launcher.launch()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "conans/server/server_launcher.py"}]}
1,259
125
gh_patches_debug_26488
rasdani/github-patches
git_diff
ResonantGeoData__ResonantGeoData-469
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GeoAdmin widget incorrectly rendering geometry The `OpenLayersWidget` we are using in the admin view of the geo Django models is incorrectly rendering geometry. I suspect this is a simple coordinate issue where we need to transform the geometry before passing it to the model admin widget: https://user-images.githubusercontent.com/22067021/117475485-5761c300-af19-11eb-97b5-7cd8e29bf14c.mov --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django-rgd-imagery/rgd_imagery/admin/base.py` Content: ``` 1 from django.contrib import admin 2 3 # from django.contrib.admin import SimpleListFilter 4 from django.contrib.gis.admin import OSMGeoAdmin 5 from rgd.admin.mixins import ( 6 MODIFIABLE_FILTERS, 7 SPATIAL_ENTRY_FILTERS, 8 TASK_EVENT_FILTERS, 9 TASK_EVENT_READONLY, 10 _FileGetNameMixin, 11 reprocess, 12 ) 13 from rgd.utility import get_or_create_no_commit 14 from rgd_imagery.models import ( 15 BandMeta, 16 ConvertedImage, 17 Image, 18 ImageMeta, 19 ImageSet, 20 ImageSetSpatial, 21 Raster, 22 ) 23 24 25 def _make_image_set_from_images(images): 26 """Images should be an iterable, not a queryset.""" 27 imset = ImageSet() 28 imset.save() # Have to save before adding to ManyToManyField? 29 for image in images: 30 imset.images.add(image) 31 imset.save() 32 return imset 33 34 35 def make_image_set_from_images(modeladmin, request, queryset): 36 """Make an `ImageSet` of the selected `ImageMeta`s. 37 38 This is an action on `ImageMeta`. 39 """ 40 return _make_image_set_from_images(queryset.all()) 41 42 43 def _make_raster_from_image_set(imset): 44 raster = Raster() 45 raster.image_set = imset 46 raster.save() 47 return raster 48 49 50 def make_raster_from_images(modeladmin, request, queryset): 51 """Make a raster of the selected `ImageMeta`s. 52 53 This is an action on `ImageMeta` 54 """ 55 imset = make_image_set_from_images(modeladmin, request, queryset) 56 return _make_raster_from_image_set(imset) 57 58 59 def make_raster_from_image_set(modeladmin, request, queryset): 60 """Make a raster of the selected `ImageSet`. 61 62 This is an action on `ImageSet`. 63 """ 64 rasters = [] 65 for imset in queryset.all(): 66 rasters.append(_make_raster_from_image_set(imset)) 67 return rasters 68 69 70 def make_raster_for_each_image(modeladmin, request, queryset): 71 """Make a raster for each of the selected `ImageMeta`s. 72 73 This is an action on `ImageMeta`. 74 75 This creates one raster for each image entry. 
76 """ 77 rasters = [] 78 for img in queryset.all(): 79 imset = _make_image_set_from_images( 80 [ 81 img, 82 ] 83 ) 84 rasters.append(_make_raster_from_image_set(imset)) 85 return rasters 86 87 88 def clean_empty_image_sets(modeladmin, request, queryset): 89 """Delete empty `ImageSet`s.""" 90 q = queryset.filter(images=None) 91 q.delete() 92 93 94 def convert_images(modeladmin, request, queryset): 95 for image in queryset.all(): 96 entry, created = get_or_create_no_commit(ConvertedImage, source_image=image) 97 entry.save() 98 99 100 # class ImageSetSpatialFilter(SimpleListFilter): 101 # title = 'Spatial' 102 # parameter_name = 'imagesetspatial' 103 # 104 # def lookups(self, request, model_admin): 105 # return [True, False] 106 # 107 # def queryset(self, request, queryset): 108 # if not self.value(): 109 # return queryset.filter(imagesetspatial__isnull=True) 110 # if self.value(): 111 # return queryset.filter(imagesetspatial__isnull=False) 112 113 114 class ImageSetSpatialInline(admin.StackedInline): 115 model = ImageSetSpatial 116 fk_name = 'image_set' 117 list_display = ( 118 'pk', 119 'modified', 120 'created', 121 ) 122 list_filter = MODIFIABLE_FILTERS + SPATIAL_ENTRY_FILTERS 123 124 125 @admin.register(ImageSet) 126 class ImageSetAdmin(OSMGeoAdmin): 127 list_display = ( 128 'pk', 129 'name', 130 'count', 131 'modified', 132 'created', 133 ) 134 actions = ( 135 make_raster_from_image_set, 136 clean_empty_image_sets, 137 ) 138 list_filter = MODIFIABLE_FILTERS # (ImageSetSpatialFilter, ) 139 inlines = (ImageSetSpatialInline,) 140 141 142 class BandMetaInline(admin.StackedInline): 143 model = BandMeta 144 fk_name = 'parent_image' 145 146 list_display = ( 147 'pk', 148 'parent_image', 149 'modified', 150 'created', 151 ) 152 readonly_fields = ( 153 'mean', 154 'max', 155 'min', 156 'modified', 157 'created', 158 'parent_image', 159 'std', 160 'nodata_value', 161 'dtype', 162 'band_number', 163 ) 164 165 def has_add_permission(self, request, obj=None): 166 """Prevent user from adding more.""" 167 return False 168 169 170 class ImageMetaInline(admin.StackedInline): 171 model = ImageMeta 172 fk_name = 'parent_image' 173 list_display = ( 174 'pk', 175 'modified', 176 'created', 177 ) 178 readonly_fields = ( 179 'number_of_bands', 180 'parent_image', 181 'height', 182 'width', 183 'driver', 184 'modified', 185 'created', 186 ) 187 list_filter = MODIFIABLE_FILTERS + ( 188 'number_of_bands', 189 'driver', 190 ) 191 192 193 @admin.register(Image) 194 class ImageAdmin(OSMGeoAdmin, _FileGetNameMixin): 195 list_display = ( 196 'pk', 197 'get_name', 198 'status', 199 'modified', 200 'created', 201 'image_data_link', 202 ) 203 readonly_fields = ( 204 'modified', 205 'created', 206 ) + TASK_EVENT_READONLY 207 actions = ( 208 reprocess, 209 make_image_set_from_images, 210 make_raster_from_images, 211 make_raster_for_each_image, 212 convert_images, 213 ) 214 list_filter = MODIFIABLE_FILTERS + TASK_EVENT_FILTERS 215 inlines = ( 216 ImageMetaInline, 217 BandMetaInline, 218 ) 219 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django-rgd-imagery/rgd_imagery/admin/base.py b/django-rgd-imagery/rgd_imagery/admin/base.py --- a/django-rgd-imagery/rgd_imagery/admin/base.py +++ b/django-rgd-imagery/rgd_imagery/admin/base.py @@ -1,3 +1,5 @@ +import copy + from django.contrib import admin # from django.contrib.admin import SimpleListFilter @@ -111,7 +113,7 @@ # return queryset.filter(imagesetspatial__isnull=False) -class ImageSetSpatialInline(admin.StackedInline): +class ImageSetSpatialInline(OSMGeoAdmin, admin.StackedInline): model = ImageSetSpatial fk_name = 'image_set' list_display = ( @@ -121,6 +123,20 @@ ) list_filter = MODIFIABLE_FILTERS + SPATIAL_ENTRY_FILTERS + def __init__(self, parent_model, admin_site): + self.admin_site = admin_site + self.parent_model = parent_model + self.opts = self.model._meta + self.has_registered_model = admin_site.is_registered(self.model) + overrides = copy.deepcopy(admin.options.FORMFIELD_FOR_DBFIELD_DEFAULTS) + for k, v in self.formfield_overrides.items(): + overrides.setdefault(k, {}).update(v) + self.formfield_overrides = overrides + if self.verbose_name is None: + self.verbose_name = self.model._meta.verbose_name + if self.verbose_name_plural is None: + self.verbose_name_plural = self.model._meta.verbose_name_plural + @admin.register(ImageSet) class ImageSetAdmin(OSMGeoAdmin):
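For illustration, the heart of the patched `__init__` is the `formfield_overrides` merge that Django's admin machinery normally performs: deep-copy the global defaults (`admin.options.FORMFIELD_FOR_DBFIELD_DEFAULTS` in the diff), then layer the class-level overrides — here, the map widget configuration contributed by `OSMGeoAdmin` — on top. The snippet replays that merge with placeholder dictionaries; the field and widget names are illustrative, not Django's real objects.

```python
import copy

# Placeholder for admin.options.FORMFIELD_FOR_DBFIELD_DEFAULTS
GLOBAL_DEFAULTS = {"GeometryField": {"widget": "OpenLayersWidget"}}

class OsmStyleInline:
    # Placeholder for what an OSMGeoAdmin-derived inline contributes
    formfield_overrides = {"GeometryField": {"widget": "OSMWidget", "map_srid": 3857}}

def merged_overrides(inline_cls, defaults=GLOBAL_DEFAULTS):
    overrides = copy.deepcopy(defaults)              # never mutate the shared defaults
    for field, opts in inline_cls.formfield_overrides.items():
        overrides.setdefault(field, {}).update(opts)
    return overrides

print(merged_overrides(OsmStyleInline))
# {'GeometryField': {'widget': 'OSMWidget', 'map_srid': 3857}}
```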
{"golden_diff": "diff --git a/django-rgd-imagery/rgd_imagery/admin/base.py b/django-rgd-imagery/rgd_imagery/admin/base.py\n--- a/django-rgd-imagery/rgd_imagery/admin/base.py\n+++ b/django-rgd-imagery/rgd_imagery/admin/base.py\n@@ -1,3 +1,5 @@\n+import copy\n+\n from django.contrib import admin\n \n # from django.contrib.admin import SimpleListFilter\n@@ -111,7 +113,7 @@\n # return queryset.filter(imagesetspatial__isnull=False)\n \n \n-class ImageSetSpatialInline(admin.StackedInline):\n+class ImageSetSpatialInline(OSMGeoAdmin, admin.StackedInline):\n model = ImageSetSpatial\n fk_name = 'image_set'\n list_display = (\n@@ -121,6 +123,20 @@\n )\n list_filter = MODIFIABLE_FILTERS + SPATIAL_ENTRY_FILTERS\n \n+ def __init__(self, parent_model, admin_site):\n+ self.admin_site = admin_site\n+ self.parent_model = parent_model\n+ self.opts = self.model._meta\n+ self.has_registered_model = admin_site.is_registered(self.model)\n+ overrides = copy.deepcopy(admin.options.FORMFIELD_FOR_DBFIELD_DEFAULTS)\n+ for k, v in self.formfield_overrides.items():\n+ overrides.setdefault(k, {}).update(v)\n+ self.formfield_overrides = overrides\n+ if self.verbose_name is None:\n+ self.verbose_name = self.model._meta.verbose_name\n+ if self.verbose_name_plural is None:\n+ self.verbose_name_plural = self.model._meta.verbose_name_plural\n+\n \n @admin.register(ImageSet)\n class ImageSetAdmin(OSMGeoAdmin):\n", "issue": "GeoAdmin widget incorrectly rendering geometry\nThe `OpenLayersWidget` we are using in the admin view of the geo Django models is incorrectly rendering geometry. I suspect this is a simple coordinate issue where we need to transform the geometry before passing it to the model admin widget:\r\n\r\n\r\nhttps://user-images.githubusercontent.com/22067021/117475485-5761c300-af19-11eb-97b5-7cd8e29bf14c.mov\r\n\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\n# from django.contrib.admin import SimpleListFilter\nfrom django.contrib.gis.admin import OSMGeoAdmin\nfrom rgd.admin.mixins import (\n MODIFIABLE_FILTERS,\n SPATIAL_ENTRY_FILTERS,\n TASK_EVENT_FILTERS,\n TASK_EVENT_READONLY,\n _FileGetNameMixin,\n reprocess,\n)\nfrom rgd.utility import get_or_create_no_commit\nfrom rgd_imagery.models import (\n BandMeta,\n ConvertedImage,\n Image,\n ImageMeta,\n ImageSet,\n ImageSetSpatial,\n Raster,\n)\n\n\ndef _make_image_set_from_images(images):\n \"\"\"Images should be an iterable, not a queryset.\"\"\"\n imset = ImageSet()\n imset.save() # Have to save before adding to ManyToManyField?\n for image in images:\n imset.images.add(image)\n imset.save()\n return imset\n\n\ndef make_image_set_from_images(modeladmin, request, queryset):\n \"\"\"Make an `ImageSet` of the selected `ImageMeta`s.\n\n This is an action on `ImageMeta`.\n \"\"\"\n return _make_image_set_from_images(queryset.all())\n\n\ndef _make_raster_from_image_set(imset):\n raster = Raster()\n raster.image_set = imset\n raster.save()\n return raster\n\n\ndef make_raster_from_images(modeladmin, request, queryset):\n \"\"\"Make a raster of the selected `ImageMeta`s.\n\n This is an action on `ImageMeta`\n \"\"\"\n imset = make_image_set_from_images(modeladmin, request, queryset)\n return _make_raster_from_image_set(imset)\n\n\ndef make_raster_from_image_set(modeladmin, request, queryset):\n \"\"\"Make a raster of the selected `ImageSet`.\n\n This is an action on `ImageSet`.\n \"\"\"\n rasters = []\n for imset in queryset.all():\n rasters.append(_make_raster_from_image_set(imset))\n return rasters\n\n\ndef 
make_raster_for_each_image(modeladmin, request, queryset):\n \"\"\"Make a raster for each of the selected `ImageMeta`s.\n\n This is an action on `ImageMeta`.\n\n This creates one raster for each image entry.\n \"\"\"\n rasters = []\n for img in queryset.all():\n imset = _make_image_set_from_images(\n [\n img,\n ]\n )\n rasters.append(_make_raster_from_image_set(imset))\n return rasters\n\n\ndef clean_empty_image_sets(modeladmin, request, queryset):\n \"\"\"Delete empty `ImageSet`s.\"\"\"\n q = queryset.filter(images=None)\n q.delete()\n\n\ndef convert_images(modeladmin, request, queryset):\n for image in queryset.all():\n entry, created = get_or_create_no_commit(ConvertedImage, source_image=image)\n entry.save()\n\n\n# class ImageSetSpatialFilter(SimpleListFilter):\n# title = 'Spatial'\n# parameter_name = 'imagesetspatial'\n#\n# def lookups(self, request, model_admin):\n# return [True, False]\n#\n# def queryset(self, request, queryset):\n# if not self.value():\n# return queryset.filter(imagesetspatial__isnull=True)\n# if self.value():\n# return queryset.filter(imagesetspatial__isnull=False)\n\n\nclass ImageSetSpatialInline(admin.StackedInline):\n model = ImageSetSpatial\n fk_name = 'image_set'\n list_display = (\n 'pk',\n 'modified',\n 'created',\n )\n list_filter = MODIFIABLE_FILTERS + SPATIAL_ENTRY_FILTERS\n\n\[email protected](ImageSet)\nclass ImageSetAdmin(OSMGeoAdmin):\n list_display = (\n 'pk',\n 'name',\n 'count',\n 'modified',\n 'created',\n )\n actions = (\n make_raster_from_image_set,\n clean_empty_image_sets,\n )\n list_filter = MODIFIABLE_FILTERS # (ImageSetSpatialFilter, )\n inlines = (ImageSetSpatialInline,)\n\n\nclass BandMetaInline(admin.StackedInline):\n model = BandMeta\n fk_name = 'parent_image'\n\n list_display = (\n 'pk',\n 'parent_image',\n 'modified',\n 'created',\n )\n readonly_fields = (\n 'mean',\n 'max',\n 'min',\n 'modified',\n 'created',\n 'parent_image',\n 'std',\n 'nodata_value',\n 'dtype',\n 'band_number',\n )\n\n def has_add_permission(self, request, obj=None):\n \"\"\"Prevent user from adding more.\"\"\"\n return False\n\n\nclass ImageMetaInline(admin.StackedInline):\n model = ImageMeta\n fk_name = 'parent_image'\n list_display = (\n 'pk',\n 'modified',\n 'created',\n )\n readonly_fields = (\n 'number_of_bands',\n 'parent_image',\n 'height',\n 'width',\n 'driver',\n 'modified',\n 'created',\n )\n list_filter = MODIFIABLE_FILTERS + (\n 'number_of_bands',\n 'driver',\n )\n\n\[email protected](Image)\nclass ImageAdmin(OSMGeoAdmin, _FileGetNameMixin):\n list_display = (\n 'pk',\n 'get_name',\n 'status',\n 'modified',\n 'created',\n 'image_data_link',\n )\n readonly_fields = (\n 'modified',\n 'created',\n ) + TASK_EVENT_READONLY\n actions = (\n reprocess,\n make_image_set_from_images,\n make_raster_from_images,\n make_raster_for_each_image,\n convert_images,\n )\n list_filter = MODIFIABLE_FILTERS + TASK_EVENT_FILTERS\n inlines = (\n ImageMetaInline,\n BandMetaInline,\n )\n", "path": "django-rgd-imagery/rgd_imagery/admin/base.py"}], "after_files": [{"content": "import copy\n\nfrom django.contrib import admin\n\n# from django.contrib.admin import SimpleListFilter\nfrom django.contrib.gis.admin import OSMGeoAdmin\nfrom rgd.admin.mixins import (\n MODIFIABLE_FILTERS,\n SPATIAL_ENTRY_FILTERS,\n TASK_EVENT_FILTERS,\n TASK_EVENT_READONLY,\n _FileGetNameMixin,\n reprocess,\n)\nfrom rgd.utility import get_or_create_no_commit\nfrom rgd_imagery.models import (\n BandMeta,\n ConvertedImage,\n Image,\n ImageMeta,\n ImageSet,\n ImageSetSpatial,\n Raster,\n)\n\n\ndef 
_make_image_set_from_images(images):\n \"\"\"Images should be an iterable, not a queryset.\"\"\"\n imset = ImageSet()\n imset.save() # Have to save before adding to ManyToManyField?\n for image in images:\n imset.images.add(image)\n imset.save()\n return imset\n\n\ndef make_image_set_from_images(modeladmin, request, queryset):\n \"\"\"Make an `ImageSet` of the selected `ImageMeta`s.\n\n This is an action on `ImageMeta`.\n \"\"\"\n return _make_image_set_from_images(queryset.all())\n\n\ndef _make_raster_from_image_set(imset):\n raster = Raster()\n raster.image_set = imset\n raster.save()\n return raster\n\n\ndef make_raster_from_images(modeladmin, request, queryset):\n \"\"\"Make a raster of the selected `ImageMeta`s.\n\n This is an action on `ImageMeta`\n \"\"\"\n imset = make_image_set_from_images(modeladmin, request, queryset)\n return _make_raster_from_image_set(imset)\n\n\ndef make_raster_from_image_set(modeladmin, request, queryset):\n \"\"\"Make a raster of the selected `ImageSet`.\n\n This is an action on `ImageSet`.\n \"\"\"\n rasters = []\n for imset in queryset.all():\n rasters.append(_make_raster_from_image_set(imset))\n return rasters\n\n\ndef make_raster_for_each_image(modeladmin, request, queryset):\n \"\"\"Make a raster for each of the selected `ImageMeta`s.\n\n This is an action on `ImageMeta`.\n\n This creates one raster for each image entry.\n \"\"\"\n rasters = []\n for img in queryset.all():\n imset = _make_image_set_from_images(\n [\n img,\n ]\n )\n rasters.append(_make_raster_from_image_set(imset))\n return rasters\n\n\ndef clean_empty_image_sets(modeladmin, request, queryset):\n \"\"\"Delete empty `ImageSet`s.\"\"\"\n q = queryset.filter(images=None)\n q.delete()\n\n\ndef convert_images(modeladmin, request, queryset):\n for image in queryset.all():\n entry, created = get_or_create_no_commit(ConvertedImage, source_image=image)\n entry.save()\n\n\n# class ImageSetSpatialFilter(SimpleListFilter):\n# title = 'Spatial'\n# parameter_name = 'imagesetspatial'\n#\n# def lookups(self, request, model_admin):\n# return [True, False]\n#\n# def queryset(self, request, queryset):\n# if not self.value():\n# return queryset.filter(imagesetspatial__isnull=True)\n# if self.value():\n# return queryset.filter(imagesetspatial__isnull=False)\n\n\nclass ImageSetSpatialInline(OSMGeoAdmin, admin.StackedInline):\n model = ImageSetSpatial\n fk_name = 'image_set'\n list_display = (\n 'pk',\n 'modified',\n 'created',\n )\n list_filter = MODIFIABLE_FILTERS + SPATIAL_ENTRY_FILTERS\n\n def __init__(self, parent_model, admin_site):\n self.admin_site = admin_site\n self.parent_model = parent_model\n self.opts = self.model._meta\n self.has_registered_model = admin_site.is_registered(self.model)\n overrides = copy.deepcopy(admin.options.FORMFIELD_FOR_DBFIELD_DEFAULTS)\n for k, v in self.formfield_overrides.items():\n overrides.setdefault(k, {}).update(v)\n self.formfield_overrides = overrides\n if self.verbose_name is None:\n self.verbose_name = self.model._meta.verbose_name\n if self.verbose_name_plural is None:\n self.verbose_name_plural = self.model._meta.verbose_name_plural\n\n\[email protected](ImageSet)\nclass ImageSetAdmin(OSMGeoAdmin):\n list_display = (\n 'pk',\n 'name',\n 'count',\n 'modified',\n 'created',\n )\n actions = (\n make_raster_from_image_set,\n clean_empty_image_sets,\n )\n list_filter = MODIFIABLE_FILTERS # (ImageSetSpatialFilter, )\n inlines = (ImageSetSpatialInline,)\n\n\nclass BandMetaInline(admin.StackedInline):\n model = BandMeta\n fk_name = 'parent_image'\n\n list_display = 
(\n 'pk',\n 'parent_image',\n 'modified',\n 'created',\n )\n readonly_fields = (\n 'mean',\n 'max',\n 'min',\n 'modified',\n 'created',\n 'parent_image',\n 'std',\n 'nodata_value',\n 'dtype',\n 'band_number',\n )\n\n def has_add_permission(self, request, obj=None):\n \"\"\"Prevent user from adding more.\"\"\"\n return False\n\n\nclass ImageMetaInline(admin.StackedInline):\n model = ImageMeta\n fk_name = 'parent_image'\n list_display = (\n 'pk',\n 'modified',\n 'created',\n )\n readonly_fields = (\n 'number_of_bands',\n 'parent_image',\n 'height',\n 'width',\n 'driver',\n 'modified',\n 'created',\n )\n list_filter = MODIFIABLE_FILTERS + (\n 'number_of_bands',\n 'driver',\n )\n\n\[email protected](Image)\nclass ImageAdmin(OSMGeoAdmin, _FileGetNameMixin):\n list_display = (\n 'pk',\n 'get_name',\n 'status',\n 'modified',\n 'created',\n 'image_data_link',\n )\n readonly_fields = (\n 'modified',\n 'created',\n ) + TASK_EVENT_READONLY\n actions = (\n reprocess,\n make_image_set_from_images,\n make_raster_from_images,\n make_raster_for_each_image,\n convert_images,\n )\n list_filter = MODIFIABLE_FILTERS + TASK_EVENT_FILTERS\n inlines = (\n ImageMetaInline,\n BandMetaInline,\n )\n", "path": "django-rgd-imagery/rgd_imagery/admin/base.py"}]}
2,173
386
gh_patches_debug_9464
rasdani/github-patches
git_diff
Mailu__Mailu-1358
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add ability to specify SMTP port Some relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address. Add ability to specify SMTP port Some relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/admin/mailu/internal/views/postfix.py` Content: ``` 1 from mailu import models 2 from mailu.internal import internal 3 4 import flask 5 import re 6 7 8 @internal.route("/postfix/domain/<domain_name>") 9 def postfix_mailbox_domain(domain_name): 10 if re.match("^\[.*\]$", domain_name): 11 return flask.abort(404) 12 domain = models.Domain.query.get(domain_name) or \ 13 models.Alternative.query.get(domain_name) or \ 14 flask.abort(404) 15 return flask.jsonify(domain.name) 16 17 18 @internal.route("/postfix/mailbox/<path:email>") 19 def postfix_mailbox_map(email): 20 user = models.User.query.get(email) or flask.abort(404) 21 return flask.jsonify(user.email) 22 23 24 @internal.route("/postfix/alias/<path:alias>") 25 def postfix_alias_map(alias): 26 localpart, domain_name = models.Email.resolve_domain(alias) 27 if localpart is None: 28 return flask.jsonify(domain_name) 29 destination = models.Email.resolve_destination(localpart, domain_name) 30 return flask.jsonify(",".join(destination)) if destination else flask.abort(404) 31 32 33 @internal.route("/postfix/transport/<path:email>") 34 def postfix_transport(email): 35 if email == '*' or re.match("(^|.*@)\[.*\]$", email): 36 return flask.abort(404) 37 localpart, domain_name = models.Email.resolve_domain(email) 38 relay = models.Relay.query.get(domain_name) or flask.abort(404) 39 return flask.jsonify("smtp:[{}]".format(relay.smtp)) 40 41 42 @internal.route("/postfix/sender/login/<path:sender>") 43 def postfix_sender_login(sender): 44 localpart, domain_name = models.Email.resolve_domain(sender) 45 if localpart is None: 46 return flask.abort(404) 47 destination = models.Email.resolve_destination(localpart, domain_name, True) 48 return flask.jsonify(",".join(destination)) if destination else flask.abort(404) 49 50 51 @internal.route("/postfix/sender/access/<path:sender>") 52 def postfix_sender_access(sender): 53 """ Simply reject any sender that pretends to be from a local domain 54 """ 55 if not is_void_address(sender): 56 localpart, domain_name = models.Email.resolve_domain(sender) 57 return flask.jsonify("REJECT") if models.Domain.query.get(domain_name) else flask.abort(404) 58 else: 59 return flask.abort(404) 60 61 62 def is_void_address(email): 63 '''True if the email is void (null) email address. 64 ''' 65 if email.startswith('<') and email.endswith('>'): 66 email = email[1:-1] 67 # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's 68 # consider void any such thing. 69 return '@' not in email 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py --- a/core/admin/mailu/internal/views/postfix.py +++ b/core/admin/mailu/internal/views/postfix.py @@ -36,7 +36,11 @@ return flask.abort(404) localpart, domain_name = models.Email.resolve_domain(email) relay = models.Relay.query.get(domain_name) or flask.abort(404) - return flask.jsonify("smtp:[{}]".format(relay.smtp)) + ret = "smtp:[{0}]".format(relay.smtp) + if ":" in relay.smtp: + split = relay.smtp.split(':') + ret = "smtp:[{0}]:{1}".format(split[0], split[1]) + return flask.jsonify(ret) @internal.route("/postfix/sender/login/<path:sender>")
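For illustration, the transport-string construction in this patch is a small pure function that is easy to lift out and unit-test. The sketch mirrors its intent; note it uses `split(':', 1)` where the patch uses a bare `split(':')` — identical for a single `host:port` pair.

```python
def postfix_transport_value(relay_smtp: str) -> str:
    """Build the Postfix transport entry, honouring an optional ':port' suffix."""
    if ":" in relay_smtp:
        host, port = relay_smtp.split(":", 1)
        return "smtp:[{0}]:{1}".format(host, port)
    return "smtp:[{0}]".format(relay_smtp)

assert postfix_transport_value("mail.example.com") == "smtp:[mail.example.com]"
assert postfix_transport_value("198.51.100.7:2525") == "smtp:[198.51.100.7]:2525"
print("ok")
```

One caveat shared with the patch: a bare IPv6 relay address also contains colons, so it would need separate handling.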
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py\n--- a/core/admin/mailu/internal/views/postfix.py\n+++ b/core/admin/mailu/internal/views/postfix.py\n@@ -36,7 +36,11 @@\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n- return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n+ ret = \"smtp:[{0}]\".format(relay.smtp)\n+ if \":\" in relay.smtp:\n+ split = relay.smtp.split(':')\n+ ret = \"smtp:[{0}]:{1}\".format(split[0], split[1])\n+ return flask.jsonify(ret)\n \n \n @internal.route(\"/postfix/sender/login/<path:sender>\")\n", "issue": "Add ability to specify SMTP port\nSome relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address.\nAdd ability to specify SMTP port\nSome relayed domains may live on machines that accept SMTP on a nonstandard port, e.g., port 2525. This would allow users to relay for domains by appending `:port` to the hostname or IP address.\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; 
so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}], "after_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n ret = \"smtp:[{0}]\".format(relay.smtp)\n if \":\" in relay.smtp:\n split = relay.smtp.split(':')\n ret = \"smtp:[{0}]:{1}\".format(split[0], split[1])\n return flask.jsonify(ret)\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}]}
1,101
197
gh_patches_debug_28300
rasdani/github-patches
git_diff
IntelPython__dpnp-1355
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support gpairs dpnp implementation on dpbench The [dpnp implementation of the gpairs algorithm](https://github.com/adarshyoga/dpbench/blob/main/dpbench/benchmarks/gpairs/gpairs_dpnp.py) crashes with SyclError showing failure in dpctl. How to reproduce: 1) Follow [instructions ](https://github.com/adarshyoga/dpbench/blob/main/README.md)to setup dpbench 2) Run gpairs - python -c "import dpbench; dpbench.run_benchmark(\"gpairs\") --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dpnp/dpnp_container.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # ***************************************************************************** 3 # Copyright (c) 2016-2023, Intel Corporation 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # - Redistributions of source code must retain the above copyright notice, 9 # this list of conditions and the following disclaimer. 10 # - Redistributions in binary form must reproduce the above copyright notice, 11 # this list of conditions and the following disclaimer in the documentation 12 # and/or other materials provided with the distribution. 13 # 14 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 18 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 19 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 20 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 21 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 22 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 23 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 24 # THE POSSIBILITY OF SUCH DAMAGE. 
25 # ***************************************************************************** 26 27 """ 28 Container specific part of the DPNP 29 30 Notes 31 ----- 32 This module contains code and dependency on diffrent containers used in DPNP 33 34 """ 35 36 37 import dpctl.utils as dpu 38 import dpctl.tensor as dpt 39 40 from dpnp.dpnp_array import dpnp_array 41 import dpnp 42 43 44 __all__ = [ 45 "arange", 46 "asarray", 47 "empty", 48 "eye", 49 "full", 50 "linspace", 51 "ones" 52 "tril", 53 "triu", 54 "zeros", 55 ] 56 57 58 def arange(start, 59 /, 60 stop=None, 61 step=1, 62 *, 63 dtype=None, 64 device=None, 65 usm_type="device", 66 sycl_queue=None): 67 """Validate input parameters before passing them into `dpctl.tensor` module""" 68 dpu.validate_usm_type(usm_type, allow_none=False) 69 sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) 70 71 array_obj = dpt.arange(start, 72 stop=stop, 73 step=step, 74 dtype=dtype, 75 usm_type=usm_type, 76 sycl_queue=sycl_queue_normalized) 77 78 return dpnp_array(array_obj.shape, buffer=array_obj) 79 80 81 def asarray(x1, 82 dtype=None, 83 copy=False, 84 order="C", 85 device=None, 86 usm_type=None, 87 sycl_queue=None): 88 """Converts `x1` to `dpnp_array`.""" 89 if isinstance(x1, dpnp_array): 90 x1_obj = x1.get_array() 91 else: 92 x1_obj = x1 93 94 sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue) 95 if order is None: 96 order = 'C' 97 98 """Converts incoming 'x1' object to 'dpnp_array'.""" 99 array_obj = dpt.asarray(x1_obj, 100 dtype=dtype, 101 copy=copy, 102 order=order, 103 usm_type=usm_type, 104 sycl_queue=sycl_queue_normalized) 105 return dpnp_array(array_obj.shape, buffer=array_obj, order=order) 106 107 108 def empty(shape, 109 *, 110 dtype=None, 111 order="C", 112 device=None, 113 usm_type="device", 114 sycl_queue=None): 115 """Validate input parameters before passing them into `dpctl.tensor` module""" 116 dpu.validate_usm_type(usm_type, allow_none=False) 117 sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) 118 if order is None: 119 order = 'C' 120 121 """Creates `dpnp_array` from uninitialized USM allocation.""" 122 array_obj = dpt.empty(shape, 123 dtype=dtype, 124 order=order, 125 usm_type=usm_type, 126 sycl_queue=sycl_queue_normalized) 127 return dpnp_array(array_obj.shape, buffer=array_obj, order=order) 128 129 130 def eye(N, 131 M=None, 132 /, 133 *, 134 k=0, 135 dtype=None, 136 order="C", 137 device=None, 138 usm_type="device", 139 sycl_queue=None): 140 """Validate input parameters before passing them into `dpctl.tensor` module""" 141 dpu.validate_usm_type(usm_type, allow_none=False) 142 sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) 143 if order is None: 144 order = 'C' 145 146 """Creates `dpnp_array` with ones on the `k`th diagonal.""" 147 array_obj = dpt.eye(N, 148 M, 149 k=k, 150 dtype=dtype, 151 order=order, 152 usm_type=usm_type, 153 sycl_queue=sycl_queue_normalized) 154 return dpnp_array(array_obj.shape, buffer=array_obj, order=order) 155 156 157 def full(shape, 158 fill_value, 159 *, 160 dtype=None, 161 order="C", 162 device=None, 163 usm_type=None, 164 sycl_queue=None): 165 """Validate input parameters before passing them into `dpctl.tensor` module""" 166 dpu.validate_usm_type(usm_type, allow_none=True) 167 sycl_queue_normalized = dpnp.get_normalized_queue_device(fill_value, sycl_queue=sycl_queue, device=device) 168 if order is None: 169 order = 'C' 170 171 
if isinstance(fill_value, dpnp_array): 172 fill_value = fill_value.get_array() 173 174 """Creates `dpnp_array` having a specified shape, filled with fill_value.""" 175 array_obj = dpt.full(shape, 176 fill_value, 177 dtype=dtype, 178 order=order, 179 usm_type=usm_type, 180 sycl_queue=sycl_queue_normalized) 181 return dpnp_array(array_obj.shape, buffer=array_obj, order=order) 182 183 184 def linspace(start, 185 stop, 186 /, 187 num, 188 *, 189 dtype=None, 190 device=None, 191 usm_type="device", 192 sycl_queue=None, 193 endpoint=True): 194 """Validate input parameters before passing them into `dpctl.tensor` module""" 195 dpu.validate_usm_type(usm_type, allow_none=False) 196 sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) 197 198 """Creates `dpnp_array` with evenly spaced numbers of specified interval.""" 199 array_obj = dpt.linspace(start, 200 stop, 201 num, 202 dtype=dtype, 203 usm_type=usm_type, 204 sycl_queue=sycl_queue_normalized, 205 endpoint=endpoint) 206 return dpnp_array(array_obj.shape, buffer=array_obj) 207 208 209 def meshgrid(*xi, indexing="xy"): 210 """Creates list of `dpnp_array` coordinate matrices from vectors.""" 211 if len(xi) == 0: 212 return [] 213 arrays = tuple(x.get_array() if isinstance(x, dpnp_array) else x for x in xi) 214 arrays_obj = dpt.meshgrid(*arrays, indexing=indexing) 215 return [dpnp_array._create_from_usm_ndarray(array_obj) for array_obj in arrays_obj] 216 217 218 def ones(shape, 219 *, 220 dtype=None, 221 order="C", 222 device=None, 223 usm_type="device", 224 sycl_queue=None): 225 """Validate input parameters before passing them into `dpctl.tensor` module""" 226 dpu.validate_usm_type(usm_type, allow_none=False) 227 sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) 228 if order is None: 229 order = 'C' 230 231 """Creates `dpnp_array` of ones with the given shape, dtype, and order.""" 232 array_obj = dpt.ones(shape, 233 dtype=dtype, 234 order=order, 235 usm_type=usm_type, 236 sycl_queue=sycl_queue_normalized) 237 return dpnp_array(array_obj.shape, buffer=array_obj, order=order) 238 239 240 def tril(x1, /, *, k=0): 241 """"Creates `dpnp_array` as lower triangular part of an input array.""" 242 array_obj = dpt.tril(x1.get_array() if isinstance(x1, dpnp_array) else x1, k) 243 return dpnp_array(array_obj.shape, buffer=array_obj, order="K") 244 245 246 def triu(x1, /, *, k=0): 247 """"Creates `dpnp_array` as upper triangular part of an input array.""" 248 array_obj = dpt.triu(x1.get_array() if isinstance(x1, dpnp_array) else x1, k) 249 return dpnp_array(array_obj.shape, buffer=array_obj, order="K") 250 251 252 def zeros(shape, 253 *, 254 dtype=None, 255 order="C", 256 device=None, 257 usm_type="device", 258 sycl_queue=None): 259 """Validate input parameters before passing them into `dpctl.tensor` module""" 260 dpu.validate_usm_type(usm_type, allow_none=False) 261 sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device) 262 if order is None: 263 order = 'C' 264 265 """Creates `dpnp_array` of zeros with the given shape, dtype, and order.""" 266 array_obj = dpt.zeros(shape, 267 dtype=dtype, 268 order=order, 269 usm_type=usm_type, 270 sycl_queue=sycl_queue_normalized) 271 return dpnp_array(array_obj.shape, buffer=array_obj, order=order) 272 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py --- a/dpnp/dpnp_container.py +++ b/dpnp/dpnp_container.py @@ -86,22 +86,34 @@ usm_type=None, sycl_queue=None): """Converts `x1` to `dpnp_array`.""" - if isinstance(x1, dpnp_array): - x1_obj = x1.get_array() - else: - x1_obj = x1 + dpu.validate_usm_type(usm_type, allow_none=True) - sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue) if order is None: order = 'C' """Converts incoming 'x1' object to 'dpnp_array'.""" - array_obj = dpt.asarray(x1_obj, - dtype=dtype, - copy=copy, - order=order, - usm_type=usm_type, - sycl_queue=sycl_queue_normalized) + if isinstance(x1, (list, tuple, range)): + array_obj = dpt.asarray(x1, + dtype=dtype, + copy=copy, + order=order, + device=device, + usm_type=usm_type, + sycl_queue=sycl_queue) + else: + if isinstance(x1, dpnp_array): + x1_obj = x1.get_array() + else: + x1_obj = x1 + + sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue) + + array_obj = dpt.asarray(x1_obj, + dtype=dtype, + copy=copy, + order=order, + usm_type=usm_type, + sycl_queue=sycl_queue_normalized) return dpnp_array(array_obj.shape, buffer=array_obj, order=order)
{"golden_diff": "diff --git a/dpnp/dpnp_container.py b/dpnp/dpnp_container.py\n--- a/dpnp/dpnp_container.py\n+++ b/dpnp/dpnp_container.py\n@@ -86,22 +86,34 @@\n usm_type=None,\n sycl_queue=None):\n \"\"\"Converts `x1` to `dpnp_array`.\"\"\"\n- if isinstance(x1, dpnp_array):\n- x1_obj = x1.get_array()\n- else:\n- x1_obj = x1\n+ dpu.validate_usm_type(usm_type, allow_none=True)\n \n- sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue)\n if order is None:\n order = 'C'\n \n \"\"\"Converts incoming 'x1' object to 'dpnp_array'.\"\"\"\n- array_obj = dpt.asarray(x1_obj,\n- dtype=dtype,\n- copy=copy,\n- order=order,\n- usm_type=usm_type,\n- sycl_queue=sycl_queue_normalized)\n+ if isinstance(x1, (list, tuple, range)):\n+ array_obj = dpt.asarray(x1,\n+ dtype=dtype,\n+ copy=copy,\n+ order=order,\n+ device=device,\n+ usm_type=usm_type,\n+ sycl_queue=sycl_queue)\n+ else:\n+ if isinstance(x1, dpnp_array):\n+ x1_obj = x1.get_array()\n+ else:\n+ x1_obj = x1\n+\n+ sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue)\n+\n+ array_obj = dpt.asarray(x1_obj,\n+ dtype=dtype,\n+ copy=copy,\n+ order=order,\n+ usm_type=usm_type,\n+ sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n", "issue": "Support gpairs dpnp implementation on dpbench\nThe [dpnp implementation of the gpairs algorithm](https://github.com/adarshyoga/dpbench/blob/main/dpbench/benchmarks/gpairs/gpairs_dpnp.py) crashes with SyclError showing failure in dpctl.\r\n\r\nHow to reproduce:\r\n\r\n1) Follow [instructions ](https://github.com/adarshyoga/dpbench/blob/main/README.md)to setup dpbench\r\n2) Run gpairs - python -c \"import dpbench; dpbench.run_benchmark(\\\"gpairs\\\")\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# *****************************************************************************\n# Copyright (c) 2016-2023, Intel Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\nContainer specific part of the DPNP\n\nNotes\n-----\nThis module contains code and dependency on diffrent containers used in DPNP\n\n\"\"\"\n\n\nimport dpctl.utils as dpu\nimport dpctl.tensor as dpt\n\nfrom dpnp.dpnp_array import dpnp_array\nimport dpnp\n\n\n__all__ = [\n \"arange\",\n \"asarray\",\n \"empty\",\n \"eye\",\n \"full\",\n \"linspace\",\n \"ones\"\n \"tril\",\n \"triu\",\n \"zeros\",\n]\n\n\ndef arange(start,\n /,\n stop=None,\n step=1,\n *,\n dtype=None,\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n\n array_obj = dpt.arange(start,\n stop=stop,\n step=step,\n dtype=dtype,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n\n return dpnp_array(array_obj.shape, buffer=array_obj)\n\n\ndef asarray(x1,\n dtype=None,\n copy=False,\n order=\"C\",\n device=None,\n usm_type=None,\n sycl_queue=None):\n \"\"\"Converts `x1` to `dpnp_array`.\"\"\"\n if isinstance(x1, dpnp_array):\n x1_obj = x1.get_array()\n else:\n x1_obj = x1\n\n sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue)\n if order is None:\n order = 'C'\n\n \"\"\"Converts incoming 'x1' object to 'dpnp_array'.\"\"\"\n array_obj = dpt.asarray(x1_obj,\n dtype=dtype,\n copy=copy,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef empty(shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` from uninitialized USM allocation.\"\"\"\n array_obj = dpt.empty(shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef eye(N,\n M=None,\n /,\n *,\n k=0,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` with ones on the `k`th diagonal.\"\"\"\n array_obj = dpt.eye(N,\n M,\n k=k,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef 
full(shape,\n fill_value,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=None,\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=True)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(fill_value, sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n if isinstance(fill_value, dpnp_array):\n fill_value = fill_value.get_array()\n\n \"\"\"Creates `dpnp_array` having a specified shape, filled with fill_value.\"\"\"\n array_obj = dpt.full(shape,\n fill_value,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef linspace(start,\n stop,\n /,\n num,\n *,\n dtype=None,\n device=None,\n usm_type=\"device\",\n sycl_queue=None,\n endpoint=True):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n\n \"\"\"Creates `dpnp_array` with evenly spaced numbers of specified interval.\"\"\"\n array_obj = dpt.linspace(start,\n stop,\n num,\n dtype=dtype,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized,\n endpoint=endpoint)\n return dpnp_array(array_obj.shape, buffer=array_obj)\n\n\ndef meshgrid(*xi, indexing=\"xy\"):\n \"\"\"Creates list of `dpnp_array` coordinate matrices from vectors.\"\"\"\n if len(xi) == 0:\n return []\n arrays = tuple(x.get_array() if isinstance(x, dpnp_array) else x for x in xi)\n arrays_obj = dpt.meshgrid(*arrays, indexing=indexing)\n return [dpnp_array._create_from_usm_ndarray(array_obj) for array_obj in arrays_obj]\n\n\ndef ones(shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` of ones with the given shape, dtype, and order.\"\"\"\n array_obj = dpt.ones(shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef tril(x1, /, *, k=0):\n \"\"\"\"Creates `dpnp_array` as lower triangular part of an input array.\"\"\"\n array_obj = dpt.tril(x1.get_array() if isinstance(x1, dpnp_array) else x1, k)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=\"K\")\n\n\ndef triu(x1, /, *, k=0):\n \"\"\"\"Creates `dpnp_array` as upper triangular part of an input array.\"\"\"\n array_obj = dpt.triu(x1.get_array() if isinstance(x1, dpnp_array) else x1, k)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=\"K\")\n\n\ndef zeros(shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` of zeros with the given shape, dtype, and order.\"\"\"\n array_obj = dpt.zeros(shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n 
return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n", "path": "dpnp/dpnp_container.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# *****************************************************************************\n# Copyright (c) 2016-2023, Intel Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# - Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# - Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n# *****************************************************************************\n\n\"\"\"\nContainer specific part of the DPNP\n\nNotes\n-----\nThis module contains code and dependency on diffrent containers used in DPNP\n\n\"\"\"\n\n\nimport dpctl.utils as dpu\nimport dpctl.tensor as dpt\n\nfrom dpnp.dpnp_array import dpnp_array\nimport dpnp\n\n\n__all__ = [\n \"arange\",\n \"asarray\",\n \"empty\",\n \"eye\",\n \"full\",\n \"linspace\",\n \"ones\"\n \"tril\",\n \"triu\",\n \"zeros\",\n]\n\n\ndef arange(start,\n /,\n stop=None,\n step=1,\n *,\n dtype=None,\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n\n array_obj = dpt.arange(start,\n stop=stop,\n step=step,\n dtype=dtype,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n\n return dpnp_array(array_obj.shape, buffer=array_obj)\n\n\ndef asarray(x1,\n dtype=None,\n copy=False,\n order=\"C\",\n device=None,\n usm_type=None,\n sycl_queue=None):\n \"\"\"Converts `x1` to `dpnp_array`.\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=True)\n\n if order is None:\n order = 'C'\n\n \"\"\"Converts incoming 'x1' object to 'dpnp_array'.\"\"\"\n if isinstance(x1, (list, tuple, range)):\n array_obj = dpt.asarray(x1,\n dtype=dtype,\n copy=copy,\n order=order,\n device=device,\n usm_type=usm_type,\n sycl_queue=sycl_queue)\n else:\n if isinstance(x1, dpnp_array):\n x1_obj = x1.get_array()\n else:\n x1_obj = x1\n\n sycl_queue_normalized = dpnp.get_normalized_queue_device(x1_obj, device=device, sycl_queue=sycl_queue)\n\n array_obj = dpt.asarray(x1_obj,\n dtype=dtype,\n copy=copy,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef 
empty(shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` from uninitialized USM allocation.\"\"\"\n array_obj = dpt.empty(shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef eye(N,\n M=None,\n /,\n *,\n k=0,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` with ones on the `k`th diagonal.\"\"\"\n array_obj = dpt.eye(N,\n M,\n k=k,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef full(shape,\n fill_value,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=None,\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=True)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(fill_value, sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n if isinstance(fill_value, dpnp_array):\n fill_value = fill_value.get_array()\n\n \"\"\"Creates `dpnp_array` having a specified shape, filled with fill_value.\"\"\"\n array_obj = dpt.full(shape,\n fill_value,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef linspace(start,\n stop,\n /,\n num,\n *,\n dtype=None,\n device=None,\n usm_type=\"device\",\n sycl_queue=None,\n endpoint=True):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n\n \"\"\"Creates `dpnp_array` with evenly spaced numbers of specified interval.\"\"\"\n array_obj = dpt.linspace(start,\n stop,\n num,\n dtype=dtype,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized,\n endpoint=endpoint)\n return dpnp_array(array_obj.shape, buffer=array_obj)\n\n\ndef meshgrid(*xi, indexing=\"xy\"):\n \"\"\"Creates list of `dpnp_array` coordinate matrices from vectors.\"\"\"\n if len(xi) == 0:\n return []\n arrays = tuple(x.get_array() if isinstance(x, dpnp_array) else x for x in xi)\n arrays_obj = dpt.meshgrid(*arrays, indexing=indexing)\n return [dpnp_array._create_from_usm_ndarray(array_obj) for array_obj in arrays_obj]\n\n\ndef ones(shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` of ones with the given shape, dtype, and 
order.\"\"\"\n array_obj = dpt.ones(shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n\n\ndef tril(x1, /, *, k=0):\n \"\"\"\"Creates `dpnp_array` as lower triangular part of an input array.\"\"\"\n array_obj = dpt.tril(x1.get_array() if isinstance(x1, dpnp_array) else x1, k)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=\"K\")\n\n\ndef triu(x1, /, *, k=0):\n \"\"\"\"Creates `dpnp_array` as upper triangular part of an input array.\"\"\"\n array_obj = dpt.triu(x1.get_array() if isinstance(x1, dpnp_array) else x1, k)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=\"K\")\n\n\ndef zeros(shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None):\n \"\"\"Validate input parameters before passing them into `dpctl.tensor` module\"\"\"\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(sycl_queue=sycl_queue, device=device)\n if order is None:\n order = 'C'\n\n \"\"\"Creates `dpnp_array` of zeros with the given shape, dtype, and order.\"\"\"\n array_obj = dpt.zeros(shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized)\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)\n", "path": "dpnp/dpnp_container.py"}]}
3,183
446
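The dpnp patch above routes plain Python sequences (list/tuple/range) straight to `dpt.asarray` so they no longer fail inside `get_normalized_queue_device`. A minimal usage sketch against the patched container-level `asarray`, assuming dpnp is installed with a working SYCL device; the values and variable names are illustrative only:

```python
# Minimal sketch (assumes a dpnp build containing the patch and an available SYCL device):
# plain Python sequences should be accepted by the container-level asarray directly.
from dpnp import dpnp_container

a = dpnp_container.asarray([1.0, 2.0, 3.0], usm_type="device")  # list path, no queue normalization
b = dpnp_container.asarray(a)                                   # dpnp_array path, normalized as before
print(a.shape, type(b).__name__)
```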
gh_patches_debug_27614
rasdani/github-patches
git_diff
plone__Products.CMFPlone-2326
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Some Errors cannot render their exceptions during the creation of portal This is a follow-up for #2203. A exception of the following type cannot rendered in Plone 5.2: ``` ValueError Traceback (innermost last): Module ZServer.ZPublisher.Publish, line 144, in publish Module ZPublisher.mapply, line 85, in mapply Module Products.PDBDebugMode.runcall, line 68, in pdb_runcall Module ZServer.ZPublisher.Publish, line 44, in call_object Module Products.CMFPlone.browser.admin, line 271, in __call__ Module Products.CMFPlone.factory, line 146, in addPloneSite Module Products.GenericSetup.tool, line 388, in runAllImportStepsFromProfile __traceback_info__: profile-Products.CMFPlone:plone Module Products.GenericSetup.tool, line 1433, in _runImportStepsFromContext Module Products.GenericSetup.tool, line 1245, in _doRunImportStep __traceback_info__: rolemap Module Products.GenericSetup.rolemap, line 102, in importRolemap Module <string>, line 8, in manage_permission Module AccessControl.requestmethod, line 79, in _curried Module OFS.role, line 86, in manage_permission Module AccessControl.rolemanager, line 224, in manage_permission ValueError: The permission <em>Use Database Methods</em> is invalid. ``` The issue is that the portal and the main_template exist during rendering of ExceptionView but the main_template cannot be rendered because the context of the exception is not the Plone site but the Application. Thus the exception-template cannot be rendered properly. To fix that i suggest we try to render main_template and switch to the basic_template when that fails. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `Products/CMFPlone/browser/exceptions.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from AccessControl import getSecurityManager 3 from Products.Five import BrowserView 4 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile 5 from zExceptions.ExceptionFormatter import format_exception 6 from zope.component.hooks import getSite 7 8 import json 9 import sys 10 11 12 class ExceptionView(BrowserView): 13 basic_template = ViewPageTemplateFile('templates/basic_error_message.pt') 14 15 def is_manager(self): 16 return getSecurityManager().checkPermission( 17 'Manage portal', self.context) 18 19 def __call__(self): 20 exception = self.context 21 self.context = self.__parent__ 22 request = self.request 23 24 error_type = exception.__class__.__name__ 25 exc_type, value, traceback = sys.exc_info() 26 error_tb = ''.join( 27 format_exception(exc_type, value, traceback, as_html=True)) 28 request.response.setStatus(exc_type) 29 30 # Indicate exception as JSON 31 if "text/html" not in request.getHeader('Accept', ''): 32 request.response.setHeader("Content-Type", "application/json") 33 return json.dumps({ 34 'error_type': error_type, 35 }) 36 37 if getSite() is None: 38 # We cannot get the site, so we cannot render our nice template 39 template = self.basic_template 40 else: 41 # Use a simplified template if main_template is not available 42 try: 43 self.context.unrestrictedTraverse('main_template') 44 except: 45 template = self.basic_template 46 else: 47 template = self.index 48 49 # Render page with user-facing error notice 50 request.set('disable_border', True) 51 request.set('disable_plone.leftcolumn', True) 52 request.set('disable_plone.rightcolumn', True) 53 54 return template( 55 error_type=error_type, 56 error_tb=error_tb, 57 ) 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Products/CMFPlone/browser/exceptions.py b/Products/CMFPlone/browser/exceptions.py --- a/Products/CMFPlone/browser/exceptions.py +++ b/Products/CMFPlone/browser/exceptions.py @@ -3,7 +3,6 @@ from Products.Five import BrowserView from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from zExceptions.ExceptionFormatter import format_exception -from zope.component.hooks import getSite import json import sys @@ -34,24 +33,16 @@ 'error_type': error_type, }) - if getSite() is None: - # We cannot get the site, so we cannot render our nice template - template = self.basic_template - else: - # Use a simplified template if main_template is not available - try: - self.context.unrestrictedTraverse('main_template') - except: - template = self.basic_template - else: - template = self.index - # Render page with user-facing error notice request.set('disable_border', True) request.set('disable_plone.leftcolumn', True) request.set('disable_plone.rightcolumn', True) - return template( - error_type=error_type, - error_tb=error_tb, - ) + try: + return self.index( + error_type=error_type, + error_tb=error_tb) + except: + return self.basic_template( + error_type=error_type, + error_tb=error_tb)
{"golden_diff": "diff --git a/Products/CMFPlone/browser/exceptions.py b/Products/CMFPlone/browser/exceptions.py\n--- a/Products/CMFPlone/browser/exceptions.py\n+++ b/Products/CMFPlone/browser/exceptions.py\n@@ -3,7 +3,6 @@\n from Products.Five import BrowserView\n from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n from zExceptions.ExceptionFormatter import format_exception\n-from zope.component.hooks import getSite\n \n import json\n import sys\n@@ -34,24 +33,16 @@\n 'error_type': error_type,\n })\n \n- if getSite() is None:\n- # We cannot get the site, so we cannot render our nice template\n- template = self.basic_template\n- else:\n- # Use a simplified template if main_template is not available\n- try:\n- self.context.unrestrictedTraverse('main_template')\n- except:\n- template = self.basic_template\n- else:\n- template = self.index\n-\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n \n- return template(\n- error_type=error_type,\n- error_tb=error_tb,\n- )\n+ try:\n+ return self.index(\n+ error_type=error_type,\n+ error_tb=error_tb)\n+ except:\n+ return self.basic_template(\n+ error_type=error_type,\n+ error_tb=error_tb)\n", "issue": "Some Errors cannot render their exceptions during the creation of portal\nThis is a follow-up for #2203.\r\n\r\nA exception of the following type cannot rendered in Plone 5.2:\r\n\r\n```\r\nValueError\r\n\r\nTraceback (innermost last):\r\n\r\n Module ZServer.ZPublisher.Publish, line 144, in publish\r\n Module ZPublisher.mapply, line 85, in mapply\r\n Module Products.PDBDebugMode.runcall, line 68, in pdb_runcall\r\n Module ZServer.ZPublisher.Publish, line 44, in call_object\r\n Module Products.CMFPlone.browser.admin, line 271, in __call__\r\n Module Products.CMFPlone.factory, line 146, in addPloneSite\r\n Module Products.GenericSetup.tool, line 388, in runAllImportStepsFromProfile\r\n __traceback_info__: profile-Products.CMFPlone:plone\r\n Module Products.GenericSetup.tool, line 1433, in _runImportStepsFromContext\r\n Module Products.GenericSetup.tool, line 1245, in _doRunImportStep\r\n __traceback_info__: rolemap\r\n Module Products.GenericSetup.rolemap, line 102, in importRolemap\r\n Module <string>, line 8, in manage_permission\r\n Module AccessControl.requestmethod, line 79, in _curried\r\n Module OFS.role, line 86, in manage_permission\r\n Module AccessControl.rolemanager, line 224, in manage_permission\r\n\r\nValueError: The permission <em>Use Database Methods</em> is invalid. \r\n```\r\n\r\nThe issue is that the portal and the main_template exist during rendering of ExceptionView but the main_template cannot be rendered because the context of the exception is not the Plone site but the Application. Thus the exception-template cannot be rendered properly. 
To fix that i suggest we try to render main_template and switch to the basic_template when that fails.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import getSecurityManager\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zExceptions.ExceptionFormatter import format_exception\nfrom zope.component.hooks import getSite\n\nimport json\nimport sys\n\n\nclass ExceptionView(BrowserView):\n basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')\n\n def is_manager(self):\n return getSecurityManager().checkPermission(\n 'Manage portal', self.context)\n\n def __call__(self):\n exception = self.context\n self.context = self.__parent__\n request = self.request\n\n error_type = exception.__class__.__name__\n exc_type, value, traceback = sys.exc_info()\n error_tb = ''.join(\n format_exception(exc_type, value, traceback, as_html=True))\n request.response.setStatus(exc_type)\n\n # Indicate exception as JSON\n if \"text/html\" not in request.getHeader('Accept', ''):\n request.response.setHeader(\"Content-Type\", \"application/json\")\n return json.dumps({\n 'error_type': error_type,\n })\n\n if getSite() is None:\n # We cannot get the site, so we cannot render our nice template\n template = self.basic_template\n else:\n # Use a simplified template if main_template is not available\n try:\n self.context.unrestrictedTraverse('main_template')\n except:\n template = self.basic_template\n else:\n template = self.index\n\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n\n return template(\n error_type=error_type,\n error_tb=error_tb,\n )\n", "path": "Products/CMFPlone/browser/exceptions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import getSecurityManager\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom zExceptions.ExceptionFormatter import format_exception\n\nimport json\nimport sys\n\n\nclass ExceptionView(BrowserView):\n basic_template = ViewPageTemplateFile('templates/basic_error_message.pt')\n\n def is_manager(self):\n return getSecurityManager().checkPermission(\n 'Manage portal', self.context)\n\n def __call__(self):\n exception = self.context\n self.context = self.__parent__\n request = self.request\n\n error_type = exception.__class__.__name__\n exc_type, value, traceback = sys.exc_info()\n error_tb = ''.join(\n format_exception(exc_type, value, traceback, as_html=True))\n request.response.setStatus(exc_type)\n\n # Indicate exception as JSON\n if \"text/html\" not in request.getHeader('Accept', ''):\n request.response.setHeader(\"Content-Type\", \"application/json\")\n return json.dumps({\n 'error_type': error_type,\n })\n\n # Render page with user-facing error notice\n request.set('disable_border', True)\n request.set('disable_plone.leftcolumn', True)\n request.set('disable_plone.rightcolumn', True)\n\n try:\n return self.index(\n error_type=error_type,\n error_tb=error_tb)\n except:\n return self.basic_template(\n error_type=error_type,\n error_tb=error_tb)\n", "path": "Products/CMFPlone/browser/exceptions.py"}]}
1,185
352
gh_patches_debug_2405
rasdani/github-patches
git_diff
googleapis__google-api-python-client-1221
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Published package is missing discovery files in discovery_cache Many thanks to the @wyk9787 for noticing this and reaching out. All calls to `discovery.build()` using `2.0.0` fail with "unknown api name or version". ```python from googleapiclient import discovery client = discovery.build("cloudprofiler", "v2") ``` This is because the published package has no `discovery_cache/documents` directory. 1. `python3 -m venv env` 2. `source env/bin/activate` 3. `python3 -m pip install google-api-python-client` 4. `ls env/lib/python*/site-packages/googleapiclient/discovery_cache` ``` busunkim@busunkim:~/github$ ls env/lib/python*/site-packages/googleapiclient/discovery_cache appengine_memcache.py base.py file_cache.py __init__.py __pycache__ ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2014 Google Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Setup script for Google API Python client. 16 17 Also installs included versions of third party libraries, if those libraries 18 are not already installed. 
19 """ 20 from __future__ import print_function 21 22 import sys 23 24 if sys.version_info < (3, 6): 25 print("google-api-python-client requires python3 version >= 3.6.", file=sys.stderr) 26 sys.exit(1) 27 28 import io 29 import os 30 from setuptools import setup 31 32 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"] 33 34 install_requires = [ 35 "httplib2>=0.15.0,<1dev", 36 "google-auth>=1.16.0,<2dev", 37 "google-auth-httplib2>=0.0.3", 38 "google-api-core>=1.21.0,<2dev", 39 "six>=1.13.0,<2dev", 40 "uritemplate>=3.0.0,<4dev", 41 ] 42 43 package_root = os.path.abspath(os.path.dirname(__file__)) 44 45 readme_filename = os.path.join(package_root, "README.md") 46 with io.open(readme_filename, encoding="utf-8") as readme_file: 47 readme = readme_file.read() 48 49 version = "2.0.1" 50 51 setup( 52 name="google-api-python-client", 53 version=version, 54 description="Google API Client Library for Python", 55 long_description=readme, 56 long_description_content_type='text/markdown', 57 author="Google LLC", 58 author_email="[email protected]", 59 url="https://github.com/googleapis/google-api-python-client/", 60 install_requires=install_requires, 61 python_requires=">=3.6", 62 packages=packages, 63 package_data={}, 64 license="Apache 2.0", 65 keywords="google api client", 66 classifiers=[ 67 "Programming Language :: Python :: 3", 68 "Programming Language :: Python :: 3.6", 69 "Programming Language :: Python :: 3.7", 70 "Programming Language :: Python :: 3.8", 71 "Programming Language :: Python :: 3.9", 72 "Development Status :: 5 - Production/Stable", 73 "Intended Audience :: Developers", 74 "License :: OSI Approved :: Apache Software License", 75 "Operating System :: OS Independent", 76 "Topic :: Internet :: WWW/HTTP", 77 ], 78 ) 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -60,7 +60,7 @@ install_requires=install_requires, python_requires=">=3.6", packages=packages, - package_data={}, + package_data={"googleapiclient": ["discovery_cache/documents/*.json"]}, license="Apache 2.0", keywords="google api client", classifiers=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -60,7 +60,7 @@\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n- package_data={},\n+ package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n", "issue": "Published package is missing discovery files in discovery_cache\nMany thanks to the @wyk9787 for noticing this and reaching out.\r\n\r\nAll calls to `discovery.build()` using `2.0.0` fail with \"unknown api name or version\".\r\n\r\n```python\r\nfrom googleapiclient import discovery\r\n\r\nclient = discovery.build(\"cloudprofiler\", \"v2\")\r\n```\r\n\r\n\r\nThis is because the published package has no `discovery_cache/documents` directory. \r\n\r\n1. `python3 -m venv env`\r\n2. `source env/bin/activate`\r\n3. `python3 -m pip install google-api-python-client`\r\n4. `ls env/lib/python*/site-packages/googleapiclient/discovery_cache`\r\n\r\n```\r\nbusunkim@busunkim:~/github$ ls env/lib/python*/site-packages/googleapiclient/discovery_cache\r\nappengine_memcache.py base.py file_cache.py __init__.py __pycache__\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0,<2dev\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"2.0.1\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n 
\"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 6):\n print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0,<2dev\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"2.0.1\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.6\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
1,245
97
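The setup.py patch above ships the cached discovery documents as package data. A hedged way to check that an installed build actually contains them (assumes a release that includes this fix; the directory layout shown is the one the patch implies):

```python
# Hedged check that an installed google-api-python-client build ships the
# static discovery documents added via package_data in the patch above.
import os
import googleapiclient.discovery_cache as discovery_cache

documents_dir = os.path.join(os.path.dirname(discovery_cache.__file__), "documents")
json_docs = [name for name in os.listdir(documents_dir) if name.endswith(".json")]
print(len(json_docs), "cached discovery documents found")
```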
gh_patches_debug_9729
rasdani/github-patches
git_diff
gratipay__gratipay.com-1494
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- can't stay logged in from @amaltson via [Twitter](https://twitter.com/amaltson/status/332678066985893889): > trying to log in with GitHub, each time I go to my account page, I get logged out.. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gittip/security/authentication.py` Content: ``` 1 """Defines website authentication helpers. 2 """ 3 import rfc822 4 import time 5 6 from aspen import Response 7 from gittip.security import csrf 8 from gittip.security.user import User 9 10 11 BEGINNING_OF_EPOCH = rfc822.formatdate(0) 12 TIMEOUT = 60 * 60 * 24 * 7 # one week 13 ROLES = ['anonymous', 'authenticated', 'owner', 'admin'] 14 ROLES_SHOULD_BE = "It should be one of: {}.".format(', '.join(ROLES)) 15 16 17 class NoMinimumRoleSpecified(Exception): 18 def __str__(self): 19 return "There is no minimum_role specified in the simplate at {}. {}" \ 20 .format(self.args[0], ROLES_SHOULD_BE) 21 22 class BadMinimumRole(Exception): 23 def __str__(self): 24 return "The minimum_role specific in {} is bad: {}. {}" \ 25 .format(self.args[0], self.args[1], ROLES_SHOULD_BE) 26 27 28 def inbound(request): 29 """Authenticate from a cookie or an API key in basic auth. 30 """ 31 user = None 32 if 'Authorization' in request.headers: 33 header = request.headers['authorization'] 34 if header.startswith('Basic '): 35 creds = header[len('Basic '):].decode('base64') 36 token, ignored = creds.split(':') 37 user = User.from_api_key(token) 38 39 # We don't require CSRF if they basically authenticated. 40 csrf_token = csrf._get_new_csrf_key() 41 request.headers.cookie['csrf_token'] = csrf_token 42 request.headers['X-CSRF-TOKEN'] = csrf_token 43 if 'Referer' not in request.headers: 44 request.headers['Referer'] = \ 45 'https://%s/' % csrf._get_host(request) 46 elif 'session' in request.headers.cookie: 47 token = request.headers.cookie['session'].value 48 user = User.from_session_token(token) 49 50 if user is None: 51 user = User() 52 request.context['user'] = user 53 54 55 def check_role(request): 56 """Given a request object, possibly raise Response(403). 57 """ 58 59 # XXX We can't use this yet because we don't have an inbound Aspen hook 60 # that fires after the first page of the simplate is exec'd. 
61 62 context = request.context 63 path = request.line.uri.path 64 65 if 'minimum_role' not in context: 66 raise NoMinimumRoleSpecified(request.fs) 67 68 minimum_role = context['minimum_role'] 69 if minimum_role not in ROLES: 70 raise BadMinimumRole(request.fs, minimum_role) 71 72 user = context['user'] 73 highest_role = user.get_highest_role(path.get('username', None)) 74 if ROLES.index(highest_role) < ROLES.index(minimum_role): 75 request.redirect('..') 76 77 78 def outbound(response): 79 if 'user' in response.request.context: 80 user = response.request.context['user'] 81 if not isinstance(user, User): 82 raise Response(400, "If you define 'user' in a simplate it has to " 83 "be a User instance.") 84 else: 85 user = User() 86 87 if user.ANON: # user is anonymous 88 if 'session' not in response.request.headers.cookie: 89 # no cookie in the request, don't set one on response 90 return 91 else: 92 # expired cookie in the request, instruct browser to delete it 93 response.headers.cookie['session'] = '' 94 expires = 0 95 else: # user is authenticated 96 response.headers['Expires'] = BEGINNING_OF_EPOCH # don't cache 97 response.headers.cookie['session'] = user.participant.session_token 98 expires = time.time() + TIMEOUT 99 user.keep_signed_in_until(expires) 100 101 cookie = response.headers.cookie['session'] 102 # I am not setting domain, because it is supposed to default to what we 103 # want: the domain of the object requested. 104 #cookie['domain'] 105 cookie['path'] = '/' 106 cookie['expires'] = rfc822.formatdate(expires) 107 cookie['httponly'] = "Yes, please." 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gittip/security/authentication.py b/gittip/security/authentication.py --- a/gittip/security/authentication.py +++ b/gittip/security/authentication.py @@ -3,6 +3,7 @@ import rfc822 import time +import gittip from aspen import Response from gittip.security import csrf from gittip.security.user import User @@ -105,3 +106,5 @@ cookie['path'] = '/' cookie['expires'] = rfc822.formatdate(expires) cookie['httponly'] = "Yes, please." + if gittip.canonical_scheme == 'https': + cookie['secure'] = "Yes, please."
{"golden_diff": "diff --git a/gittip/security/authentication.py b/gittip/security/authentication.py\n--- a/gittip/security/authentication.py\n+++ b/gittip/security/authentication.py\n@@ -3,6 +3,7 @@\n import rfc822\n import time\n \n+import gittip\n from aspen import Response\n from gittip.security import csrf\n from gittip.security.user import User\n@@ -105,3 +106,5 @@\n cookie['path'] = '/'\n cookie['expires'] = rfc822.formatdate(expires)\n cookie['httponly'] = \"Yes, please.\"\n+ if gittip.canonical_scheme == 'https':\n+ cookie['secure'] = \"Yes, please.\"\n", "issue": "can't stay logged in\nfrom @amaltson via [Twitter](https://twitter.com/amaltson/status/332678066985893889):\n\n> trying to log in with GitHub, each time I go to my account page, I get logged out.. \n\n", "before_files": [{"content": "\"\"\"Defines website authentication helpers.\n\"\"\"\nimport rfc822\nimport time\n\nfrom aspen import Response\nfrom gittip.security import csrf\nfrom gittip.security.user import User\n\n\nBEGINNING_OF_EPOCH = rfc822.formatdate(0)\nTIMEOUT = 60 * 60 * 24 * 7 # one week\nROLES = ['anonymous', 'authenticated', 'owner', 'admin']\nROLES_SHOULD_BE = \"It should be one of: {}.\".format(', '.join(ROLES))\n\n\nclass NoMinimumRoleSpecified(Exception):\n def __str__(self):\n return \"There is no minimum_role specified in the simplate at {}. {}\" \\\n .format(self.args[0], ROLES_SHOULD_BE)\n\nclass BadMinimumRole(Exception):\n def __str__(self):\n return \"The minimum_role specific in {} is bad: {}. {}\" \\\n .format(self.args[0], self.args[1], ROLES_SHOULD_BE)\n\n\ndef inbound(request):\n \"\"\"Authenticate from a cookie or an API key in basic auth.\n \"\"\"\n user = None\n if 'Authorization' in request.headers:\n header = request.headers['authorization']\n if header.startswith('Basic '):\n creds = header[len('Basic '):].decode('base64')\n token, ignored = creds.split(':')\n user = User.from_api_key(token)\n\n # We don't require CSRF if they basically authenticated.\n csrf_token = csrf._get_new_csrf_key()\n request.headers.cookie['csrf_token'] = csrf_token\n request.headers['X-CSRF-TOKEN'] = csrf_token\n if 'Referer' not in request.headers:\n request.headers['Referer'] = \\\n 'https://%s/' % csrf._get_host(request)\n elif 'session' in request.headers.cookie:\n token = request.headers.cookie['session'].value\n user = User.from_session_token(token)\n\n if user is None:\n user = User()\n request.context['user'] = user\n\n\ndef check_role(request):\n \"\"\"Given a request object, possibly raise Response(403).\n \"\"\"\n\n # XXX We can't use this yet because we don't have an inbound Aspen hook\n # that fires after the first page of the simplate is exec'd.\n\n context = request.context\n path = request.line.uri.path\n\n if 'minimum_role' not in context:\n raise NoMinimumRoleSpecified(request.fs)\n\n minimum_role = context['minimum_role']\n if minimum_role not in ROLES:\n raise BadMinimumRole(request.fs, minimum_role)\n\n user = context['user']\n highest_role = user.get_highest_role(path.get('username', None))\n if ROLES.index(highest_role) < ROLES.index(minimum_role):\n request.redirect('..')\n\n\ndef outbound(response):\n if 'user' in response.request.context:\n user = response.request.context['user']\n if not isinstance(user, User):\n raise Response(400, \"If you define 'user' in a simplate it has to \"\n \"be a User instance.\")\n else:\n user = User()\n\n if user.ANON: # user is anonymous\n if 'session' not in response.request.headers.cookie:\n # no cookie in the request, don't set one on response\n 
return\n else:\n # expired cookie in the request, instruct browser to delete it\n response.headers.cookie['session'] = ''\n expires = 0\n else: # user is authenticated\n response.headers['Expires'] = BEGINNING_OF_EPOCH # don't cache\n response.headers.cookie['session'] = user.participant.session_token\n expires = time.time() + TIMEOUT\n user.keep_signed_in_until(expires)\n\n cookie = response.headers.cookie['session']\n # I am not setting domain, because it is supposed to default to what we\n # want: the domain of the object requested.\n #cookie['domain']\n cookie['path'] = '/'\n cookie['expires'] = rfc822.formatdate(expires)\n cookie['httponly'] = \"Yes, please.\"\n", "path": "gittip/security/authentication.py"}], "after_files": [{"content": "\"\"\"Defines website authentication helpers.\n\"\"\"\nimport rfc822\nimport time\n\nimport gittip\nfrom aspen import Response\nfrom gittip.security import csrf\nfrom gittip.security.user import User\n\n\nBEGINNING_OF_EPOCH = rfc822.formatdate(0)\nTIMEOUT = 60 * 60 * 24 * 7 # one week\nROLES = ['anonymous', 'authenticated', 'owner', 'admin']\nROLES_SHOULD_BE = \"It should be one of: {}.\".format(', '.join(ROLES))\n\n\nclass NoMinimumRoleSpecified(Exception):\n def __str__(self):\n return \"There is no minimum_role specified in the simplate at {}. {}\" \\\n .format(self.args[0], ROLES_SHOULD_BE)\n\nclass BadMinimumRole(Exception):\n def __str__(self):\n return \"The minimum_role specific in {} is bad: {}. {}\" \\\n .format(self.args[0], self.args[1], ROLES_SHOULD_BE)\n\n\ndef inbound(request):\n \"\"\"Authenticate from a cookie or an API key in basic auth.\n \"\"\"\n user = None\n if 'Authorization' in request.headers:\n header = request.headers['authorization']\n if header.startswith('Basic '):\n creds = header[len('Basic '):].decode('base64')\n token, ignored = creds.split(':')\n user = User.from_api_key(token)\n\n # We don't require CSRF if they basically authenticated.\n csrf_token = csrf._get_new_csrf_key()\n request.headers.cookie['csrf_token'] = csrf_token\n request.headers['X-CSRF-TOKEN'] = csrf_token\n if 'Referer' not in request.headers:\n request.headers['Referer'] = \\\n 'https://%s/' % csrf._get_host(request)\n elif 'session' in request.headers.cookie:\n token = request.headers.cookie['session'].value\n user = User.from_session_token(token)\n\n if user is None:\n user = User()\n request.context['user'] = user\n\n\ndef check_role(request):\n \"\"\"Given a request object, possibly raise Response(403).\n \"\"\"\n\n # XXX We can't use this yet because we don't have an inbound Aspen hook\n # that fires after the first page of the simplate is exec'd.\n\n context = request.context\n path = request.line.uri.path\n\n if 'minimum_role' not in context:\n raise NoMinimumRoleSpecified(request.fs)\n\n minimum_role = context['minimum_role']\n if minimum_role not in ROLES:\n raise BadMinimumRole(request.fs, minimum_role)\n\n user = context['user']\n highest_role = user.get_highest_role(path.get('username', None))\n if ROLES.index(highest_role) < ROLES.index(minimum_role):\n request.redirect('..')\n\n\ndef outbound(response):\n if 'user' in response.request.context:\n user = response.request.context['user']\n if not isinstance(user, User):\n raise Response(400, \"If you define 'user' in a simplate it has to \"\n \"be a User instance.\")\n else:\n user = User()\n\n if user.ANON: # user is anonymous\n if 'session' not in response.request.headers.cookie:\n # no cookie in the request, don't set one on response\n return\n else:\n # expired cookie in the 
request, instruct browser to delete it\n response.headers.cookie['session'] = ''\n expires = 0\n else: # user is authenticated\n response.headers['Expires'] = BEGINNING_OF_EPOCH # don't cache\n response.headers.cookie['session'] = user.participant.session_token\n expires = time.time() + TIMEOUT\n user.keep_signed_in_until(expires)\n\n cookie = response.headers.cookie['session']\n # I am not setting domain, because it is supposed to default to what we\n # want: the domain of the object requested.\n #cookie['domain']\n cookie['path'] = '/'\n cookie['expires'] = rfc822.formatdate(expires)\n cookie['httponly'] = \"Yes, please.\"\n if gittip.canonical_scheme == 'https':\n cookie['secure'] = \"Yes, please.\"\n", "path": "gittip/security/authentication.py"}]}
1,434
160
gh_patches_debug_34535
rasdani/github-patches
git_diff
openstates__openstates-scrapers-1988
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- WA failing since at least 2017-12-02 WA has been failing since 2017-12-02 Based on automated runs it appears that WA has not run successfully in 2 days (2017-12-02). ``` 05:08:57 INFO pupa: save person Annette Cleveland as person_5bae4a7a-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save membership 5bae4a7a-d81a-11e7-9f94-0242ac110003 membership in ~{"classification": "upper"} as membership_5bae4d54-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save membership 5bae4a7a-d81a-11e7-9f94-0242ac110003 membership in ~{"classification": "party", "name": "Democratic"} as membership_5bae4f52-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save person Steve Conway as person_5baed576-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save membership 5baed576-d81a-11e7-9f94-0242ac110003 membership in ~{"classification": "upper"} as membership_5baed9ea-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save membership 5baed576-d81a-11e7-9f94-0242ac110003 membership in ~{"classification": "party", "name": "Democratic"} as membership_5baedc4c-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save person Jeannie Darneille as person_5baf5dde-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save membership 5baf5dde-d81a-11e7-9f94-0242ac110003 membership in ~{"classification": "upper"} as membership_5baf60b8-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save membership 5baf5dde-d81a-11e7-9f94-0242ac110003 membership in ~{"classification": "party", "name": "Democratic"} as membership_5baf62b6-d81a-11e7-9f94-0242ac110003.json 05:08:57 INFO pupa: save person Manka Dhingra as person_5bafe70e-d81a-11e7-9f94-0242ac110003.json committees: {} no pupa_settings on path, using defaults wa (scrape, import) bills: {} people: {} raise ve File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module> load_entry_point('pupa', 'console_scripts', 'pupa')() File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 67, in main subcommands[args.subcommand].handle(args, other) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle return self.do_handle(args, other, juris) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 305, in do_handle report['scrape'] = self.do_scrape(juris, args, scrapers) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 173, in do_scrape report[scraper_name] = scraper.do_scrape(**scrape_args) File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 116, in do_scrape self.save_object(obj) File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 99, in save_object Traceback (most recent call last): File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 96, in save_object obj.validate() File "/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py", line 191, in validate self.__class__.__name__, self._id, '\n\t'+'\n\t'.join(errors) pupa.exceptions.ScrapeValueError: validation of Person 5bafe70e-d81a-11e7-9f94-0242ac110003 failed: None is not of type 'string' Failed validating 'type' in schema['properties']['contact_details']['items']['properties']['value']: {'minLength': 1, 'type': 'string'} On instance['contact_details'][0]['value']: None '' is too short Failed validating 'minLength' in schema['properties']['contact_details']['items']['properties']['value']: {'minLength': 1, 'type': 'string'} On 
instance['contact_details'][1]['value']: '' None is not of type 'string' Failed validating 'type' in schema['properties']['contact_details']['items']['properties']['value']: {'minLength': 1, 'type': 'string'} On instance['contact_details'][2]['value']: None ``` Visit http://bobsled.openstates.org for more info. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openstates/wa/people.py` Content: ``` 1 import re 2 from pupa.scrape import Person, Scraper 3 from openstates.utils import LXMLMixin 4 5 6 class WAPersonScraper(Scraper, LXMLMixin): 7 8 def scrape(self, chamber=None): 9 chambers = [chamber] if chamber else ['upper', 'lower'] 10 for chamber in chambers: 11 if chamber == 'upper': 12 index_url = 'http://www.leg.wa.gov/senate/senators/Pages/default.aspx' 13 else: 14 index_url = 'http://www.leg.wa.gov/house/representatives/Pages/default.aspx' 15 yield from self.scrape_chamber(index_url, chamber) 16 17 def scrape_chamber(self, index_url, chamber): 18 doc = self.lxmlize(index_url) 19 20 # Email addresses are listed on a separate page. 21 email_list_url = 'http://app.leg.wa.gov/memberemail/Default.aspx' 22 email_doc = self.lxmlize(email_list_url) 23 24 for member in doc.xpath('//div[@id="allMembers"]/div[@class="memberInformation"]'): 25 (photo_url, ) = member.xpath('.//a[text()="Print Quality Photo"]/@href') 26 27 (title_name_party, ) = member.xpath('.//span[@class="memberName"]/text()') 28 (name, party) = re.search(r'^(?:Senator|Representative)\s(.+)\s\(([RD])\)$', 29 title_name_party).groups() 30 if party == 'R': 31 party = "Republican" 32 elif party == 'D': 33 party = "Democratic" 34 35 (district_name, _district_name, ) = member.xpath('.//a[contains(text(),' 36 ' " Legislative District")]/text()') 37 assert district_name == _district_name 38 district_num = re.search(r'(\d{1,2})\w{2} Legislative District', 39 district_name).group(1) 40 41 leg = Person(name=name, primary_org=chamber, image=photo_url, 42 district=district_num, party=party) 43 44 leg.add_link(member.xpath('.//a[contains(text(), "Home Page")]/@href')[0]) 45 46 capitol_office = member.xpath('.//div[@class="memberColumnTitle" and' 47 'text()=" Olympia Office"]/parent::div[1]/text()') 48 capitol_office = [l.strip() for l in capitol_office if l.strip()] 49 50 capitol_fax = None 51 capitol_phone = None 52 capitol_address = None 53 54 # Can't capture any information anyway if office data is empty, 55 # so we can skip if that's the case. 56 if capitol_office: 57 # Retrieve capitol office fax number. 58 if capitol_office[-1].startswith('Fax: '): 59 capitol_fax = capitol_office.pop().replace('Fax: ', "") 60 61 # Retrieve capitol office phone number. 62 capitol_phone = capitol_office.pop() 63 64 # Retrieve capitol office address. 65 capitol_address = '\n'.join(capitol_office) 66 67 # Retrieve the member's position from the email link. 68 # We need it to find the member's email address. 
69 # These positions are enough to discriminate the chamber too (0 = upper, 1,2 = lower) 70 email_link_url = member.xpath('.//a[contains(@href, "memberEmail")]')[0].get('href') 71 position = re.search(r'/([[0-9]+)$', email_link_url).group(1) 72 73 # Need to get the email from the email page by matching - 74 # with the member's district and position 75 email = self.get_node( 76 email_doc, 77 './/tr/td/a[contains(@href, "memberEmail/{}/{}")]/parent::td/' 78 'following-sibling::td[1]/text()'.format( 79 district_num, 80 position)).strip() 81 82 leg.add_contact_detail(type='address', value=capitol_address, 83 note='Capitol Office') 84 leg.add_contact_detail(type='email', value=email, 85 note='Capitol Office') 86 leg.add_contact_detail(type='voice', value=capitol_phone, 87 note='Capitol Office') 88 leg.add_contact_detail(type='fax', value=str(capitol_fax), 89 note='Capitol Office') 90 _has_district_office = member.xpath('.//div[@class="memberColumnTitle" and' 91 'text()=" District Office"]') 92 if _has_district_office: 93 # Out of both chambers, only one member has multiple district offices, 94 # so ignore that. Also ignore the few members who have separate mailing addresses 95 district_office = member.xpath('.//div[@class="memberColumnTitle" and' 96 'text()=" District Office"]/parent::div[1]/text()') 97 district_office = [l.strip() for l in district_office if l.strip()] 98 _end_of_first_address = district_office.index([l for l in district_office if 99 re.search(r'\,\s*WA\s*\d{5}', l)][0] 100 ) 101 district_address = '\n'.join(district_office[0:(_end_of_first_address + 1)]) 102 try: 103 district_phone = district_office[(_end_of_first_address + 1)] 104 assert re.match(r'\(\d{3}\) \d{3} \- \d{4}', district_phone) 105 except IndexError: 106 pass 107 except AssertionError: 108 pass 109 110 leg.add_contact_detail(type='voice', value=district_phone, 111 note='District Office') 112 113 leg.add_contact_detail(type='address', value=district_address, 114 note='District Office') 115 leg.add_source(index_url) 116 leg.add_link(index_url) 117 yield leg 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openstates/wa/people.py b/openstates/wa/people.py --- a/openstates/wa/people.py +++ b/openstates/wa/people.py @@ -79,14 +79,18 @@ district_num, position)).strip() - leg.add_contact_detail(type='address', value=capitol_address, - note='Capitol Office') - leg.add_contact_detail(type='email', value=email, - note='Capitol Office') - leg.add_contact_detail(type='voice', value=capitol_phone, - note='Capitol Office') - leg.add_contact_detail(type='fax', value=str(capitol_fax), - note='Capitol Office') + if capitol_address: + leg.add_contact_detail(type='address', value=capitol_address, + note='Capitol Office') + if email: + leg.add_contact_detail(type='email', value=email, + note='Capitol Office') + if capitol_phone: + leg.add_contact_detail(type='voice', value=capitol_phone, + note='Capitol Office') + if capitol_fax: + leg.add_contact_detail(type='fax', value=str(capitol_fax), + note='Capitol Office') _has_district_office = member.xpath('.//div[@class="memberColumnTitle" and' 'text()=" District Office"]') if _has_district_office: @@ -107,11 +111,13 @@ except AssertionError: pass - leg.add_contact_detail(type='voice', value=district_phone, - note='District Office') + if district_phone: + leg.add_contact_detail(type='voice', value=district_phone, + note='District Office') - leg.add_contact_detail(type='address', value=district_address, - note='District Office') + if district_address: + leg.add_contact_detail(type='address', value=district_address, + note='District Office') leg.add_source(index_url) leg.add_link(index_url) yield leg
{"golden_diff": "diff --git a/openstates/wa/people.py b/openstates/wa/people.py\n--- a/openstates/wa/people.py\n+++ b/openstates/wa/people.py\n@@ -79,14 +79,18 @@\n district_num,\n position)).strip()\n \n- leg.add_contact_detail(type='address', value=capitol_address,\n- note='Capitol Office')\n- leg.add_contact_detail(type='email', value=email,\n- note='Capitol Office')\n- leg.add_contact_detail(type='voice', value=capitol_phone,\n- note='Capitol Office')\n- leg.add_contact_detail(type='fax', value=str(capitol_fax),\n- note='Capitol Office')\n+ if capitol_address:\n+ leg.add_contact_detail(type='address', value=capitol_address,\n+ note='Capitol Office')\n+ if email:\n+ leg.add_contact_detail(type='email', value=email,\n+ note='Capitol Office')\n+ if capitol_phone:\n+ leg.add_contact_detail(type='voice', value=capitol_phone,\n+ note='Capitol Office')\n+ if capitol_fax:\n+ leg.add_contact_detail(type='fax', value=str(capitol_fax),\n+ note='Capitol Office')\n _has_district_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" District Office\"]')\n if _has_district_office:\n@@ -107,11 +111,13 @@\n except AssertionError:\n pass\n \n- leg.add_contact_detail(type='voice', value=district_phone,\n- note='District Office')\n+ if district_phone:\n+ leg.add_contact_detail(type='voice', value=district_phone,\n+ note='District Office')\n \n- leg.add_contact_detail(type='address', value=district_address,\n- note='District Office')\n+ if district_address:\n+ leg.add_contact_detail(type='address', value=district_address,\n+ note='District Office')\n leg.add_source(index_url)\n leg.add_link(index_url)\n yield leg\n", "issue": "WA failing since at least 2017-12-02\nWA has been failing since 2017-12-02\n\nBased on automated runs it appears that WA has not run successfully in 2 days (2017-12-02).\n\n\n```\n 05:08:57 INFO pupa: save person Annette Cleveland as person_5bae4a7a-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save membership 5bae4a7a-d81a-11e7-9f94-0242ac110003 membership in ~{\"classification\": \"upper\"} as membership_5bae4d54-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save membership 5bae4a7a-d81a-11e7-9f94-0242ac110003 membership in ~{\"classification\": \"party\", \"name\": \"Democratic\"} as membership_5bae4f52-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save person Steve Conway as person_5baed576-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save membership 5baed576-d81a-11e7-9f94-0242ac110003 membership in ~{\"classification\": \"upper\"} as membership_5baed9ea-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save membership 5baed576-d81a-11e7-9f94-0242ac110003 membership in ~{\"classification\": \"party\", \"name\": \"Democratic\"} as membership_5baedc4c-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save person Jeannie Darneille as person_5baf5dde-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save membership 5baf5dde-d81a-11e7-9f94-0242ac110003 membership in ~{\"classification\": \"upper\"} as membership_5baf60b8-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save membership 5baf5dde-d81a-11e7-9f94-0242ac110003 membership in ~{\"classification\": \"party\", \"name\": \"Democratic\"} as membership_5baf62b6-d81a-11e7-9f94-0242ac110003.json\n05:08:57 INFO pupa: save person Manka Dhingra as person_5bafe70e-d81a-11e7-9f94-0242ac110003.json\n committees: {}\nno pupa_settings on path, using defaults\nwa (scrape, import)\n bills: {}\n people: {}\n raise ve\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 
11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 67, in main\n subcommands[args.subcommand].handle(args, other)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 305, in do_handle\n report['scrape'] = self.do_scrape(juris, args, scrapers)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 173, in do_scrape\n report[scraper_name] = scraper.do_scrape(**scrape_args)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 116, in do_scrape\n self.save_object(obj)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 99, in save_object\nTraceback (most recent call last):\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 96, in save_object\n obj.validate()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/scrape/base.py\", line 191, in validate\n self.__class__.__name__, self._id, '\\n\\t'+'\\n\\t'.join(errors)\npupa.exceptions.ScrapeValueError: validation of Person 5bafe70e-d81a-11e7-9f94-0242ac110003 failed: \n\tNone is not of type 'string'\nFailed validating 'type' in schema['properties']['contact_details']['items']['properties']['value']:\n {'minLength': 1, 'type': 'string'}\nOn instance['contact_details'][0]['value']:\n None\n\t'' is too short\nFailed validating 'minLength' in schema['properties']['contact_details']['items']['properties']['value']:\n {'minLength': 1, 'type': 'string'}\nOn instance['contact_details'][1]['value']:\n ''\n\tNone is not of type 'string'\nFailed validating 'type' in schema['properties']['contact_details']['items']['properties']['value']:\n {'minLength': 1, 'type': 'string'}\nOn instance['contact_details'][2]['value']:\n None\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "import re\nfrom pupa.scrape import Person, Scraper\nfrom openstates.utils import LXMLMixin\n\n\nclass WAPersonScraper(Scraper, LXMLMixin):\n\n def scrape(self, chamber=None):\n chambers = [chamber] if chamber else ['upper', 'lower']\n for chamber in chambers:\n if chamber == 'upper':\n index_url = 'http://www.leg.wa.gov/senate/senators/Pages/default.aspx'\n else:\n index_url = 'http://www.leg.wa.gov/house/representatives/Pages/default.aspx'\n yield from self.scrape_chamber(index_url, chamber)\n\n def scrape_chamber(self, index_url, chamber):\n doc = self.lxmlize(index_url)\n\n # Email addresses are listed on a separate page.\n email_list_url = 'http://app.leg.wa.gov/memberemail/Default.aspx'\n email_doc = self.lxmlize(email_list_url)\n\n for member in doc.xpath('//div[@id=\"allMembers\"]/div[@class=\"memberInformation\"]'):\n (photo_url, ) = member.xpath('.//a[text()=\"Print Quality Photo\"]/@href')\n\n (title_name_party, ) = member.xpath('.//span[@class=\"memberName\"]/text()')\n (name, party) = re.search(r'^(?:Senator|Representative)\\s(.+)\\s\\(([RD])\\)$',\n title_name_party).groups()\n if party == 'R':\n party = \"Republican\"\n elif party == 'D':\n party = \"Democratic\"\n\n (district_name, _district_name, ) = member.xpath('.//a[contains(text(),'\n ' \" Legislative District\")]/text()')\n assert district_name == _district_name\n district_num = re.search(r'(\\d{1,2})\\w{2} Legislative District',\n district_name).group(1)\n\n leg = Person(name=name, primary_org=chamber, image=photo_url,\n 
district=district_num, party=party)\n\n leg.add_link(member.xpath('.//a[contains(text(), \"Home Page\")]/@href')[0])\n\n capitol_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" Olympia Office\"]/parent::div[1]/text()')\n capitol_office = [l.strip() for l in capitol_office if l.strip()]\n\n capitol_fax = None\n capitol_phone = None\n capitol_address = None\n\n # Can't capture any information anyway if office data is empty,\n # so we can skip if that's the case.\n if capitol_office:\n # Retrieve capitol office fax number.\n if capitol_office[-1].startswith('Fax: '):\n capitol_fax = capitol_office.pop().replace('Fax: ', \"\")\n\n # Retrieve capitol office phone number.\n capitol_phone = capitol_office.pop()\n\n # Retrieve capitol office address.\n capitol_address = '\\n'.join(capitol_office)\n\n # Retrieve the member's position from the email link.\n # We need it to find the member's email address.\n # These positions are enough to discriminate the chamber too (0 = upper, 1,2 = lower)\n email_link_url = member.xpath('.//a[contains(@href, \"memberEmail\")]')[0].get('href')\n position = re.search(r'/([[0-9]+)$', email_link_url).group(1)\n\n # Need to get the email from the email page by matching -\n # with the member's district and position\n email = self.get_node(\n email_doc,\n './/tr/td/a[contains(@href, \"memberEmail/{}/{}\")]/parent::td/'\n 'following-sibling::td[1]/text()'.format(\n district_num,\n position)).strip()\n\n leg.add_contact_detail(type='address', value=capitol_address,\n note='Capitol Office')\n leg.add_contact_detail(type='email', value=email,\n note='Capitol Office')\n leg.add_contact_detail(type='voice', value=capitol_phone,\n note='Capitol Office')\n leg.add_contact_detail(type='fax', value=str(capitol_fax),\n note='Capitol Office')\n _has_district_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" District Office\"]')\n if _has_district_office:\n # Out of both chambers, only one member has multiple district offices,\n # so ignore that. 
Also ignore the few members who have separate mailing addresses\n district_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" District Office\"]/parent::div[1]/text()')\n district_office = [l.strip() for l in district_office if l.strip()]\n _end_of_first_address = district_office.index([l for l in district_office if\n re.search(r'\\,\\s*WA\\s*\\d{5}', l)][0]\n )\n district_address = '\\n'.join(district_office[0:(_end_of_first_address + 1)])\n try:\n district_phone = district_office[(_end_of_first_address + 1)]\n assert re.match(r'\\(\\d{3}\\) \\d{3} \\- \\d{4}', district_phone)\n except IndexError:\n pass\n except AssertionError:\n pass\n\n leg.add_contact_detail(type='voice', value=district_phone,\n note='District Office')\n\n leg.add_contact_detail(type='address', value=district_address,\n note='District Office')\n leg.add_source(index_url)\n leg.add_link(index_url)\n yield leg\n", "path": "openstates/wa/people.py"}], "after_files": [{"content": "import re\nfrom pupa.scrape import Person, Scraper\nfrom openstates.utils import LXMLMixin\n\n\nclass WAPersonScraper(Scraper, LXMLMixin):\n\n def scrape(self, chamber=None):\n chambers = [chamber] if chamber else ['upper', 'lower']\n for chamber in chambers:\n if chamber == 'upper':\n index_url = 'http://www.leg.wa.gov/senate/senators/Pages/default.aspx'\n else:\n index_url = 'http://www.leg.wa.gov/house/representatives/Pages/default.aspx'\n yield from self.scrape_chamber(index_url, chamber)\n\n def scrape_chamber(self, index_url, chamber):\n doc = self.lxmlize(index_url)\n\n # Email addresses are listed on a separate page.\n email_list_url = 'http://app.leg.wa.gov/memberemail/Default.aspx'\n email_doc = self.lxmlize(email_list_url)\n\n for member in doc.xpath('//div[@id=\"allMembers\"]/div[@class=\"memberInformation\"]'):\n (photo_url, ) = member.xpath('.//a[text()=\"Print Quality Photo\"]/@href')\n\n (title_name_party, ) = member.xpath('.//span[@class=\"memberName\"]/text()')\n (name, party) = re.search(r'^(?:Senator|Representative)\\s(.+)\\s\\(([RD])\\)$',\n title_name_party).groups()\n if party == 'R':\n party = \"Republican\"\n elif party == 'D':\n party = \"Democratic\"\n\n (district_name, _district_name, ) = member.xpath('.//a[contains(text(),'\n ' \" Legislative District\")]/text()')\n assert district_name == _district_name\n district_num = re.search(r'(\\d{1,2})\\w{2} Legislative District',\n district_name).group(1)\n\n leg = Person(name=name, primary_org=chamber, image=photo_url,\n district=district_num, party=party)\n\n leg.add_link(member.xpath('.//a[contains(text(), \"Home Page\")]/@href')[0])\n\n capitol_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" Olympia Office\"]/parent::div[1]/text()')\n capitol_office = [l.strip() for l in capitol_office if l.strip()]\n\n capitol_fax = None\n capitol_phone = None\n capitol_address = None\n\n # Can't capture any information anyway if office data is empty,\n # so we can skip if that's the case.\n if capitol_office:\n # Retrieve capitol office fax number.\n if capitol_office[-1].startswith('Fax: '):\n capitol_fax = capitol_office.pop().replace('Fax: ', \"\")\n\n # Retrieve capitol office phone number.\n capitol_phone = capitol_office.pop()\n\n # Retrieve capitol office address.\n capitol_address = '\\n'.join(capitol_office)\n\n # Retrieve the member's position from the email link.\n # We need it to find the member's email address.\n # These positions are enough to discriminate the chamber too (0 = upper, 1,2 = lower)\n email_link_url = 
member.xpath('.//a[contains(@href, \"memberEmail\")]')[0].get('href')\n position = re.search(r'/([[0-9]+)$', email_link_url).group(1)\n\n # Need to get the email from the email page by matching -\n # with the member's district and position\n email = self.get_node(\n email_doc,\n './/tr/td/a[contains(@href, \"memberEmail/{}/{}\")]/parent::td/'\n 'following-sibling::td[1]/text()'.format(\n district_num,\n position)).strip()\n\n if capitol_address:\n leg.add_contact_detail(type='address', value=capitol_address,\n note='Capitol Office')\n if email:\n leg.add_contact_detail(type='email', value=email,\n note='Capitol Office')\n if capitol_phone:\n leg.add_contact_detail(type='voice', value=capitol_phone,\n note='Capitol Office')\n if capitol_fax:\n leg.add_contact_detail(type='fax', value=str(capitol_fax),\n note='Capitol Office')\n _has_district_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" District Office\"]')\n if _has_district_office:\n # Out of both chambers, only one member has multiple district offices,\n # so ignore that. Also ignore the few members who have separate mailing addresses\n district_office = member.xpath('.//div[@class=\"memberColumnTitle\" and'\n 'text()=\" District Office\"]/parent::div[1]/text()')\n district_office = [l.strip() for l in district_office if l.strip()]\n _end_of_first_address = district_office.index([l for l in district_office if\n re.search(r'\\,\\s*WA\\s*\\d{5}', l)][0]\n )\n district_address = '\\n'.join(district_office[0:(_end_of_first_address + 1)])\n try:\n district_phone = district_office[(_end_of_first_address + 1)]\n assert re.match(r'\\(\\d{3}\\) \\d{3} \\- \\d{4}', district_phone)\n except IndexError:\n pass\n except AssertionError:\n pass\n\n if district_phone:\n leg.add_contact_detail(type='voice', value=district_phone,\n note='District Office')\n\n if district_address:\n leg.add_contact_detail(type='address', value=district_address,\n note='District Office')\n leg.add_source(index_url)\n leg.add_link(index_url)\n yield leg\n", "path": "openstates/wa/people.py"}]}
3,236
453
gh_patches_debug_42177
rasdani/github-patches
git_diff
rasterio__rasterio-795
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plot: extents from array plus transform Currently, when passing an array into the `rasterio.plot.show`, there is no way to set the extent axis labels. (`with_bounds` is only available with an open dataset). A common use case is plotting an intermediate result for which you have an array and affine but no open dataset. Would be nice to have the following produce axes in spatial coordinates ``` show(array, transform=aff) # would imply with_bounds=True ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/plot.py` Content: ``` 1 """Implementations of various common operations. 2 3 Including `show()` for displaying an array or with matplotlib. 4 Most can handle a numpy array or `rasterio.Band()`. 5 Primarily supports `$ rio insp`. 6 """ 7 8 from __future__ import absolute_import 9 10 import logging 11 import warnings 12 13 import numpy as np 14 15 import rasterio 16 from rasterio._io import RasterReader 17 18 from rasterio.compat import zip_longest 19 20 logger = logging.getLogger(__name__) 21 22 23 def get_plt(): 24 """import matplotlib.pyplot 25 raise import error if matplotlib is not installed 26 """ 27 try: 28 import matplotlib.pyplot as plt 29 return plt 30 except (ImportError, RuntimeError): # pragma: no cover 31 msg = "Could not import matplotlib\n" 32 msg += "matplotlib required for plotting functions" 33 raise ImportError(msg) 34 35 36 def show(source, with_bounds=True, contour=False, contour_label_kws=None, 37 ax=None, title=None, **kwargs): 38 """Display a raster or raster band using matplotlib. 39 40 Parameters 41 ---------- 42 source : array-like in raster axis order, 43 or (raster dataset, bidx) tuple, 44 or raster dataset, 45 If the tuple (raster dataset, bidx), 46 selects band `bidx` from raster. If raster dataset display the rgb image 47 as defined in the colorinterp metadata, or default to first band. 48 with_bounds : bool (opt) 49 Whether to change the image extent to the spatial bounds of the image, 50 rather than pixel coordinates. Only works when source is 51 (raster dataset, bidx) or raster dataset. 52 contour : bool (opt) 53 Whether to plot the raster data as contours 54 contour_label_kws : dictionary (opt) 55 Keyword arguments for labeling the contours, 56 empty dictionary for no labels. 57 ax : matplotlib axis (opt) 58 Axis to plot on, otherwise uses current axis. 59 title : str, optional 60 Title for the figure. 61 **kwargs : key, value pairings optional 62 These will be passed to the matplotlib imshow or contour method 63 depending on contour argument. 64 See full lists at: 65 http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow 66 or 67 http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour 68 69 Returns 70 ------- 71 ax : matplotlib Axes 72 Axes with plot. 
73 """ 74 plt = get_plt() 75 76 if isinstance(source, tuple): 77 arr = source[0].read(source[1]) 78 if with_bounds: 79 kwargs['extent'] = plotting_extent(source[0]) 80 elif isinstance(source, RasterReader): 81 if source.count == 1: 82 arr = source.read(1, masked=True) 83 else: 84 try: 85 source_colorinterp = {source.colorinterp(n): n for n in source.indexes} 86 colorinterp = rasterio.enums.ColorInterp 87 rgb_indexes = [source_colorinterp[ci] for ci in 88 (colorinterp.red, colorinterp.green, colorinterp.blue)] 89 arr = source.read(rgb_indexes, masked=True) 90 arr = reshape_as_image(arr) 91 92 if with_bounds: 93 kwargs['extent'] = plotting_extent(source) 94 except KeyError: 95 arr = source.read(1, masked=True) 96 else: 97 # The source is a numpy array reshape it to image if it has 3+ bands 98 source = np.ma.squeeze(source) 99 if len(source.shape) >= 3: 100 arr = reshape_as_image(source) 101 else: 102 arr = source 103 104 show = False 105 if not ax: 106 show = True 107 ax = plt.gca() 108 109 if contour: 110 if 'cmap' not in kwargs: 111 kwargs['colors'] = kwargs.get('colors', 'red') 112 kwargs['linewidths'] = kwargs.get('linewidths', 1.5) 113 kwargs['alpha'] = kwargs.get('alpha', 0.8) 114 115 C = ax.contour(arr, origin='upper', **kwargs) 116 if contour_label_kws is None: 117 # no explicit label kws passed use defaults 118 contour_label_kws = dict(fontsize=8, 119 inline=True) 120 if contour_label_kws: 121 ax.clabel(C, **contour_label_kws) 122 else: 123 ax.imshow(arr, **kwargs) 124 if title: 125 ax.set_title(title, fontweight='bold') 126 127 if show: 128 plt.show() 129 130 return ax 131 132 133 def plotting_extent(source): 134 """Returns an extent in the format needed 135 for matplotlib's imshow (left, right, bottom, top) 136 instead of rasterio's bounds (left, bottom, top, right) 137 138 Parameters 139 ---------- 140 source : raster dataset 141 """ 142 extent = (source.bounds.left, source.bounds.right, 143 source.bounds.bottom, source.bounds.top) 144 return extent 145 146 147 def reshape_as_image(arr): 148 """Returns the source array reshaped into the order 149 expected by image processing and visualization software 150 (matplotlib, scikit-image, etc) 151 by swapping the axes order from (bands, rows, columns) 152 to (rows, columns, bands) 153 154 Parameters 155 ---------- 156 source : array-like in a of format (bands, rows, columns) 157 """ 158 # swap the axes order from (bands, rows, columns) to (rows, columns, bands) 159 im = np.ma.transpose(arr, [1,2,0]) 160 return im 161 162 163 164 def reshape_as_raster(arr): 165 """Returns the array in a raster order 166 by swapping the axes order from (rows, columns, bands) 167 to (bands, rows, columns) 168 169 Parameters 170 ---------- 171 arr : array-like in the image form of (rows, columns, bands) 172 """ 173 # swap the axes order from (rows, columns, bands) to (bands, rows, columns) 174 im = np.transpose(arr, [2,0,1]) 175 return im 176 177 178 def show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs): 179 """Easily display a histogram with matplotlib. 180 181 Parameters 182 ---------- 183 source : np.array or RasterReader, rasterio.Band or tuple(dataset, bidx) 184 Input data to display. The first three arrays in multi-dimensional 185 arrays are plotted as red, green, and blue. 186 bins : int, optional 187 Compute histogram across N bins. 188 masked : bool, optional 189 When working with a `rasterio.Band()` object, specifies if the data 190 should be masked on read. 191 title : str, optional 192 Title for the figure. 
193 ax : matplotlib axes (opt) 194 The raster will be added to this axes if passed. 195 **kwargs : optional keyword arguments 196 These will be passed to the matplotlib hist method. See full list at: 197 http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist 198 """ 199 plt = get_plt() 200 201 if isinstance(source, RasterReader): 202 arr = source.read(masked=masked) 203 elif isinstance(source, (tuple, rasterio.Band)): 204 arr = source[0].read(source[1], masked=masked) 205 else: 206 arr = source 207 208 # The histogram is computed individually for each 'band' in the array 209 # so we need the overall min/max to constrain the plot 210 rng = arr.min(), arr.max() 211 212 if len(arr.shape) is 2: 213 arr = np.expand_dims(arr.flatten(), 0).T 214 colors = ['gold'] 215 else: 216 arr = arr.reshape(arr.shape[0], -1).T 217 colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown'] 218 219 #The goal is to provide a curated set of colors for working with 220 # smaller datasets and let matplotlib define additional colors when 221 # working with larger datasets. 222 if arr.shape[-1] > len(colors): 223 n = arr.shape[-1] - len(colors) 224 colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n)))) 225 else: 226 colors = colors[:arr.shape[-1]] 227 228 # If a rasterio.Band() is given make sure the proper index is displayed 229 # in the legend. 230 if isinstance(source, (tuple, rasterio.Band)): 231 labels = [str(source[1])] 232 else: 233 labels = (str(i + 1) for i in range(len(arr))) 234 235 if ax: 236 show = False 237 else: 238 show = True 239 ax = plt.gca() 240 241 fig = ax.get_figure() 242 243 ax.hist(arr, 244 bins=bins, 245 color=colors, 246 label=labels, 247 range=rng, 248 **kwargs) 249 250 ax.legend(loc="upper right") 251 ax.set_title(title, fontweight='bold') 252 ax.grid(True) 253 ax.set_xlabel('DN') 254 ax.set_ylabel('Frequency') 255 if show: 256 plt.show() 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/plot.py b/rasterio/plot.py --- a/rasterio/plot.py +++ b/rasterio/plot.py @@ -14,7 +14,7 @@ import rasterio from rasterio._io import RasterReader - +from rasterio.transform import guard_transform from rasterio.compat import zip_longest logger = logging.getLogger(__name__) @@ -34,7 +34,7 @@ def show(source, with_bounds=True, contour=False, contour_label_kws=None, - ax=None, title=None, **kwargs): + ax=None, title=None, transform=None, **kwargs): """Display a raster or raster band using matplotlib. Parameters @@ -58,6 +58,8 @@ Axis to plot on, otherwise uses current axis. title : str, optional Title for the figure. + transform : Affine, optional + Defines the affine transform if source is an array **kwargs : key, value pairings optional These will be passed to the matplotlib imshow or contour method depending on contour argument. @@ -100,6 +102,8 @@ arr = reshape_as_image(source) else: arr = source + if transform and with_bounds: + kwargs['extent'] = plotting_extent(arr, transform) show = False if not ax: @@ -130,17 +134,29 @@ return ax -def plotting_extent(source): +def plotting_extent(source, transform=None): """Returns an extent in the format needed for matplotlib's imshow (left, right, bottom, top) instead of rasterio's bounds (left, bottom, top, right) Parameters ---------- - source : raster dataset + source : raster dataset or array in image order (see reshape_as_image) + transform: Affine, required if source is array """ - extent = (source.bounds.left, source.bounds.right, - source.bounds.bottom, source.bounds.top) + if hasattr(source, 'bounds'): + extent = (source.bounds.left, source.bounds.right, + source.bounds.bottom, source.bounds.top) + elif not transform: + raise ValueError( + "transform is required if source is an array") + else: + transform = guard_transform(transform) + rows, cols = source.shape[0:2] + left, top = transform * (0, 0) + right, bottom = transform * (cols, rows) + extent = (left, right, bottom, top) + return extent @@ -216,7 +232,7 @@ arr = arr.reshape(arr.shape[0], -1).T colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown'] - #The goal is to provide a curated set of colors for working with + # The goal is to provide a curated set of colors for working with # smaller datasets and let matplotlib define additional colors when # working with larger datasets. if arr.shape[-1] > len(colors): @@ -241,11 +257,11 @@ fig = ax.get_figure() ax.hist(arr, - bins=bins, - color=colors, - label=labels, - range=rng, - **kwargs) + bins=bins, + color=colors, + label=labels, + range=rng, + **kwargs) ax.legend(loc="upper right") ax.set_title(title, fontweight='bold')
{"golden_diff": "diff --git a/rasterio/plot.py b/rasterio/plot.py\n--- a/rasterio/plot.py\n+++ b/rasterio/plot.py\n@@ -14,7 +14,7 @@\n \n import rasterio\n from rasterio._io import RasterReader\n-\n+from rasterio.transform import guard_transform\n from rasterio.compat import zip_longest\n \n logger = logging.getLogger(__name__)\n@@ -34,7 +34,7 @@\n \n \n def show(source, with_bounds=True, contour=False, contour_label_kws=None,\n- ax=None, title=None, **kwargs):\n+ ax=None, title=None, transform=None, **kwargs):\n \"\"\"Display a raster or raster band using matplotlib.\n \n Parameters\n@@ -58,6 +58,8 @@\n Axis to plot on, otherwise uses current axis.\n title : str, optional\n Title for the figure.\n+ transform : Affine, optional\n+ Defines the affine transform if source is an array\n **kwargs : key, value pairings optional\n These will be passed to the matplotlib imshow or contour method\n depending on contour argument.\n@@ -100,6 +102,8 @@\n arr = reshape_as_image(source)\n else:\n arr = source\n+ if transform and with_bounds:\n+ kwargs['extent'] = plotting_extent(arr, transform)\n \n show = False\n if not ax:\n@@ -130,17 +134,29 @@\n return ax\n \n \n-def plotting_extent(source):\n+def plotting_extent(source, transform=None):\n \"\"\"Returns an extent in the format needed\n for matplotlib's imshow (left, right, bottom, top)\n instead of rasterio's bounds (left, bottom, top, right)\n \n Parameters\n ----------\n- source : raster dataset\n+ source : raster dataset or array in image order (see reshape_as_image)\n+ transform: Affine, required if source is array\n \"\"\"\n- extent = (source.bounds.left, source.bounds.right,\n- source.bounds.bottom, source.bounds.top)\n+ if hasattr(source, 'bounds'):\n+ extent = (source.bounds.left, source.bounds.right,\n+ source.bounds.bottom, source.bounds.top)\n+ elif not transform:\n+ raise ValueError(\n+ \"transform is required if source is an array\")\n+ else:\n+ transform = guard_transform(transform)\n+ rows, cols = source.shape[0:2]\n+ left, top = transform * (0, 0)\n+ right, bottom = transform * (cols, rows)\n+ extent = (left, right, bottom, top)\n+\n return extent\n \n \n@@ -216,7 +232,7 @@\n arr = arr.reshape(arr.shape[0], -1).T\n colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']\n \n- #The goal is to provide a curated set of colors for working with\n+ # The goal is to provide a curated set of colors for working with\n # smaller datasets and let matplotlib define additional colors when\n # working with larger datasets.\n if arr.shape[-1] > len(colors):\n@@ -241,11 +257,11 @@\n fig = ax.get_figure()\n \n ax.hist(arr,\n- bins=bins,\n- color=colors,\n- label=labels,\n- range=rng,\n- **kwargs)\n+ bins=bins,\n+ color=colors,\n+ label=labels,\n+ range=rng,\n+ **kwargs)\n \n ax.legend(loc=\"upper right\")\n ax.set_title(title, fontweight='bold')\n", "issue": "Plot: extents from array plus transform \nCurrently, when passing an array into the `rasterio.plot.show`, there is no way to set the extent axis labels. (`with_bounds` is only available with an open dataset).\n\nA common use case is plotting an intermediate result for which you have an array and affine but no open dataset. 
Would be nice to have the following produce axes in spatial coordinates\n\n```\nshow(array, transform=aff) # would imply with_bounds=True\n```\n\n", "before_files": [{"content": "\"\"\"Implementations of various common operations.\n\nIncluding `show()` for displaying an array or with matplotlib.\nMost can handle a numpy array or `rasterio.Band()`.\nPrimarily supports `$ rio insp`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._io import RasterReader\n\nfrom rasterio.compat import zip_longest\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_plt():\n \"\"\"import matplotlib.pyplot\n raise import error if matplotlib is not installed\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n return plt\n except (ImportError, RuntimeError): # pragma: no cover\n msg = \"Could not import matplotlib\\n\"\n msg += \"matplotlib required for plotting functions\"\n raise ImportError(msg)\n\n\ndef show(source, with_bounds=True, contour=False, contour_label_kws=None,\n ax=None, title=None, **kwargs):\n \"\"\"Display a raster or raster band using matplotlib.\n\n Parameters\n ----------\n source : array-like in raster axis order,\n or (raster dataset, bidx) tuple,\n or raster dataset,\n If the tuple (raster dataset, bidx),\n selects band `bidx` from raster. If raster dataset display the rgb image\n as defined in the colorinterp metadata, or default to first band.\n with_bounds : bool (opt)\n Whether to change the image extent to the spatial bounds of the image,\n rather than pixel coordinates. Only works when source is\n (raster dataset, bidx) or raster dataset.\n contour : bool (opt)\n Whether to plot the raster data as contours\n contour_label_kws : dictionary (opt)\n Keyword arguments for labeling the contours,\n empty dictionary for no labels.\n ax : matplotlib axis (opt)\n Axis to plot on, otherwise uses current axis.\n title : str, optional\n Title for the figure.\n **kwargs : key, value pairings optional\n These will be passed to the matplotlib imshow or contour method\n depending on contour argument.\n See full lists at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow\n or\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour\n\n Returns\n -------\n ax : matplotlib Axes\n Axes with plot.\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n if with_bounds:\n kwargs['extent'] = plotting_extent(source[0])\n elif isinstance(source, RasterReader):\n if source.count == 1:\n arr = source.read(1, masked=True)\n else:\n try:\n source_colorinterp = {source.colorinterp(n): n for n in source.indexes}\n colorinterp = rasterio.enums.ColorInterp\n rgb_indexes = [source_colorinterp[ci] for ci in\n (colorinterp.red, colorinterp.green, colorinterp.blue)]\n arr = source.read(rgb_indexes, masked=True)\n arr = reshape_as_image(arr)\n\n if with_bounds:\n kwargs['extent'] = plotting_extent(source)\n except KeyError:\n arr = source.read(1, masked=True)\n else:\n # The source is a numpy array reshape it to image if it has 3+ bands\n source = np.ma.squeeze(source)\n if len(source.shape) >= 3:\n arr = reshape_as_image(source)\n else:\n arr = source\n\n show = False\n if not ax:\n show = True\n ax = plt.gca()\n\n if contour:\n if 'cmap' not in kwargs:\n kwargs['colors'] = kwargs.get('colors', 'red')\n kwargs['linewidths'] = kwargs.get('linewidths', 1.5)\n kwargs['alpha'] = kwargs.get('alpha', 0.8)\n\n C = 
ax.contour(arr, origin='upper', **kwargs)\n if contour_label_kws is None:\n # no explicit label kws passed use defaults\n contour_label_kws = dict(fontsize=8,\n inline=True)\n if contour_label_kws:\n ax.clabel(C, **contour_label_kws)\n else:\n ax.imshow(arr, **kwargs)\n if title:\n ax.set_title(title, fontweight='bold')\n\n if show:\n plt.show()\n\n return ax\n\n\ndef plotting_extent(source):\n \"\"\"Returns an extent in the format needed\n for matplotlib's imshow (left, right, bottom, top)\n instead of rasterio's bounds (left, bottom, top, right)\n\n Parameters\n ----------\n source : raster dataset\n \"\"\"\n extent = (source.bounds.left, source.bounds.right,\n source.bounds.bottom, source.bounds.top)\n return extent\n\n\ndef reshape_as_image(arr):\n \"\"\"Returns the source array reshaped into the order\n expected by image processing and visualization software\n (matplotlib, scikit-image, etc)\n by swapping the axes order from (bands, rows, columns)\n to (rows, columns, bands)\n\n Parameters\n ----------\n source : array-like in a of format (bands, rows, columns)\n \"\"\"\n # swap the axes order from (bands, rows, columns) to (rows, columns, bands)\n im = np.ma.transpose(arr, [1,2,0])\n return im\n\n\n\ndef reshape_as_raster(arr):\n \"\"\"Returns the array in a raster order\n by swapping the axes order from (rows, columns, bands)\n to (bands, rows, columns)\n\n Parameters\n ----------\n arr : array-like in the image form of (rows, columns, bands)\n \"\"\"\n # swap the axes order from (rows, columns, bands) to (bands, rows, columns)\n im = np.transpose(arr, [2,0,1])\n return im\n\n\ndef show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs):\n \"\"\"Easily display a histogram with matplotlib.\n\n Parameters\n ----------\n source : np.array or RasterReader, rasterio.Band or tuple(dataset, bidx)\n Input data to display. The first three arrays in multi-dimensional\n arrays are plotted as red, green, and blue.\n bins : int, optional\n Compute histogram across N bins.\n masked : bool, optional\n When working with a `rasterio.Band()` object, specifies if the data\n should be masked on read.\n title : str, optional\n Title for the figure.\n ax : matplotlib axes (opt)\n The raster will be added to this axes if passed.\n **kwargs : optional keyword arguments\n These will be passed to the matplotlib hist method. 
See full list at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, RasterReader):\n arr = source.read(masked=masked)\n elif isinstance(source, (tuple, rasterio.Band)):\n arr = source[0].read(source[1], masked=masked)\n else:\n arr = source\n\n # The histogram is computed individually for each 'band' in the array\n # so we need the overall min/max to constrain the plot\n rng = arr.min(), arr.max()\n\n if len(arr.shape) is 2:\n arr = np.expand_dims(arr.flatten(), 0).T\n colors = ['gold']\n else:\n arr = arr.reshape(arr.shape[0], -1).T\n colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']\n\n #The goal is to provide a curated set of colors for working with\n # smaller datasets and let matplotlib define additional colors when\n # working with larger datasets.\n if arr.shape[-1] > len(colors):\n n = arr.shape[-1] - len(colors)\n colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n))))\n else:\n colors = colors[:arr.shape[-1]]\n\n # If a rasterio.Band() is given make sure the proper index is displayed\n # in the legend.\n if isinstance(source, (tuple, rasterio.Band)):\n labels = [str(source[1])]\n else:\n labels = (str(i + 1) for i in range(len(arr)))\n\n if ax:\n show = False\n else:\n show = True\n ax = plt.gca()\n\n fig = ax.get_figure()\n\n ax.hist(arr,\n bins=bins,\n color=colors,\n label=labels,\n range=rng,\n **kwargs)\n\n ax.legend(loc=\"upper right\")\n ax.set_title(title, fontweight='bold')\n ax.grid(True)\n ax.set_xlabel('DN')\n ax.set_ylabel('Frequency')\n if show:\n plt.show()\n", "path": "rasterio/plot.py"}], "after_files": [{"content": "\"\"\"Implementations of various common operations.\n\nIncluding `show()` for displaying an array or with matplotlib.\nMost can handle a numpy array or `rasterio.Band()`.\nPrimarily supports `$ rio insp`.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\nimport warnings\n\nimport numpy as np\n\nimport rasterio\nfrom rasterio._io import RasterReader\nfrom rasterio.transform import guard_transform\nfrom rasterio.compat import zip_longest\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_plt():\n \"\"\"import matplotlib.pyplot\n raise import error if matplotlib is not installed\n \"\"\"\n try:\n import matplotlib.pyplot as plt\n return plt\n except (ImportError, RuntimeError): # pragma: no cover\n msg = \"Could not import matplotlib\\n\"\n msg += \"matplotlib required for plotting functions\"\n raise ImportError(msg)\n\n\ndef show(source, with_bounds=True, contour=False, contour_label_kws=None,\n ax=None, title=None, transform=None, **kwargs):\n \"\"\"Display a raster or raster band using matplotlib.\n\n Parameters\n ----------\n source : array-like in raster axis order,\n or (raster dataset, bidx) tuple,\n or raster dataset,\n If the tuple (raster dataset, bidx),\n selects band `bidx` from raster. If raster dataset display the rgb image\n as defined in the colorinterp metadata, or default to first band.\n with_bounds : bool (opt)\n Whether to change the image extent to the spatial bounds of the image,\n rather than pixel coordinates. 
Only works when source is\n (raster dataset, bidx) or raster dataset.\n contour : bool (opt)\n Whether to plot the raster data as contours\n contour_label_kws : dictionary (opt)\n Keyword arguments for labeling the contours,\n empty dictionary for no labels.\n ax : matplotlib axis (opt)\n Axis to plot on, otherwise uses current axis.\n title : str, optional\n Title for the figure.\n transform : Affine, optional\n Defines the affine transform if source is an array\n **kwargs : key, value pairings optional\n These will be passed to the matplotlib imshow or contour method\n depending on contour argument.\n See full lists at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.imshow\n or\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.contour\n\n Returns\n -------\n ax : matplotlib Axes\n Axes with plot.\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, tuple):\n arr = source[0].read(source[1])\n if with_bounds:\n kwargs['extent'] = plotting_extent(source[0])\n elif isinstance(source, RasterReader):\n if source.count == 1:\n arr = source.read(1, masked=True)\n else:\n try:\n source_colorinterp = {source.colorinterp(n): n for n in source.indexes}\n colorinterp = rasterio.enums.ColorInterp\n rgb_indexes = [source_colorinterp[ci] for ci in\n (colorinterp.red, colorinterp.green, colorinterp.blue)]\n arr = source.read(rgb_indexes, masked=True)\n arr = reshape_as_image(arr)\n\n if with_bounds:\n kwargs['extent'] = plotting_extent(source)\n except KeyError:\n arr = source.read(1, masked=True)\n else:\n # The source is a numpy array reshape it to image if it has 3+ bands\n source = np.ma.squeeze(source)\n if len(source.shape) >= 3:\n arr = reshape_as_image(source)\n else:\n arr = source\n if transform and with_bounds:\n kwargs['extent'] = plotting_extent(arr, transform)\n\n show = False\n if not ax:\n show = True\n ax = plt.gca()\n\n if contour:\n if 'cmap' not in kwargs:\n kwargs['colors'] = kwargs.get('colors', 'red')\n kwargs['linewidths'] = kwargs.get('linewidths', 1.5)\n kwargs['alpha'] = kwargs.get('alpha', 0.8)\n\n C = ax.contour(arr, origin='upper', **kwargs)\n if contour_label_kws is None:\n # no explicit label kws passed use defaults\n contour_label_kws = dict(fontsize=8,\n inline=True)\n if contour_label_kws:\n ax.clabel(C, **contour_label_kws)\n else:\n ax.imshow(arr, **kwargs)\n if title:\n ax.set_title(title, fontweight='bold')\n\n if show:\n plt.show()\n\n return ax\n\n\ndef plotting_extent(source, transform=None):\n \"\"\"Returns an extent in the format needed\n for matplotlib's imshow (left, right, bottom, top)\n instead of rasterio's bounds (left, bottom, top, right)\n\n Parameters\n ----------\n source : raster dataset or array in image order (see reshape_as_image)\n transform: Affine, required if source is array\n \"\"\"\n if hasattr(source, 'bounds'):\n extent = (source.bounds.left, source.bounds.right,\n source.bounds.bottom, source.bounds.top)\n elif not transform:\n raise ValueError(\n \"transform is required if source is an array\")\n else:\n transform = guard_transform(transform)\n rows, cols = source.shape[0:2]\n left, top = transform * (0, 0)\n right, bottom = transform * (cols, rows)\n extent = (left, right, bottom, top)\n\n return extent\n\n\ndef reshape_as_image(arr):\n \"\"\"Returns the source array reshaped into the order\n expected by image processing and visualization software\n (matplotlib, scikit-image, etc)\n by swapping the axes order from (bands, rows, columns)\n to (rows, columns, bands)\n\n 
Parameters\n ----------\n source : array-like in a of format (bands, rows, columns)\n \"\"\"\n # swap the axes order from (bands, rows, columns) to (rows, columns, bands)\n im = np.ma.transpose(arr, [1,2,0])\n return im\n\n\n\ndef reshape_as_raster(arr):\n \"\"\"Returns the array in a raster order\n by swapping the axes order from (rows, columns, bands)\n to (bands, rows, columns)\n\n Parameters\n ----------\n arr : array-like in the image form of (rows, columns, bands)\n \"\"\"\n # swap the axes order from (rows, columns, bands) to (bands, rows, columns)\n im = np.transpose(arr, [2,0,1])\n return im\n\n\ndef show_hist(source, bins=10, masked=True, title='Histogram', ax=None, **kwargs):\n \"\"\"Easily display a histogram with matplotlib.\n\n Parameters\n ----------\n source : np.array or RasterReader, rasterio.Band or tuple(dataset, bidx)\n Input data to display. The first three arrays in multi-dimensional\n arrays are plotted as red, green, and blue.\n bins : int, optional\n Compute histogram across N bins.\n masked : bool, optional\n When working with a `rasterio.Band()` object, specifies if the data\n should be masked on read.\n title : str, optional\n Title for the figure.\n ax : matplotlib axes (opt)\n The raster will be added to this axes if passed.\n **kwargs : optional keyword arguments\n These will be passed to the matplotlib hist method. See full list at:\n http://matplotlib.org/api/axes_api.html?highlight=imshow#matplotlib.axes.Axes.hist\n \"\"\"\n plt = get_plt()\n\n if isinstance(source, RasterReader):\n arr = source.read(masked=masked)\n elif isinstance(source, (tuple, rasterio.Band)):\n arr = source[0].read(source[1], masked=masked)\n else:\n arr = source\n\n # The histogram is computed individually for each 'band' in the array\n # so we need the overall min/max to constrain the plot\n rng = arr.min(), arr.max()\n\n if len(arr.shape) is 2:\n arr = np.expand_dims(arr.flatten(), 0).T\n colors = ['gold']\n else:\n arr = arr.reshape(arr.shape[0], -1).T\n colors = ['red', 'green', 'blue', 'violet', 'gold', 'saddlebrown']\n\n # The goal is to provide a curated set of colors for working with\n # smaller datasets and let matplotlib define additional colors when\n # working with larger datasets.\n if arr.shape[-1] > len(colors):\n n = arr.shape[-1] - len(colors)\n colors.extend(np.ndarray.tolist(plt.get_cmap('Accent')(np.linspace(0, 1, n))))\n else:\n colors = colors[:arr.shape[-1]]\n\n # If a rasterio.Band() is given make sure the proper index is displayed\n # in the legend.\n if isinstance(source, (tuple, rasterio.Band)):\n labels = [str(source[1])]\n else:\n labels = (str(i + 1) for i in range(len(arr)))\n\n if ax:\n show = False\n else:\n show = True\n ax = plt.gca()\n\n fig = ax.get_figure()\n\n ax.hist(arr,\n bins=bins,\n color=colors,\n label=labels,\n range=rng,\n **kwargs)\n\n ax.legend(loc=\"upper right\")\n ax.set_title(title, fontweight='bold')\n ax.grid(True)\n ax.set_xlabel('DN')\n ax.set_ylabel('Frequency')\n if show:\n plt.show()\n", "path": "rasterio/plot.py"}]}
num_tokens: 2,972
num_tokens_diff: 802

problem_id: gh_patches_debug_36102
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-pillow__Pillow-4311
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- wmf file to bitmap was forced to 72 dpi <!-- 2. Bug report: include a self-contained, copy-pastable example that generates the issue if possible. Be concise with code posted. Guidelines on how to provide a good bug report: --> ### What did you do? I read in an wmf image and then save it as a png image. I found the resolution of the result image is rather low no matter how I set the quality parameters. When I looking into the source code ( src/PIL/WmfImagePlugin.py), I found the resolution was fixed to 72dpi. ### What are your OS, Python and Pillow versions? * OS: win10 * Python: 3.6.8 * Pillow: 6.1.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/PIL/WmfImagePlugin.py` Content: ``` 1 # 2 # The Python Imaging Library 3 # $Id$ 4 # 5 # WMF stub codec 6 # 7 # history: 8 # 1996-12-14 fl Created 9 # 2004-02-22 fl Turned into a stub driver 10 # 2004-02-23 fl Added EMF support 11 # 12 # Copyright (c) Secret Labs AB 1997-2004. All rights reserved. 13 # Copyright (c) Fredrik Lundh 1996. 14 # 15 # See the README file for information on usage and redistribution. 16 # 17 # WMF/EMF reference documentation: 18 # https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf 19 # http://wvware.sourceforge.net/caolan/index.html 20 # http://wvware.sourceforge.net/caolan/ora-wmf.html 21 22 from . import Image, ImageFile 23 from ._binary import i16le as word, i32le as dword, si16le as short, si32le as _long 24 25 _handler = None 26 27 28 def register_handler(handler): 29 """ 30 Install application-specific WMF image handler. 31 32 :param handler: Handler object. 33 """ 34 global _handler 35 _handler = handler 36 37 38 if hasattr(Image.core, "drawwmf"): 39 # install default handler (windows only) 40 41 class WmfHandler: 42 def open(self, im): 43 im.mode = "RGB" 44 self.bbox = im.info["wmf_bbox"] 45 46 def load(self, im): 47 im.fp.seek(0) # rewind 48 return Image.frombytes( 49 "RGB", 50 im.size, 51 Image.core.drawwmf(im.fp.read(), im.size, self.bbox), 52 "raw", 53 "BGR", 54 (im.size[0] * 3 + 3) & -4, 55 -1, 56 ) 57 58 register_handler(WmfHandler()) 59 60 # 61 # -------------------------------------------------------------------- 62 # Read WMF file 63 64 65 def _accept(prefix): 66 return ( 67 prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00" 68 ) 69 70 71 ## 72 # Image plugin for Windows metafiles. 
73 74 75 class WmfStubImageFile(ImageFile.StubImageFile): 76 77 format = "WMF" 78 format_description = "Windows Metafile" 79 80 def _open(self): 81 82 # check placable header 83 s = self.fp.read(80) 84 85 if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00": 86 87 # placeable windows metafile 88 89 # get units per inch 90 inch = word(s, 14) 91 92 # get bounding box 93 x0 = short(s, 6) 94 y0 = short(s, 8) 95 x1 = short(s, 10) 96 y1 = short(s, 12) 97 98 # normalize size to 72 dots per inch 99 size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch 100 101 self.info["wmf_bbox"] = x0, y0, x1, y1 102 103 self.info["dpi"] = 72 104 105 # sanity check (standard metafile header) 106 if s[22:26] != b"\x01\x00\t\x00": 107 raise SyntaxError("Unsupported WMF file format") 108 109 elif dword(s) == 1 and s[40:44] == b" EMF": 110 # enhanced metafile 111 112 # get bounding box 113 x0 = _long(s, 8) 114 y0 = _long(s, 12) 115 x1 = _long(s, 16) 116 y1 = _long(s, 20) 117 118 # get frame (in 0.01 millimeter units) 119 frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) 120 121 # normalize size to 72 dots per inch 122 size = x1 - x0, y1 - y0 123 124 # calculate dots per inch from bbox and frame 125 xdpi = int(2540.0 * (x1 - y0) / (frame[2] - frame[0]) + 0.5) 126 ydpi = int(2540.0 * (y1 - y0) / (frame[3] - frame[1]) + 0.5) 127 128 self.info["wmf_bbox"] = x0, y0, x1, y1 129 130 if xdpi == ydpi: 131 self.info["dpi"] = xdpi 132 else: 133 self.info["dpi"] = xdpi, ydpi 134 135 else: 136 raise SyntaxError("Unsupported file format") 137 138 self.mode = "RGB" 139 self._size = size 140 141 loader = self._load() 142 if loader: 143 loader.open(self) 144 145 def _load(self): 146 return _handler 147 148 149 def _save(im, fp, filename): 150 if _handler is None or not hasattr(_handler, "save"): 151 raise OSError("WMF save handler not installed") 152 _handler.save(im, fp, filename) 153 154 155 # 156 # -------------------------------------------------------------------- 157 # Registry stuff 158 159 160 Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept) 161 Image.register_save(WmfStubImageFile.format, _save) 162 163 Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"]) 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/PIL/WmfImagePlugin.py b/src/PIL/WmfImagePlugin.py --- a/src/PIL/WmfImagePlugin.py +++ b/src/PIL/WmfImagePlugin.py @@ -78,6 +78,7 @@ format_description = "Windows Metafile" def _open(self): + self._inch = None # check placable header s = self.fp.read(80) @@ -87,7 +88,7 @@ # placeable windows metafile # get units per inch - inch = word(s, 14) + self._inch = word(s, 14) # get bounding box x0 = short(s, 6) @@ -96,12 +97,14 @@ y1 = short(s, 12) # normalize size to 72 dots per inch - size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch + self.info["dpi"] = 72 + size = ( + (x1 - x0) * self.info["dpi"] // self._inch, + (y1 - y0) * self.info["dpi"] // self._inch, + ) self.info["wmf_bbox"] = x0, y0, x1, y1 - self.info["dpi"] = 72 - # sanity check (standard metafile header) if s[22:26] != b"\x01\x00\t\x00": raise SyntaxError("Unsupported WMF file format") @@ -118,7 +121,6 @@ # get frame (in 0.01 millimeter units) frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) - # normalize size to 72 dots per inch size = x1 - x0, y1 - y0 # calculate dots per inch from bbox and frame @@ -145,6 +147,16 @@ def _load(self): return _handler + def load(self, dpi=None): + if dpi is not None and self._inch is not None: + self.info["dpi"] = int(dpi + 0.5) + x0, y0, x1, y1 = self.info["wmf_bbox"] + self._size = ( + (x1 - x0) * self.info["dpi"] // self._inch, + (y1 - y0) * self.info["dpi"] // self._inch, + ) + super().load() + def _save(im, fp, filename): if _handler is None or not hasattr(_handler, "save"):
{"golden_diff": "diff --git a/src/PIL/WmfImagePlugin.py b/src/PIL/WmfImagePlugin.py\n--- a/src/PIL/WmfImagePlugin.py\n+++ b/src/PIL/WmfImagePlugin.py\n@@ -78,6 +78,7 @@\n format_description = \"Windows Metafile\"\n \n def _open(self):\n+ self._inch = None\n \n # check placable header\n s = self.fp.read(80)\n@@ -87,7 +88,7 @@\n # placeable windows metafile\n \n # get units per inch\n- inch = word(s, 14)\n+ self._inch = word(s, 14)\n \n # get bounding box\n x0 = short(s, 6)\n@@ -96,12 +97,14 @@\n y1 = short(s, 12)\n \n # normalize size to 72 dots per inch\n- size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch\n+ self.info[\"dpi\"] = 72\n+ size = (\n+ (x1 - x0) * self.info[\"dpi\"] // self._inch,\n+ (y1 - y0) * self.info[\"dpi\"] // self._inch,\n+ )\n \n self.info[\"wmf_bbox\"] = x0, y0, x1, y1\n \n- self.info[\"dpi\"] = 72\n-\n # sanity check (standard metafile header)\n if s[22:26] != b\"\\x01\\x00\\t\\x00\":\n raise SyntaxError(\"Unsupported WMF file format\")\n@@ -118,7 +121,6 @@\n # get frame (in 0.01 millimeter units)\n frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)\n \n- # normalize size to 72 dots per inch\n size = x1 - x0, y1 - y0\n \n # calculate dots per inch from bbox and frame\n@@ -145,6 +147,16 @@\n def _load(self):\n return _handler\n \n+ def load(self, dpi=None):\n+ if dpi is not None and self._inch is not None:\n+ self.info[\"dpi\"] = int(dpi + 0.5)\n+ x0, y0, x1, y1 = self.info[\"wmf_bbox\"]\n+ self._size = (\n+ (x1 - x0) * self.info[\"dpi\"] // self._inch,\n+ (y1 - y0) * self.info[\"dpi\"] // self._inch,\n+ )\n+ super().load()\n+\n \n def _save(im, fp, filename):\n if _handler is None or not hasattr(_handler, \"save\"):\n", "issue": "wmf file to bitmap was forced to 72 dpi\n<!--\r\n2. Bug report: include a self-contained, copy-pastable example that\r\n generates the issue if possible. Be concise with code posted.\r\n Guidelines on how to provide a good bug report:\r\n-->\r\n\r\n### What did you do?\r\nI read in an wmf image and then save it as a png image. I found the resolution of the result image is rather low no matter how I set the quality parameters. \r\n\r\nWhen I looking into the source code ( src/PIL/WmfImagePlugin.py), I found the resolution was fixed to 72dpi.\r\n\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: win10\r\n* Python: 3.6.8\r\n* Pillow: 6.1.0\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# WMF stub codec\n#\n# history:\n# 1996-12-14 fl Created\n# 2004-02-22 fl Turned into a stub driver\n# 2004-02-23 fl Added EMF support\n#\n# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.\n# Copyright (c) Fredrik Lundh 1996.\n#\n# See the README file for information on usage and redistribution.\n#\n# WMF/EMF reference documentation:\n# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf\n# http://wvware.sourceforge.net/caolan/index.html\n# http://wvware.sourceforge.net/caolan/ora-wmf.html\n\nfrom . 
import Image, ImageFile\nfrom ._binary import i16le as word, i32le as dword, si16le as short, si32le as _long\n\n_handler = None\n\n\ndef register_handler(handler):\n \"\"\"\n Install application-specific WMF image handler.\n\n :param handler: Handler object.\n \"\"\"\n global _handler\n _handler = handler\n\n\nif hasattr(Image.core, \"drawwmf\"):\n # install default handler (windows only)\n\n class WmfHandler:\n def open(self, im):\n im.mode = \"RGB\"\n self.bbox = im.info[\"wmf_bbox\"]\n\n def load(self, im):\n im.fp.seek(0) # rewind\n return Image.frombytes(\n \"RGB\",\n im.size,\n Image.core.drawwmf(im.fp.read(), im.size, self.bbox),\n \"raw\",\n \"BGR\",\n (im.size[0] * 3 + 3) & -4,\n -1,\n )\n\n register_handler(WmfHandler())\n\n#\n# --------------------------------------------------------------------\n# Read WMF file\n\n\ndef _accept(prefix):\n return (\n prefix[:6] == b\"\\xd7\\xcd\\xc6\\x9a\\x00\\x00\" or prefix[:4] == b\"\\x01\\x00\\x00\\x00\"\n )\n\n\n##\n# Image plugin for Windows metafiles.\n\n\nclass WmfStubImageFile(ImageFile.StubImageFile):\n\n format = \"WMF\"\n format_description = \"Windows Metafile\"\n\n def _open(self):\n\n # check placable header\n s = self.fp.read(80)\n\n if s[:6] == b\"\\xd7\\xcd\\xc6\\x9a\\x00\\x00\":\n\n # placeable windows metafile\n\n # get units per inch\n inch = word(s, 14)\n\n # get bounding box\n x0 = short(s, 6)\n y0 = short(s, 8)\n x1 = short(s, 10)\n y1 = short(s, 12)\n\n # normalize size to 72 dots per inch\n size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch\n\n self.info[\"wmf_bbox\"] = x0, y0, x1, y1\n\n self.info[\"dpi\"] = 72\n\n # sanity check (standard metafile header)\n if s[22:26] != b\"\\x01\\x00\\t\\x00\":\n raise SyntaxError(\"Unsupported WMF file format\")\n\n elif dword(s) == 1 and s[40:44] == b\" EMF\":\n # enhanced metafile\n\n # get bounding box\n x0 = _long(s, 8)\n y0 = _long(s, 12)\n x1 = _long(s, 16)\n y1 = _long(s, 20)\n\n # get frame (in 0.01 millimeter units)\n frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)\n\n # normalize size to 72 dots per inch\n size = x1 - x0, y1 - y0\n\n # calculate dots per inch from bbox and frame\n xdpi = int(2540.0 * (x1 - y0) / (frame[2] - frame[0]) + 0.5)\n ydpi = int(2540.0 * (y1 - y0) / (frame[3] - frame[1]) + 0.5)\n\n self.info[\"wmf_bbox\"] = x0, y0, x1, y1\n\n if xdpi == ydpi:\n self.info[\"dpi\"] = xdpi\n else:\n self.info[\"dpi\"] = xdpi, ydpi\n\n else:\n raise SyntaxError(\"Unsupported file format\")\n\n self.mode = \"RGB\"\n self._size = size\n\n loader = self._load()\n if loader:\n loader.open(self)\n\n def _load(self):\n return _handler\n\n\ndef _save(im, fp, filename):\n if _handler is None or not hasattr(_handler, \"save\"):\n raise OSError(\"WMF save handler not installed\")\n _handler.save(im, fp, filename)\n\n\n#\n# --------------------------------------------------------------------\n# Registry stuff\n\n\nImage.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)\nImage.register_save(WmfStubImageFile.format, _save)\n\nImage.register_extensions(WmfStubImageFile.format, [\".wmf\", \".emf\"])\n", "path": "src/PIL/WmfImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# WMF stub codec\n#\n# history:\n# 1996-12-14 fl Created\n# 2004-02-22 fl Turned into a stub driver\n# 2004-02-23 fl Added EMF support\n#\n# Copyright (c) Secret Labs AB 1997-2004. 
All rights reserved.\n# Copyright (c) Fredrik Lundh 1996.\n#\n# See the README file for information on usage and redistribution.\n#\n# WMF/EMF reference documentation:\n# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf\n# http://wvware.sourceforge.net/caolan/index.html\n# http://wvware.sourceforge.net/caolan/ora-wmf.html\n\nfrom . import Image, ImageFile\nfrom ._binary import i16le as word, i32le as dword, si16le as short, si32le as _long\n\n_handler = None\n\n\ndef register_handler(handler):\n \"\"\"\n Install application-specific WMF image handler.\n\n :param handler: Handler object.\n \"\"\"\n global _handler\n _handler = handler\n\n\nif hasattr(Image.core, \"drawwmf\"):\n # install default handler (windows only)\n\n class WmfHandler:\n def open(self, im):\n im.mode = \"RGB\"\n self.bbox = im.info[\"wmf_bbox\"]\n\n def load(self, im):\n im.fp.seek(0) # rewind\n return Image.frombytes(\n \"RGB\",\n im.size,\n Image.core.drawwmf(im.fp.read(), im.size, self.bbox),\n \"raw\",\n \"BGR\",\n (im.size[0] * 3 + 3) & -4,\n -1,\n )\n\n register_handler(WmfHandler())\n\n#\n# --------------------------------------------------------------------\n# Read WMF file\n\n\ndef _accept(prefix):\n return (\n prefix[:6] == b\"\\xd7\\xcd\\xc6\\x9a\\x00\\x00\" or prefix[:4] == b\"\\x01\\x00\\x00\\x00\"\n )\n\n\n##\n# Image plugin for Windows metafiles.\n\n\nclass WmfStubImageFile(ImageFile.StubImageFile):\n\n format = \"WMF\"\n format_description = \"Windows Metafile\"\n\n def _open(self):\n self._inch = None\n\n # check placable header\n s = self.fp.read(80)\n\n if s[:6] == b\"\\xd7\\xcd\\xc6\\x9a\\x00\\x00\":\n\n # placeable windows metafile\n\n # get units per inch\n self._inch = word(s, 14)\n\n # get bounding box\n x0 = short(s, 6)\n y0 = short(s, 8)\n x1 = short(s, 10)\n y1 = short(s, 12)\n\n # normalize size to 72 dots per inch\n self.info[\"dpi\"] = 72\n size = (\n (x1 - x0) * self.info[\"dpi\"] // self._inch,\n (y1 - y0) * self.info[\"dpi\"] // self._inch,\n )\n\n self.info[\"wmf_bbox\"] = x0, y0, x1, y1\n\n # sanity check (standard metafile header)\n if s[22:26] != b\"\\x01\\x00\\t\\x00\":\n raise SyntaxError(\"Unsupported WMF file format\")\n\n elif dword(s) == 1 and s[40:44] == b\" EMF\":\n # enhanced metafile\n\n # get bounding box\n x0 = _long(s, 8)\n y0 = _long(s, 12)\n x1 = _long(s, 16)\n y1 = _long(s, 20)\n\n # get frame (in 0.01 millimeter units)\n frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)\n\n size = x1 - x0, y1 - y0\n\n # calculate dots per inch from bbox and frame\n xdpi = int(2540.0 * (x1 - y0) / (frame[2] - frame[0]) + 0.5)\n ydpi = int(2540.0 * (y1 - y0) / (frame[3] - frame[1]) + 0.5)\n\n self.info[\"wmf_bbox\"] = x0, y0, x1, y1\n\n if xdpi == ydpi:\n self.info[\"dpi\"] = xdpi\n else:\n self.info[\"dpi\"] = xdpi, ydpi\n\n else:\n raise SyntaxError(\"Unsupported file format\")\n\n self.mode = \"RGB\"\n self._size = size\n\n loader = self._load()\n if loader:\n loader.open(self)\n\n def _load(self):\n return _handler\n\n def load(self, dpi=None):\n if dpi is not None and self._inch is not None:\n self.info[\"dpi\"] = int(dpi + 0.5)\n x0, y0, x1, y1 = self.info[\"wmf_bbox\"]\n self._size = (\n (x1 - x0) * self.info[\"dpi\"] // self._inch,\n (y1 - y0) * self.info[\"dpi\"] // self._inch,\n )\n super().load()\n\n\ndef _save(im, fp, filename):\n if _handler is None or not hasattr(_handler, \"save\"):\n raise OSError(\"WMF save handler not installed\")\n _handler.save(im, fp, filename)\n\n\n#\n# 
--------------------------------------------------------------------\n# Registry stuff\n\n\nImage.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)\nImage.register_save(WmfStubImageFile.format, _save)\n\nImage.register_extensions(WmfStubImageFile.format, [\".wmf\", \".emf\"])\n", "path": "src/PIL/WmfImagePlugin.py"}]}
num_tokens: 2,083
num_tokens_diff: 649

problem_id: gh_patches_debug_31388
source: rasdani/github-patches
task_type: git_diff
in_source_id: conan-io__conan-3100
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issues with proxies I've started Conan server in my organization's local network. For external sites (like github) proxy must be used, but not for computers in local network. So, I have there env variables: ``` HTTP_PROXY=http://proxy.domain:3128 HTTP_PROXY=https://proxy.domain:3128 NO_PROXY=localhost,127.0.0.1,*.domain,192.168.* ``` With these variables I can create packages with recipes in my local cache (and this recipes can download external sources), but cannot download recipes and prebuilt packages from organization's Conan server. I've tried to set same settings in conan.conf, but had no luck. Also I've tried to use Conan server domain name and/or IP address in NO_PROXY with same result: ``` > conan search -r <remotename> "OpenCV*" ERROR: Permission denied for user: '<username>'. [Remote: <remotename>] ``` But if I disable all proxies (comment in conan.conf and unset env variables), it works just fine: ``` Existing package recipes: OpenCV/3.3.1@lukyanets/testing ``` Conan v. 1.0.1, OS: Arch Linux, CentOS 6, Windows 10. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conans/client/rest/conan_requester.py` Content: ``` 1 import fnmatch 2 import os 3 4 from conans.util.files import save 5 6 7 class ConanRequester(object): 8 9 def __init__(self, requester, client_cache, timeout): 10 self.proxies = client_cache.conan_config.proxies or {} 11 self._no_proxy_match = [el.strip() for el in 12 self.proxies.pop("no_proxy_match", "").split(",")] 13 self._timeout_seconds = timeout 14 15 # Retrocompatibility with deprecated no_proxy 16 # Account for the requests NO_PROXY env variable, not defined as a proxy like http= 17 no_proxy = self.proxies.pop("no_proxy", None) 18 if no_proxy: 19 os.environ["NO_PROXY"] = no_proxy 20 21 self._requester = requester 22 self._client_cache = client_cache 23 24 if not os.path.exists(self._client_cache.cacert_path): 25 from conans.client.rest.cacert import cacert 26 save(self._client_cache.cacert_path, cacert) 27 28 if not os.path.exists(client_cache.client_cert_path): 29 self._client_certificates = None 30 else: 31 if os.path.exists(client_cache.client_cert_key_path): 32 # Requests can accept a tuple with cert and key, or just an string with a 33 # file having both 34 self._client_certificates = (client_cache.client_cert_path, 35 client_cache.client_cert_key_path) 36 else: 37 self._client_certificates = client_cache.client_cert_path 38 39 def _should_skip_proxy(self, url): 40 41 for entry in self._no_proxy_match: 42 if fnmatch.fnmatch(url, entry): 43 return True 44 45 return False 46 47 def _add_kwargs(self, url, kwargs): 48 if kwargs.get("verify", None) is True: 49 kwargs["verify"] = self._client_cache.cacert_path 50 else: 51 kwargs["verify"] = False 52 kwargs["cert"] = self._client_certificates 53 if self.proxies: 54 if not self._should_skip_proxy(url): 55 kwargs["proxies"] = self.proxies 56 if self._timeout_seconds: 57 kwargs["timeout"] = self._timeout_seconds 58 return kwargs 59 60 def get(self, url, **kwargs): 61 return self._requester.get(url, **self._add_kwargs(url, kwargs)) 62 63 def put(self, url, **kwargs): 64 return self._requester.put(url, **self._add_kwargs(url, kwargs)) 65 66 def delete(self, url, **kwargs): 67 return self._requester.delete(url, **self._add_kwargs(url, kwargs)) 68 69 def post(self, url, **kwargs): 70 return 
self._requester.post(url, **self._add_kwargs(url, kwargs)) 71 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conans/client/rest/conan_requester.py b/conans/client/rest/conan_requester.py --- a/conans/client/rest/conan_requester.py +++ b/conans/client/rest/conan_requester.py @@ -9,7 +9,7 @@ def __init__(self, requester, client_cache, timeout): self.proxies = client_cache.conan_config.proxies or {} self._no_proxy_match = [el.strip() for el in - self.proxies.pop("no_proxy_match", "").split(",")] + self.proxies.pop("no_proxy_match", "").split(",") if el] self._timeout_seconds = timeout # Retrocompatibility with deprecated no_proxy @@ -58,14 +58,28 @@ return kwargs def get(self, url, **kwargs): - return self._requester.get(url, **self._add_kwargs(url, kwargs)) + return self._call_method("get", url, **kwargs) def put(self, url, **kwargs): - return self._requester.put(url, **self._add_kwargs(url, kwargs)) + return self._call_method("put", url, **kwargs) def delete(self, url, **kwargs): - return self._requester.delete(url, **self._add_kwargs(url, kwargs)) + return self._call_method("delete", url, **kwargs) def post(self, url, **kwargs): - return self._requester.post(url, **self._add_kwargs(url, kwargs)) - + return self._call_method("post", url, **kwargs) + + def _call_method(self, method, url, **kwargs): + popped = False + if self.proxies or self._no_proxy_match: + old_env = dict(os.environ) + # Clean the proxies from the environ and use the conan specified proxies + for var_name in ("http_proxy", "https_proxy", "no_proxy"): + popped = popped or os.environ.pop(var_name, None) + popped = popped or os.environ.pop(var_name.upper(), None) + try: + return getattr(self._requester, method)(url, **self._add_kwargs(url, kwargs)) + finally: + if popped: + os.environ.clear() + os.environ.update(old_env)
{"golden_diff": "diff --git a/conans/client/rest/conan_requester.py b/conans/client/rest/conan_requester.py\n--- a/conans/client/rest/conan_requester.py\n+++ b/conans/client/rest/conan_requester.py\n@@ -9,7 +9,7 @@\n def __init__(self, requester, client_cache, timeout):\n self.proxies = client_cache.conan_config.proxies or {}\n self._no_proxy_match = [el.strip() for el in\n- self.proxies.pop(\"no_proxy_match\", \"\").split(\",\")]\n+ self.proxies.pop(\"no_proxy_match\", \"\").split(\",\") if el]\n self._timeout_seconds = timeout\n \n # Retrocompatibility with deprecated no_proxy\n@@ -58,14 +58,28 @@\n return kwargs\n \n def get(self, url, **kwargs):\n- return self._requester.get(url, **self._add_kwargs(url, kwargs))\n+ return self._call_method(\"get\", url, **kwargs)\n \n def put(self, url, **kwargs):\n- return self._requester.put(url, **self._add_kwargs(url, kwargs))\n+ return self._call_method(\"put\", url, **kwargs)\n \n def delete(self, url, **kwargs):\n- return self._requester.delete(url, **self._add_kwargs(url, kwargs))\n+ return self._call_method(\"delete\", url, **kwargs)\n \n def post(self, url, **kwargs):\n- return self._requester.post(url, **self._add_kwargs(url, kwargs))\n-\n+ return self._call_method(\"post\", url, **kwargs)\n+\n+ def _call_method(self, method, url, **kwargs):\n+ popped = False\n+ if self.proxies or self._no_proxy_match:\n+ old_env = dict(os.environ)\n+ # Clean the proxies from the environ and use the conan specified proxies\n+ for var_name in (\"http_proxy\", \"https_proxy\", \"no_proxy\"):\n+ popped = popped or os.environ.pop(var_name, None)\n+ popped = popped or os.environ.pop(var_name.upper(), None)\n+ try:\n+ return getattr(self._requester, method)(url, **self._add_kwargs(url, kwargs))\n+ finally:\n+ if popped:\n+ os.environ.clear()\n+ os.environ.update(old_env)\n", "issue": "Issues with proxies\nI've started Conan server in my organization's local network. For external sites (like github) proxy must be used, but not for computers in local network.\r\nSo, I have there env variables:\r\n```\r\nHTTP_PROXY=http://proxy.domain:3128\r\nHTTP_PROXY=https://proxy.domain:3128\r\nNO_PROXY=localhost,127.0.0.1,*.domain,192.168.*\r\n```\r\nWith these variables I can create packages with recipes in my local cache (and this recipes can download external sources), but cannot download recipes and prebuilt packages from organization's Conan server.\r\n\r\nI've tried to set same settings in conan.conf, but had no luck. Also I've tried to use Conan server domain name and/or IP address in NO_PROXY with same result:\r\n```\r\n> conan search -r <remotename> \"OpenCV*\"\r\nERROR: Permission denied for user: '<username>'. [Remote: <remotename>]\r\n```\r\n\r\nBut if I disable all proxies (comment in conan.conf and unset env variables), it works just fine:\r\n```\r\nExisting package recipes:\r\n\r\nOpenCV/3.3.1@lukyanets/testing\r\n```\r\n\r\nConan v. 
1.0.1, OS: Arch Linux, CentOS 6, Windows 10.\n", "before_files": [{"content": "import fnmatch\nimport os\n\nfrom conans.util.files import save\n\n\nclass ConanRequester(object):\n\n def __init__(self, requester, client_cache, timeout):\n self.proxies = client_cache.conan_config.proxies or {}\n self._no_proxy_match = [el.strip() for el in\n self.proxies.pop(\"no_proxy_match\", \"\").split(\",\")]\n self._timeout_seconds = timeout\n\n # Retrocompatibility with deprecated no_proxy\n # Account for the requests NO_PROXY env variable, not defined as a proxy like http=\n no_proxy = self.proxies.pop(\"no_proxy\", None)\n if no_proxy:\n os.environ[\"NO_PROXY\"] = no_proxy\n\n self._requester = requester\n self._client_cache = client_cache\n\n if not os.path.exists(self._client_cache.cacert_path):\n from conans.client.rest.cacert import cacert\n save(self._client_cache.cacert_path, cacert)\n\n if not os.path.exists(client_cache.client_cert_path):\n self._client_certificates = None\n else:\n if os.path.exists(client_cache.client_cert_key_path):\n # Requests can accept a tuple with cert and key, or just an string with a\n # file having both\n self._client_certificates = (client_cache.client_cert_path,\n client_cache.client_cert_key_path)\n else:\n self._client_certificates = client_cache.client_cert_path\n\n def _should_skip_proxy(self, url):\n\n for entry in self._no_proxy_match:\n if fnmatch.fnmatch(url, entry):\n return True\n\n return False\n\n def _add_kwargs(self, url, kwargs):\n if kwargs.get(\"verify\", None) is True:\n kwargs[\"verify\"] = self._client_cache.cacert_path\n else:\n kwargs[\"verify\"] = False\n kwargs[\"cert\"] = self._client_certificates\n if self.proxies:\n if not self._should_skip_proxy(url):\n kwargs[\"proxies\"] = self.proxies\n if self._timeout_seconds:\n kwargs[\"timeout\"] = self._timeout_seconds\n return kwargs\n\n def get(self, url, **kwargs):\n return self._requester.get(url, **self._add_kwargs(url, kwargs))\n\n def put(self, url, **kwargs):\n return self._requester.put(url, **self._add_kwargs(url, kwargs))\n\n def delete(self, url, **kwargs):\n return self._requester.delete(url, **self._add_kwargs(url, kwargs))\n\n def post(self, url, **kwargs):\n return self._requester.post(url, **self._add_kwargs(url, kwargs))\n\n", "path": "conans/client/rest/conan_requester.py"}], "after_files": [{"content": "import fnmatch\nimport os\n\nfrom conans.util.files import save\n\n\nclass ConanRequester(object):\n\n def __init__(self, requester, client_cache, timeout):\n self.proxies = client_cache.conan_config.proxies or {}\n self._no_proxy_match = [el.strip() for el in\n self.proxies.pop(\"no_proxy_match\", \"\").split(\",\") if el]\n self._timeout_seconds = timeout\n\n # Retrocompatibility with deprecated no_proxy\n # Account for the requests NO_PROXY env variable, not defined as a proxy like http=\n no_proxy = self.proxies.pop(\"no_proxy\", None)\n if no_proxy:\n os.environ[\"NO_PROXY\"] = no_proxy\n\n self._requester = requester\n self._client_cache = client_cache\n\n if not os.path.exists(self._client_cache.cacert_path):\n from conans.client.rest.cacert import cacert\n save(self._client_cache.cacert_path, cacert)\n\n if not os.path.exists(client_cache.client_cert_path):\n self._client_certificates = None\n else:\n if os.path.exists(client_cache.client_cert_key_path):\n # Requests can accept a tuple with cert and key, or just an string with a\n # file having both\n self._client_certificates = (client_cache.client_cert_path,\n client_cache.client_cert_key_path)\n else:\n 
self._client_certificates = client_cache.client_cert_path\n\n def _should_skip_proxy(self, url):\n\n for entry in self._no_proxy_match:\n if fnmatch.fnmatch(url, entry):\n return True\n\n return False\n\n def _add_kwargs(self, url, kwargs):\n if kwargs.get(\"verify\", None) is True:\n kwargs[\"verify\"] = self._client_cache.cacert_path\n else:\n kwargs[\"verify\"] = False\n kwargs[\"cert\"] = self._client_certificates\n if self.proxies:\n if not self._should_skip_proxy(url):\n kwargs[\"proxies\"] = self.proxies\n if self._timeout_seconds:\n kwargs[\"timeout\"] = self._timeout_seconds\n return kwargs\n\n def get(self, url, **kwargs):\n return self._call_method(\"get\", url, **kwargs)\n\n def put(self, url, **kwargs):\n return self._call_method(\"put\", url, **kwargs)\n\n def delete(self, url, **kwargs):\n return self._call_method(\"delete\", url, **kwargs)\n\n def post(self, url, **kwargs):\n return self._call_method(\"post\", url, **kwargs)\n\n def _call_method(self, method, url, **kwargs):\n popped = False\n if self.proxies or self._no_proxy_match:\n old_env = dict(os.environ)\n # Clean the proxies from the environ and use the conan specified proxies\n for var_name in (\"http_proxy\", \"https_proxy\", \"no_proxy\"):\n popped = popped or os.environ.pop(var_name, None)\n popped = popped or os.environ.pop(var_name.upper(), None)\n try:\n return getattr(self._requester, method)(url, **self._add_kwargs(url, kwargs))\n finally:\n if popped:\n os.environ.clear()\n os.environ.update(old_env)\n", "path": "conans/client/rest/conan_requester.py"}]}
num_tokens: 1,259
num_tokens_diff: 507

problem_id: gh_patches_debug_24539
source: rasdani/github-patches
task_type: git_diff
in_source_id: modin-project__modin-6355
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BUG: Release notes script isn't handling bad or pre-release versions correctly ### Modin version checks - [X] I have checked that this issue has not already been reported. - [X] I have confirmed this bug exists on the latest released version of Modin. - [X] I have confirmed this bug exists on the main branch of Modin. (In order to do this you can follow [this guide](https://modin.readthedocs.io/en/stable/getting_started/installation.html#installing-from-the-github-master-branch).) ### Reproducible Example ```python python scripts/release.py notes ``` ### Issue Description Invalid tag causes exception. Not skipping pre-releases causes wrong detection of previous release (example of building notes for `0.22.3` and latest release being detected as `0.23.0rc0`): ``` Detected release type: patch Previous patch release: refs/tags/0.23.0rc0 Computing release notes for 0.23.0rc0 -> 0.23.1... Current HEAD is not derived from previous release: did you forget to checkout correct branch or pull tags? ``` ### Expected Behavior no exceptions and correct previous version detected ### Error Logs <details> ```python-traceback Traceback (most recent call last): File "c:\Vass\Ponder\modin\scripts\release.py", line 276, in <module> main() File "c:\Vass\Ponder\modin\scripts\release.py", line 272, in main sys.exit(args.func(args)) File "c:\Vass\Ponder\modin\scripts\release.py", line 154, in make_notes prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type) File "c:\Vass\Ponder\modin\scripts\release.py", line 117, in get_previous_release tags = [ File "c:\Vass\Ponder\modin\scripts\release.py", line 118, in <listcomp> (entry, version.parse(entry.lstrip("refs/tags/"))) File "C:\Users\Vass\.conda\envs\modin\lib\site-packages\packaging\version.py", line 52, in parse return Version(version) File "C:\Users\Vass\.conda\envs\modin\lib\site-packages\packaging\version.py", line 198, in __init__ raise InvalidVersion(f"Invalid version: '{version}'") packaging.version.InvalidVersion: Invalid version: 'Modin-v0.1.0' ``` </details> ### Installed Versions irrelevant --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scripts/release.py` Content: ``` 1 import re 2 import json 3 import atexit 4 import collections 5 import argparse 6 from pathlib import Path 7 import sys 8 from packaging import version 9 10 import pygit2 11 import github 12 13 14 class GithubUserResolver: 15 def __init__(self, email2commit, token): 16 self.__cache_file = Path(__file__).parent / "gh-users-cache.json" 17 self.__cache = ( 18 json.loads(self.__cache_file.read_text()) 19 if self.__cache_file.exists() 20 else {} 21 ) 22 # filter unknown users hoping we'd be able to find them this time 23 self.__cache = {key: value for key, value in self.__cache.items() if value} 24 # using anonymous access if token not specified 25 self.__github = github.Github(token or None) 26 self.__modin_repo = self.__github.get_repo("modin-project/modin") 27 self.__email2commit = email2commit 28 atexit.register(self.__save) 29 30 def __search_commits(self, term): 31 if commit := self.__email2commit.get(term): 32 gh_commit = self.__modin_repo.get_commit(str(commit)) 33 return gh_commit.author.login 34 return None 35 36 @staticmethod 37 def __is_email(term): 38 return re.match(r".*@.*\..*", term) 39 40 def __search_github(self, term): 41 search = f"in:email {term}" if self.__is_email(term) else f"fullname:{term}" 42 match = [user.login for user in self.__github.search_users(search)] 43 return match[0] if len(match) == 1 else None 44 45 def __try_user(self, term): 46 if self.__is_email(term): 47 return None 48 try: 49 return self.__github.get_user(term).login 50 except github.GithubException as ex: 51 if ex.status != 404: 52 raise 53 return None 54 55 def __resolve_single(self, term): 56 return ( 57 self.__search_commits(term) 58 or self.__search_github(term) 59 or self.__try_user(term) 60 ) 61 62 def __resolve_cache(self, name, email): 63 return self.__cache.get(f"{name} <{email}>", None) 64 65 def __register(self, name, email, match): 66 self.__cache[f"{name} <{email}>"] = match 67 68 def resolve(self, people): 69 logins, unknowns = set(), set() 70 71 for name, email in people: 72 if match := self.__resolve_cache(name, email): 73 logins.add(match) 74 elif match := self.__resolve_single(email): 75 self.__register(name, email, match) 76 logins.add(match) 77 else: 78 if match := self.__resolve_single(name): 79 logins.add(match) 80 else: 81 unknowns.add((name, email)) 82 self.__register(name, email, match) 83 84 return logins, unknowns 85 86 def resolve_by_reviews(self, unknowns, email2pr): 87 logins, new_unknowns = set(), set() 88 for name, email in unknowns: 89 commit = self.__modin_repo.get_commit(str(email2pr[email])) 90 found = set() 91 for pull in commit.get_pulls(): 92 for review in pull.get_reviews(): 93 user = review.user 94 if user.name == name and (not user.email or user.email == email): 95 found.add(user.login) 96 97 if len(found) == 1: 98 self.__register(name, email, list(found)[0]) 99 logins |= found 100 else: 101 new_unknowns.add((name, email)) 102 103 return logins, new_unknowns 104 105 def __save(self): 106 self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True)) 107 108 109 class GitWrapper: 110 def __init__(self): 111 self.repo = pygit2.Repository(Path(__file__).parent) 112 113 def is_on_master(self): 114 return self.repo.references["refs/heads/master"] == self.repo.head 115 116 def get_previous_release(self, rel_type): 117 tags = [ 118 (entry, version.parse(entry.lstrip("refs/tags/"))) 119 for entry in self.repo.references 120 if entry.startswith("refs/tags/") 121 ] 122 # filter away legacy 
versions (which aren't following the proper naming schema) 123 tags = [(entry, ver) for entry, ver in tags if isinstance(ver, version.Version)] 124 if rel_type == "minor": 125 # leave only minor releases 126 tags = [(entry, ver) for entry, ver in tags if ver.micro == 0] 127 else: 128 assert rel_type == "patch" 129 prev_ref, prev_ver = max(tags, key=lambda pair: pair[1]) 130 return prev_ref, self.repo.references[prev_ref].peel(), prev_ver 131 132 def get_commits_upto(self, stop_commit): 133 history = [] 134 for obj in self.repo.walk(self.repo.head.target): 135 if obj.id == stop_commit.id: 136 break 137 history.append(obj) 138 else: 139 raise ValueError("Current HEAD is not derived from previous release") 140 return history 141 142 def ensure_title_link(self, obj: pygit2.Commit): 143 title = obj.message.splitlines()[0] 144 if not re.match(r".*\(#(\d+)\)$", title): 145 title += f" ({obj.short_id})" 146 return title 147 148 149 def make_notes(args): 150 wrapper = GitWrapper() 151 release_type = "minor" if wrapper.is_on_master() else "patch" 152 sys.stderr.write(f"Detected release type: {release_type}\n") 153 154 prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type) 155 sys.stderr.write(f"Previous {release_type} release: {prev_ref}\n") 156 157 next_major, next_minor, next_patch = prev_ver.release 158 if release_type == "minor": 159 next_minor += 1 160 elif release_type == "patch": 161 next_patch += 1 162 else: 163 raise ValueError(f"Unexpected release type: {release_type}") 164 next_ver = version.Version(f"{next_major}.{next_minor}.{next_patch}") 165 166 sys.stderr.write(f"Computing release notes for {prev_ver} -> {next_ver}...\n") 167 try: 168 history = wrapper.get_commits_upto(prev_commit) 169 except ValueError as ex: 170 sys.stderr.write( 171 f"{ex}: did you forget to checkout correct branch or pull tags?" 172 ) 173 return 1 174 if not history: 175 sys.stderr.write(f"No commits since {prev_ver} found, nothing to generate!\n") 176 return 1 177 178 titles = collections.defaultdict(list) 179 people = set() 180 email2commit, email2pr = {}, {} 181 for obj in history: 182 title = obj.message.splitlines()[0] 183 titles[title.split("-")[0]].append(obj) 184 new_people = set( 185 re.findall( 186 r"(?:(?:Signed-off-by|Co-authored-by):\s*)([\w\s,]+?)\s*<([^>]+)>", 187 obj.message, 188 ) 189 ) 190 for _, email in new_people: 191 email2pr[email] = obj.id 192 people |= new_people 193 email2commit[obj.author.email] = obj.id 194 sys.stderr.write(f"Found {len(history)} commit(s) since {prev_ref}\n") 195 196 sys.stderr.write("Resolving contributors...\n") 197 user_resolver = GithubUserResolver(email2commit, args.token) 198 logins, unknowns = user_resolver.resolve(people) 199 new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr) 200 logins |= new_logins 201 sys.stderr.write(f"Found {len(logins)} GitHub usernames.\n") 202 if unknowns: 203 sys.stderr.write( 204 f"Warning! 
Failed to resolve {len(unknowns)} usernames, please resolve them manually!\n" 205 ) 206 207 sections = [ 208 ("Stability and Bugfixes", "FIX"), 209 ("Performance enhancements", "PERF"), 210 ("Refactor Codebase", "REFACTOR"), 211 ("Update testing suite", "TEST"), 212 ("Documentation improvements", "DOCS"), 213 ("New Features", "FEAT"), 214 ] 215 216 notes = rf"""Modin {next_ver} 217 218 <Please fill in short release summary> 219 220 Key Features and Updates Since {prev_ver} 221 -------------------------------{'-' * len(str(prev_ver))} 222 """ 223 224 def _add_section(section, prs): 225 nonlocal notes 226 if prs: 227 notes += f"* {section}\n" 228 notes += "\n".join( 229 [ 230 f" * {wrapper.ensure_title_link(obj)}" 231 for obj in sorted(prs, key=lambda obj: obj.message) 232 ] 233 ) 234 notes += "\n" 235 236 for section, key in sections: 237 _add_section(section, titles.pop(key, None)) 238 239 uncategorized = sum(titles.values(), []) 240 _add_section("Uncategorized improvements", uncategorized) 241 242 notes += r""" 243 Contributors 244 ------------ 245 """ 246 notes += "\n".join(f"@{login}" for login in sorted(logins)) + "\n" 247 notes += ( 248 "\n".join( 249 f"<unknown-login> {name} <{email}>" for name, email in sorted(unknowns) 250 ) 251 + "\n" 252 ) 253 254 sys.stdout.write(notes) 255 256 257 def main(): 258 parse = argparse.ArgumentParser() 259 parse.add_argument( 260 "--token", 261 type=str, 262 default="", 263 help="GitHub token for queries (optional, bumps up rate limit)", 264 ) 265 parse.set_defaults(func=lambda _: parse.print_usage()) 266 subparsers = parse.add_subparsers() 267 268 notes = subparsers.add_parser("notes", help="Generate release notes") 269 notes.set_defaults(func=make_notes) 270 271 args = parse.parse_args() 272 sys.exit(args.func(args)) 273 274 275 if __name__ == "__main__": 276 main() 277 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/release.py b/scripts/release.py --- a/scripts/release.py +++ b/scripts/release.py @@ -113,14 +113,26 @@ def is_on_master(self): return self.repo.references["refs/heads/master"] == self.repo.head + @staticmethod + def __get_tag_version(entry): + try: + return version.parse(entry.lstrip("refs/tags/")) + except version.InvalidVersion as ex: + return f'<bad version "{entry}": {ex}>' + def get_previous_release(self, rel_type): tags = [ - (entry, version.parse(entry.lstrip("refs/tags/"))) + (entry, self.__get_tag_version(entry)) for entry in self.repo.references if entry.startswith("refs/tags/") ] - # filter away legacy versions (which aren't following the proper naming schema) - tags = [(entry, ver) for entry, ver in tags if isinstance(ver, version.Version)] + # filter away legacy versions (which aren't following the proper naming schema); + # also skip pre-releases + tags = [ + (entry, ver) + for entry, ver in tags + if isinstance(ver, version.Version) and not ver.pre + ] if rel_type == "minor": # leave only minor releases tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]
{"golden_diff": "diff --git a/scripts/release.py b/scripts/release.py\n--- a/scripts/release.py\n+++ b/scripts/release.py\n@@ -113,14 +113,26 @@\n def is_on_master(self):\n return self.repo.references[\"refs/heads/master\"] == self.repo.head\n \n+ @staticmethod\n+ def __get_tag_version(entry):\n+ try:\n+ return version.parse(entry.lstrip(\"refs/tags/\"))\n+ except version.InvalidVersion as ex:\n+ return f'<bad version \"{entry}\": {ex}>'\n+\n def get_previous_release(self, rel_type):\n tags = [\n- (entry, version.parse(entry.lstrip(\"refs/tags/\")))\n+ (entry, self.__get_tag_version(entry))\n for entry in self.repo.references\n if entry.startswith(\"refs/tags/\")\n ]\n- # filter away legacy versions (which aren't following the proper naming schema)\n- tags = [(entry, ver) for entry, ver in tags if isinstance(ver, version.Version)]\n+ # filter away legacy versions (which aren't following the proper naming schema);\n+ # also skip pre-releases\n+ tags = [\n+ (entry, ver)\n+ for entry, ver in tags\n+ if isinstance(ver, version.Version) and not ver.pre\n+ ]\n if rel_type == \"minor\":\n # leave only minor releases\n tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]\n", "issue": "BUG: Release notes script isn't handling bad or pre-release versions correctly\n### Modin version checks\n\n- [X] I have checked that this issue has not already been reported.\n\n- [X] I have confirmed this bug exists on the latest released version of Modin.\n\n- [X] I have confirmed this bug exists on the main branch of Modin. (In order to do this you can follow [this guide](https://modin.readthedocs.io/en/stable/getting_started/installation.html#installing-from-the-github-master-branch).)\n\n\n### Reproducible Example\n\n```python\npython scripts/release.py notes\n```\n\n\n### Issue Description\n\nInvalid tag causes exception.\r\n\r\nNot skipping pre-releases causes wrong detection of previous release (example of building notes for `0.22.3` and latest release being detected as `0.23.0rc0`):\r\n```\r\nDetected release type: patch\r\nPrevious patch release: refs/tags/0.23.0rc0\r\nComputing release notes for 0.23.0rc0 -> 0.23.1...\r\nCurrent HEAD is not derived from previous release: did you forget to checkout correct branch or pull tags?\r\n```\n\n### Expected Behavior\n\nno exceptions and correct previous version detected\n\n### Error Logs\n\n<details>\r\n\r\n```python-traceback\r\n\r\nTraceback (most recent call last):\r\n File \"c:\\Vass\\Ponder\\modin\\scripts\\release.py\", line 276, in <module>\r\n main()\r\n File \"c:\\Vass\\Ponder\\modin\\scripts\\release.py\", line 272, in main\r\n sys.exit(args.func(args))\r\n File \"c:\\Vass\\Ponder\\modin\\scripts\\release.py\", line 154, in make_notes\r\n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\r\n File \"c:\\Vass\\Ponder\\modin\\scripts\\release.py\", line 117, in get_previous_release\r\n tags = [\r\n File \"c:\\Vass\\Ponder\\modin\\scripts\\release.py\", line 118, in <listcomp>\r\n (entry, version.parse(entry.lstrip(\"refs/tags/\")))\r\n File \"C:\\Users\\Vass\\.conda\\envs\\modin\\lib\\site-packages\\packaging\\version.py\", line 52, in parse\r\n return Version(version)\r\n File \"C:\\Users\\Vass\\.conda\\envs\\modin\\lib\\site-packages\\packaging\\version.py\", line 198, in __init__\r\n raise InvalidVersion(f\"Invalid version: '{version}'\")\r\npackaging.version.InvalidVersion: Invalid version: 'Modin-v0.1.0'\r\n\r\n```\r\n\r\n</details>\r\n\n\n### Installed Versions\n\nirrelevant\n", "before_files": [{"content": "import 
re\nimport json\nimport atexit\nimport collections\nimport argparse\nfrom pathlib import Path\nimport sys\nfrom packaging import version\n\nimport pygit2\nimport github\n\n\nclass GithubUserResolver:\n def __init__(self, email2commit, token):\n self.__cache_file = Path(__file__).parent / \"gh-users-cache.json\"\n self.__cache = (\n json.loads(self.__cache_file.read_text())\n if self.__cache_file.exists()\n else {}\n )\n # filter unknown users hoping we'd be able to find them this time\n self.__cache = {key: value for key, value in self.__cache.items() if value}\n # using anonymous access if token not specified\n self.__github = github.Github(token or None)\n self.__modin_repo = self.__github.get_repo(\"modin-project/modin\")\n self.__email2commit = email2commit\n atexit.register(self.__save)\n\n def __search_commits(self, term):\n if commit := self.__email2commit.get(term):\n gh_commit = self.__modin_repo.get_commit(str(commit))\n return gh_commit.author.login\n return None\n\n @staticmethod\n def __is_email(term):\n return re.match(r\".*@.*\\..*\", term)\n\n def __search_github(self, term):\n search = f\"in:email {term}\" if self.__is_email(term) else f\"fullname:{term}\"\n match = [user.login for user in self.__github.search_users(search)]\n return match[0] if len(match) == 1 else None\n\n def __try_user(self, term):\n if self.__is_email(term):\n return None\n try:\n return self.__github.get_user(term).login\n except github.GithubException as ex:\n if ex.status != 404:\n raise\n return None\n\n def __resolve_single(self, term):\n return (\n self.__search_commits(term)\n or self.__search_github(term)\n or self.__try_user(term)\n )\n\n def __resolve_cache(self, name, email):\n return self.__cache.get(f\"{name} <{email}>\", None)\n\n def __register(self, name, email, match):\n self.__cache[f\"{name} <{email}>\"] = match\n\n def resolve(self, people):\n logins, unknowns = set(), set()\n\n for name, email in people:\n if match := self.__resolve_cache(name, email):\n logins.add(match)\n elif match := self.__resolve_single(email):\n self.__register(name, email, match)\n logins.add(match)\n else:\n if match := self.__resolve_single(name):\n logins.add(match)\n else:\n unknowns.add((name, email))\n self.__register(name, email, match)\n\n return logins, unknowns\n\n def resolve_by_reviews(self, unknowns, email2pr):\n logins, new_unknowns = set(), set()\n for name, email in unknowns:\n commit = self.__modin_repo.get_commit(str(email2pr[email]))\n found = set()\n for pull in commit.get_pulls():\n for review in pull.get_reviews():\n user = review.user\n if user.name == name and (not user.email or user.email == email):\n found.add(user.login)\n\n if len(found) == 1:\n self.__register(name, email, list(found)[0])\n logins |= found\n else:\n new_unknowns.add((name, email))\n\n return logins, new_unknowns\n\n def __save(self):\n self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True))\n\n\nclass GitWrapper:\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n\n def is_on_master(self):\n return self.repo.references[\"refs/heads/master\"] == self.repo.head\n\n def get_previous_release(self, rel_type):\n tags = [\n (entry, version.parse(entry.lstrip(\"refs/tags/\")))\n for entry in self.repo.references\n if entry.startswith(\"refs/tags/\")\n ]\n # filter away legacy versions (which aren't following the proper naming schema)\n tags = [(entry, ver) for entry, ver in tags if isinstance(ver, version.Version)]\n if rel_type == \"minor\":\n # leave only minor 
releases\n tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]\n else:\n assert rel_type == \"patch\"\n prev_ref, prev_ver = max(tags, key=lambda pair: pair[1])\n return prev_ref, self.repo.references[prev_ref].peel(), prev_ver\n\n def get_commits_upto(self, stop_commit):\n history = []\n for obj in self.repo.walk(self.repo.head.target):\n if obj.id == stop_commit.id:\n break\n history.append(obj)\n else:\n raise ValueError(\"Current HEAD is not derived from previous release\")\n return history\n\n def ensure_title_link(self, obj: pygit2.Commit):\n title = obj.message.splitlines()[0]\n if not re.match(r\".*\\(#(\\d+)\\)$\", title):\n title += f\" ({obj.short_id})\"\n return title\n\n\ndef make_notes(args):\n wrapper = GitWrapper()\n release_type = \"minor\" if wrapper.is_on_master() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n\n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n sys.stderr.write(f\"Previous {release_type} release: {prev_ref}\\n\")\n\n next_major, next_minor, next_patch = prev_ver.release\n if release_type == \"minor\":\n next_minor += 1\n elif release_type == \"patch\":\n next_patch += 1\n else:\n raise ValueError(f\"Unexpected release type: {release_type}\")\n next_ver = version.Version(f\"{next_major}.{next_minor}.{next_patch}\")\n\n sys.stderr.write(f\"Computing release notes for {prev_ver} -> {next_ver}...\\n\")\n try:\n history = wrapper.get_commits_upto(prev_commit)\n except ValueError as ex:\n sys.stderr.write(\n f\"{ex}: did you forget to checkout correct branch or pull tags?\"\n )\n return 1\n if not history:\n sys.stderr.write(f\"No commits since {prev_ver} found, nothing to generate!\\n\")\n return 1\n\n titles = collections.defaultdict(list)\n people = set()\n email2commit, email2pr = {}, {}\n for obj in history:\n title = obj.message.splitlines()[0]\n titles[title.split(\"-\")[0]].append(obj)\n new_people = set(\n re.findall(\n r\"(?:(?:Signed-off-by|Co-authored-by):\\s*)([\\w\\s,]+?)\\s*<([^>]+)>\",\n obj.message,\n )\n )\n for _, email in new_people:\n email2pr[email] = obj.id\n people |= new_people\n email2commit[obj.author.email] = obj.id\n sys.stderr.write(f\"Found {len(history)} commit(s) since {prev_ref}\\n\")\n\n sys.stderr.write(\"Resolving contributors...\\n\")\n user_resolver = GithubUserResolver(email2commit, args.token)\n logins, unknowns = user_resolver.resolve(people)\n new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr)\n logins |= new_logins\n sys.stderr.write(f\"Found {len(logins)} GitHub usernames.\\n\")\n if unknowns:\n sys.stderr.write(\n f\"Warning! 
Failed to resolve {len(unknowns)} usernames, please resolve them manually!\\n\"\n )\n\n sections = [\n (\"Stability and Bugfixes\", \"FIX\"),\n (\"Performance enhancements\", \"PERF\"),\n (\"Refactor Codebase\", \"REFACTOR\"),\n (\"Update testing suite\", \"TEST\"),\n (\"Documentation improvements\", \"DOCS\"),\n (\"New Features\", \"FEAT\"),\n ]\n\n notes = rf\"\"\"Modin {next_ver}\n\n<Please fill in short release summary>\n\nKey Features and Updates Since {prev_ver}\n-------------------------------{'-' * len(str(prev_ver))}\n\"\"\"\n\n def _add_section(section, prs):\n nonlocal notes\n if prs:\n notes += f\"* {section}\\n\"\n notes += \"\\n\".join(\n [\n f\" * {wrapper.ensure_title_link(obj)}\"\n for obj in sorted(prs, key=lambda obj: obj.message)\n ]\n )\n notes += \"\\n\"\n\n for section, key in sections:\n _add_section(section, titles.pop(key, None))\n\n uncategorized = sum(titles.values(), [])\n _add_section(\"Uncategorized improvements\", uncategorized)\n\n notes += r\"\"\"\nContributors\n------------\n\"\"\"\n notes += \"\\n\".join(f\"@{login}\" for login in sorted(logins)) + \"\\n\"\n notes += (\n \"\\n\".join(\n f\"<unknown-login> {name} <{email}>\" for name, email in sorted(unknowns)\n )\n + \"\\n\"\n )\n\n sys.stdout.write(notes)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument(\n \"--token\",\n type=str,\n default=\"\",\n help=\"GitHub token for queries (optional, bumps up rate limit)\",\n )\n parse.set_defaults(func=lambda _: parse.print_usage())\n subparsers = parse.add_subparsers()\n\n notes = subparsers.add_parser(\"notes\", help=\"Generate release notes\")\n notes.set_defaults(func=make_notes)\n\n args = parse.parse_args()\n sys.exit(args.func(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/release.py"}], "after_files": [{"content": "import re\nimport json\nimport atexit\nimport collections\nimport argparse\nfrom pathlib import Path\nimport sys\nfrom packaging import version\n\nimport pygit2\nimport github\n\n\nclass GithubUserResolver:\n def __init__(self, email2commit, token):\n self.__cache_file = Path(__file__).parent / \"gh-users-cache.json\"\n self.__cache = (\n json.loads(self.__cache_file.read_text())\n if self.__cache_file.exists()\n else {}\n )\n # filter unknown users hoping we'd be able to find them this time\n self.__cache = {key: value for key, value in self.__cache.items() if value}\n # using anonymous access if token not specified\n self.__github = github.Github(token or None)\n self.__modin_repo = self.__github.get_repo(\"modin-project/modin\")\n self.__email2commit = email2commit\n atexit.register(self.__save)\n\n def __search_commits(self, term):\n if commit := self.__email2commit.get(term):\n gh_commit = self.__modin_repo.get_commit(str(commit))\n return gh_commit.author.login\n return None\n\n @staticmethod\n def __is_email(term):\n return re.match(r\".*@.*\\..*\", term)\n\n def __search_github(self, term):\n search = f\"in:email {term}\" if self.__is_email(term) else f\"fullname:{term}\"\n match = [user.login for user in self.__github.search_users(search)]\n return match[0] if len(match) == 1 else None\n\n def __try_user(self, term):\n if self.__is_email(term):\n return None\n try:\n return self.__github.get_user(term).login\n except github.GithubException as ex:\n if ex.status != 404:\n raise\n return None\n\n def __resolve_single(self, term):\n return (\n self.__search_commits(term)\n or self.__search_github(term)\n or self.__try_user(term)\n )\n\n def __resolve_cache(self, name, email):\n return 
self.__cache.get(f\"{name} <{email}>\", None)\n\n def __register(self, name, email, match):\n self.__cache[f\"{name} <{email}>\"] = match\n\n def resolve(self, people):\n logins, unknowns = set(), set()\n\n for name, email in people:\n if match := self.__resolve_cache(name, email):\n logins.add(match)\n elif match := self.__resolve_single(email):\n self.__register(name, email, match)\n logins.add(match)\n else:\n if match := self.__resolve_single(name):\n logins.add(match)\n else:\n unknowns.add((name, email))\n self.__register(name, email, match)\n\n return logins, unknowns\n\n def resolve_by_reviews(self, unknowns, email2pr):\n logins, new_unknowns = set(), set()\n for name, email in unknowns:\n commit = self.__modin_repo.get_commit(str(email2pr[email]))\n found = set()\n for pull in commit.get_pulls():\n for review in pull.get_reviews():\n user = review.user\n if user.name == name and (not user.email or user.email == email):\n found.add(user.login)\n\n if len(found) == 1:\n self.__register(name, email, list(found)[0])\n logins |= found\n else:\n new_unknowns.add((name, email))\n\n return logins, new_unknowns\n\n def __save(self):\n self.__cache_file.write_text(json.dumps(self.__cache, indent=4, sort_keys=True))\n\n\nclass GitWrapper:\n def __init__(self):\n self.repo = pygit2.Repository(Path(__file__).parent)\n\n def is_on_master(self):\n return self.repo.references[\"refs/heads/master\"] == self.repo.head\n\n @staticmethod\n def __get_tag_version(entry):\n try:\n return version.parse(entry.lstrip(\"refs/tags/\"))\n except version.InvalidVersion as ex:\n return f'<bad version \"{entry}\": {ex}>'\n\n def get_previous_release(self, rel_type):\n tags = [\n (entry, self.__get_tag_version(entry))\n for entry in self.repo.references\n if entry.startswith(\"refs/tags/\")\n ]\n # filter away legacy versions (which aren't following the proper naming schema);\n # also skip pre-releases\n tags = [\n (entry, ver)\n for entry, ver in tags\n if isinstance(ver, version.Version) and not ver.pre\n ]\n if rel_type == \"minor\":\n # leave only minor releases\n tags = [(entry, ver) for entry, ver in tags if ver.micro == 0]\n else:\n assert rel_type == \"patch\"\n prev_ref, prev_ver = max(tags, key=lambda pair: pair[1])\n return prev_ref, self.repo.references[prev_ref].peel(), prev_ver\n\n def get_commits_upto(self, stop_commit):\n history = []\n for obj in self.repo.walk(self.repo.head.target):\n if obj.id == stop_commit.id:\n break\n history.append(obj)\n else:\n raise ValueError(\"Current HEAD is not derived from previous release\")\n return history\n\n def ensure_title_link(self, obj: pygit2.Commit):\n title = obj.message.splitlines()[0]\n if not re.match(r\".*\\(#(\\d+)\\)$\", title):\n title += f\" ({obj.short_id})\"\n return title\n\n\ndef make_notes(args):\n wrapper = GitWrapper()\n release_type = \"minor\" if wrapper.is_on_master() else \"patch\"\n sys.stderr.write(f\"Detected release type: {release_type}\\n\")\n\n prev_ref, prev_commit, prev_ver = wrapper.get_previous_release(release_type)\n sys.stderr.write(f\"Previous {release_type} release: {prev_ref}\\n\")\n\n next_major, next_minor, next_patch = prev_ver.release\n if release_type == \"minor\":\n next_minor += 1\n elif release_type == \"patch\":\n next_patch += 1\n else:\n raise ValueError(f\"Unexpected release type: {release_type}\")\n next_ver = version.Version(f\"{next_major}.{next_minor}.{next_patch}\")\n\n sys.stderr.write(f\"Computing release notes for {prev_ver} -> {next_ver}...\\n\")\n try:\n history = 
wrapper.get_commits_upto(prev_commit)\n except ValueError as ex:\n sys.stderr.write(\n f\"{ex}: did you forget to checkout correct branch or pull tags?\"\n )\n return 1\n if not history:\n sys.stderr.write(f\"No commits since {prev_ver} found, nothing to generate!\\n\")\n return 1\n\n titles = collections.defaultdict(list)\n people = set()\n email2commit, email2pr = {}, {}\n for obj in history:\n title = obj.message.splitlines()[0]\n titles[title.split(\"-\")[0]].append(obj)\n new_people = set(\n re.findall(\n r\"(?:(?:Signed-off-by|Co-authored-by):\\s*)([\\w\\s,]+?)\\s*<([^>]+)>\",\n obj.message,\n )\n )\n for _, email in new_people:\n email2pr[email] = obj.id\n people |= new_people\n email2commit[obj.author.email] = obj.id\n sys.stderr.write(f\"Found {len(history)} commit(s) since {prev_ref}\\n\")\n\n sys.stderr.write(\"Resolving contributors...\\n\")\n user_resolver = GithubUserResolver(email2commit, args.token)\n logins, unknowns = user_resolver.resolve(people)\n new_logins, unknowns = user_resolver.resolve_by_reviews(unknowns, email2pr)\n logins |= new_logins\n sys.stderr.write(f\"Found {len(logins)} GitHub usernames.\\n\")\n if unknowns:\n sys.stderr.write(\n f\"Warning! Failed to resolve {len(unknowns)} usernames, please resolve them manually!\\n\"\n )\n\n sections = [\n (\"Stability and Bugfixes\", \"FIX\"),\n (\"Performance enhancements\", \"PERF\"),\n (\"Refactor Codebase\", \"REFACTOR\"),\n (\"Update testing suite\", \"TEST\"),\n (\"Documentation improvements\", \"DOCS\"),\n (\"New Features\", \"FEAT\"),\n ]\n\n notes = rf\"\"\"Modin {next_ver}\n\n<Please fill in short release summary>\n\nKey Features and Updates Since {prev_ver}\n-------------------------------{'-' * len(str(prev_ver))}\n\"\"\"\n\n def _add_section(section, prs):\n nonlocal notes\n if prs:\n notes += f\"* {section}\\n\"\n notes += \"\\n\".join(\n [\n f\" * {wrapper.ensure_title_link(obj)}\"\n for obj in sorted(prs, key=lambda obj: obj.message)\n ]\n )\n notes += \"\\n\"\n\n for section, key in sections:\n _add_section(section, titles.pop(key, None))\n\n uncategorized = sum(titles.values(), [])\n _add_section(\"Uncategorized improvements\", uncategorized)\n\n notes += r\"\"\"\nContributors\n------------\n\"\"\"\n notes += \"\\n\".join(f\"@{login}\" for login in sorted(logins)) + \"\\n\"\n notes += (\n \"\\n\".join(\n f\"<unknown-login> {name} <{email}>\" for name, email in sorted(unknowns)\n )\n + \"\\n\"\n )\n\n sys.stdout.write(notes)\n\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument(\n \"--token\",\n type=str,\n default=\"\",\n help=\"GitHub token for queries (optional, bumps up rate limit)\",\n )\n parse.set_defaults(func=lambda _: parse.print_usage())\n subparsers = parse.add_subparsers()\n\n notes = subparsers.add_parser(\"notes\", help=\"Generate release notes\")\n notes.set_defaults(func=make_notes)\n\n args = parse.parse_args()\n sys.exit(args.func(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/release.py"}]}
3,708
317
gh_patches_debug_7430
rasdani/github-patches
git_diff
deepset-ai__haystack-5145
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PromptNode does't support 16k tokens. **Describe the bug** While gpt-3.5-turbo-16k support was added, PromptNode is still setting a token limit of 4096. **Error message** The prompt or the messages are too long (447 tokens). The length of the prompt or messages and the answer (30000 tokens) should be within the max token limit (16384 tokens). Reduce the length of the prompt or messages. **Expected behavior** PromptNode recognizes 16k models and adjust the token limit. **To Reproduce** Set the model of PromptNode gpt-3.5-turbo-16k and attempt to send a prompt larger than 5k tokens. **FAQ Check** - [ x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)? **System:** - OS: MacOS Ventura - GPU/CPU: CPU - Haystack version (commit or version number): latest --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `haystack/utils/openai_utils.py` Content: ``` 1 """Utils for using OpenAI API""" 2 import os 3 import logging 4 import platform 5 import json 6 from typing import Dict, Union, Tuple, Optional, List 7 import requests 8 import tenacity 9 import tiktoken 10 from tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING 11 12 from haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError 13 from haystack.environment import ( 14 HAYSTACK_REMOTE_API_BACKOFF_SEC, 15 HAYSTACK_REMOTE_API_MAX_RETRIES, 16 HAYSTACK_REMOTE_API_TIMEOUT_SEC, 17 ) 18 19 logger = logging.getLogger(__name__) 20 21 22 machine = platform.machine().lower() 23 system = platform.system() 24 25 OPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30)) 26 OPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10)) 27 OPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5)) 28 29 30 def load_openai_tokenizer(tokenizer_name: str): 31 """Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast 32 from the transformers library. 33 34 :param tokenizer_name: The name of the tokenizer to load. 35 """ 36 37 logger.debug("Using tiktoken %s tokenizer", tokenizer_name) 38 return tiktoken.get_encoding(tokenizer_name) 39 40 41 def count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int: 42 """Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided. 43 44 :param messages: The messages to be tokenized. 45 :param tokenizer: An OpenAI tokenizer. 46 """ 47 # adapted from https://platform.openai.com/docs/guides/chat/introduction 48 # should be kept up to date 49 num_tokens = 0 50 for message in messages: 51 num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n 52 for key, value in message.items(): 53 num_tokens += len(tokenizer.encode(value)) 54 if key == "name": # if there's a name, the role is omitted 55 num_tokens += -1 # role is always required and always 1 token 56 num_tokens += 2 # every reply is primed with <im_start>assistant 57 return num_tokens 58 59 60 def _openai_text_completion_tokenization_details(model_name: str): 61 """Return the tokenizer name and max tokens limit for a given OpenAI `model_name`. 62 63 :param model_name: Name of the OpenAI model. 
64 """ 65 tokenizer_name = "gpt2" 66 max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3 67 68 if model_name == "gpt-35-turbo": 69 # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72 70 model_tokenizer = "cl100k_base" 71 elif model_name in MODEL_TO_ENCODING: 72 model_tokenizer = MODEL_TO_ENCODING[model_name] 73 else: 74 for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items(): 75 if model_name.startswith(model_prefix): 76 model_tokenizer = tokenizer 77 break 78 79 if model_tokenizer: 80 # Based on OpenAI models page, 'davinci' considers have 2049 tokens, 81 ## therefore, it is better to add `text-davinci` instead to the condition. 82 ## Ref: https://platform.openai.com/docs/models/gpt-3-5 83 ## https://platform.openai.com/docs/models/gpt-3 84 if "text-davinci" in model_name: 85 max_tokens_limit = 4097 86 tokenizer_name = model_tokenizer 87 elif model_name.startswith("gpt-3"): 88 max_tokens_limit = 4096 89 tokenizer_name = model_tokenizer 90 # Ref: https://platform.openai.com/docs/models/gpt-4 91 elif model_name.startswith("gpt-4-32k"): 92 max_tokens_limit = 32768 # tokens 93 tokenizer_name = model_tokenizer 94 elif model_name.startswith("gpt-4"): 95 max_tokens_limit = 8192 # tokens 96 tokenizer_name = model_tokenizer 97 else: 98 tokenizer_name = model_tokenizer 99 100 return tokenizer_name, max_tokens_limit 101 102 103 @tenacity.retry( 104 reraise=True, 105 retry=tenacity.retry_if_exception_type(OpenAIError) 106 and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError), 107 wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF), 108 stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES), 109 ) 110 def openai_request( 111 url: str, 112 headers: Dict, 113 payload: Dict, 114 timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT, 115 read_response: Optional[bool] = True, 116 **kwargs, 117 ): 118 """Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`. 119 120 :param url: The URL of the OpenAI API. 121 :param headers: Dictionary of HTTP Headers to send with the :class:`Request`. 122 :param payload: The payload to send with the request. 123 :param timeout: The timeout length of the request. The default is 30s. 124 :param read_response: Whether to read the response as JSON. The default is True. 125 """ 126 response = requests.request("POST", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs) 127 if read_response: 128 json_response = json.loads(response.text) 129 130 if response.status_code != 200: 131 openai_error: OpenAIError 132 if response.status_code == 429: 133 openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}") 134 elif response.status_code == 401: 135 openai_error = OpenAIUnauthorizedError(f"API key is invalid: {response.text}") 136 else: 137 openai_error = OpenAIError( 138 f"OpenAI returned an error.\n" 139 f"Status code: {response.status_code}\n" 140 f"Response body: {response.text}", 141 status_code=response.status_code, 142 ) 143 raise openai_error 144 if read_response: 145 return json_response 146 else: 147 return response 148 149 150 def _check_openai_finish_reason(result: Dict, payload: Dict) -> None: 151 """Check the `finish_reason` the answers returned by OpenAI completions endpoint. 152 If the `finish_reason` is `length` or `content_filter`, log a warning to the user. 153 154 :param result: The result returned from the OpenAI API. 155 :param payload: The payload sent to the OpenAI API. 
156 """ 157 number_of_truncated_completions = sum(1 for ans in result["choices"] if ans["finish_reason"] == "length") 158 if number_of_truncated_completions > 0: 159 logger.warning( 160 "%s out of the %s completions have been truncated before reaching a natural stopping point. " 161 "Increase the max_tokens parameter to allow for longer completions.", 162 number_of_truncated_completions, 163 payload["n"], 164 ) 165 166 number_of_content_filtered_completions = sum( 167 1 for ans in result["choices"] if ans["finish_reason"] == "content_filter" 168 ) 169 if number_of_content_filtered_completions > 0: 170 logger.warning( 171 "%s out of the %s completions have omitted content due to a flag from OpenAI content filters.", 172 number_of_truncated_completions, 173 payload["n"], 174 ) 175 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/haystack/utils/openai_utils.py b/haystack/utils/openai_utils.py --- a/haystack/utils/openai_utils.py +++ b/haystack/utils/openai_utils.py @@ -84,6 +84,9 @@ if "text-davinci" in model_name: max_tokens_limit = 4097 tokenizer_name = model_tokenizer + elif model_name.startswith("gpt-3.5-turbo-16k"): + max_tokens_limit = 16384 + tokenizer_name = model_tokenizer elif model_name.startswith("gpt-3"): max_tokens_limit = 4096 tokenizer_name = model_tokenizer
{"golden_diff": "diff --git a/haystack/utils/openai_utils.py b/haystack/utils/openai_utils.py\n--- a/haystack/utils/openai_utils.py\n+++ b/haystack/utils/openai_utils.py\n@@ -84,6 +84,9 @@\n if \"text-davinci\" in model_name:\n max_tokens_limit = 4097\n tokenizer_name = model_tokenizer\n+ elif model_name.startswith(\"gpt-3.5-turbo-16k\"):\n+ max_tokens_limit = 16384\n+ tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3\"):\n max_tokens_limit = 4096\n tokenizer_name = model_tokenizer\n", "issue": "PromptNode does't support 16k tokens.\n**Describe the bug**\r\nWhile gpt-3.5-turbo-16k support was added, PromptNode is still setting a token limit of 4096.\r\n\r\n**Error message**\r\nThe prompt or the messages are too long (447 tokens). The length of the prompt or messages and the answer (30000 tokens) should be within the max token limit (16384 tokens). Reduce the length of the prompt or messages.\r\n\r\n**Expected behavior**\r\nPromptNode recognizes 16k models and adjust the token limit.\r\n\r\n**To Reproduce**\r\nSet the model of PromptNode gpt-3.5-turbo-16k and attempt to send a prompt larger than 5k tokens.\r\n\r\n**FAQ Check**\r\n- [ x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?\r\n\r\n**System:**\r\n - OS: MacOS Ventura\r\n - GPU/CPU: CPU\r\n - Haystack version (commit or version number): latest\r\n\n", "before_files": [{"content": "\"\"\"Utils for using OpenAI API\"\"\"\nimport os\nimport logging\nimport platform\nimport json\nfrom typing import Dict, Union, Tuple, Optional, List\nimport requests\nimport tenacity\nimport tiktoken\nfrom tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING\n\nfrom haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError\nfrom haystack.environment import (\n HAYSTACK_REMOTE_API_BACKOFF_SEC,\n HAYSTACK_REMOTE_API_MAX_RETRIES,\n HAYSTACK_REMOTE_API_TIMEOUT_SEC,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nmachine = platform.machine().lower()\nsystem = platform.system()\n\nOPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))\nOPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))\nOPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))\n\n\ndef load_openai_tokenizer(tokenizer_name: str):\n \"\"\"Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast\n from the transformers library.\n\n :param tokenizer_name: The name of the tokenizer to load.\n \"\"\"\n\n logger.debug(\"Using tiktoken %s tokenizer\", tokenizer_name)\n return tiktoken.get_encoding(tokenizer_name)\n\n\ndef count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:\n \"\"\"Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.\n\n :param messages: The messages to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n # adapted from https://platform.openai.com/docs/guides/chat/introduction\n # should be kept up to date\n num_tokens = 0\n for message in messages:\n num_tokens += 4 # every message follows <im_start>{role/name}\\n{content}<im_end>\\n\n for key, value in message.items():\n num_tokens += len(tokenizer.encode(value))\n if key == \"name\": # if there's a name, the role is omitted\n num_tokens += -1 # role is always required and always 1 token\n num_tokens += 2 # every reply is primed with <im_start>assistant\n return num_tokens\n\n\ndef _openai_text_completion_tokenization_details(model_name: str):\n \"\"\"Return 
the tokenizer name and max tokens limit for a given OpenAI `model_name`.\n\n :param model_name: Name of the OpenAI model.\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n\n if model_name == \"gpt-35-turbo\":\n # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n model_tokenizer = \"cl100k_base\"\n elif model_name in MODEL_TO_ENCODING:\n model_tokenizer = MODEL_TO_ENCODING[model_name]\n else:\n for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():\n if model_name.startswith(model_prefix):\n model_tokenizer = tokenizer\n break\n\n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n ## therefore, it is better to add `text-davinci` instead to the condition.\n ## Ref: https://platform.openai.com/docs/models/gpt-3-5\n ## https://platform.openai.com/docs/models/gpt-3\n if \"text-davinci\" in model_name:\n max_tokens_limit = 4097\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3\"):\n max_tokens_limit = 4096\n tokenizer_name = model_tokenizer\n # Ref: https://platform.openai.com/docs/models/gpt-4\n elif model_name.startswith(\"gpt-4-32k\"):\n max_tokens_limit = 32768 # tokens\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-4\"):\n max_tokens_limit = 8192 # tokens\n tokenizer_name = model_tokenizer\n else:\n tokenizer_name = model_tokenizer\n\n return tokenizer_name, max_tokens_limit\n\n\[email protected](\n reraise=True,\n retry=tenacity.retry_if_exception_type(OpenAIError)\n and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),\n wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),\n stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),\n)\ndef openai_request(\n url: str,\n headers: Dict,\n payload: Dict,\n timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,\n read_response: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.\n\n :param url: The URL of the OpenAI API.\n :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.\n :param payload: The payload to send with the request.\n :param timeout: The timeout length of the request. The default is 30s.\n :param read_response: Whether to read the response as JSON. 
The default is True.\n \"\"\"\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)\n if read_response:\n json_response = json.loads(response.text)\n\n if response.status_code != 200:\n openai_error: OpenAIError\n if response.status_code == 429:\n openai_error = OpenAIRateLimitError(f\"API rate limit exceeded: {response.text}\")\n elif response.status_code == 401:\n openai_error = OpenAIUnauthorizedError(f\"API key is invalid: {response.text}\")\n else:\n openai_error = OpenAIError(\n f\"OpenAI returned an error.\\n\"\n f\"Status code: {response.status_code}\\n\"\n f\"Response body: {response.text}\",\n status_code=response.status_code,\n )\n raise openai_error\n if read_response:\n return json_response\n else:\n return response\n\n\ndef _check_openai_finish_reason(result: Dict, payload: Dict) -> None:\n \"\"\"Check the `finish_reason` the answers returned by OpenAI completions endpoint.\n If the `finish_reason` is `length` or `content_filter`, log a warning to the user.\n\n :param result: The result returned from the OpenAI API.\n :param payload: The payload sent to the OpenAI API.\n \"\"\"\n number_of_truncated_completions = sum(1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"length\")\n if number_of_truncated_completions > 0:\n logger.warning(\n \"%s out of the %s completions have been truncated before reaching a natural stopping point. \"\n \"Increase the max_tokens parameter to allow for longer completions.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n\n number_of_content_filtered_completions = sum(\n 1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"content_filter\"\n )\n if number_of_content_filtered_completions > 0:\n logger.warning(\n \"%s out of the %s completions have omitted content due to a flag from OpenAI content filters.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n", "path": "haystack/utils/openai_utils.py"}], "after_files": [{"content": "\"\"\"Utils for using OpenAI API\"\"\"\nimport os\nimport logging\nimport platform\nimport json\nfrom typing import Dict, Union, Tuple, Optional, List\nimport requests\nimport tenacity\nimport tiktoken\nfrom tiktoken.model import MODEL_TO_ENCODING, MODEL_PREFIX_TO_ENCODING\n\nfrom haystack.errors import OpenAIError, OpenAIRateLimitError, OpenAIUnauthorizedError\nfrom haystack.environment import (\n HAYSTACK_REMOTE_API_BACKOFF_SEC,\n HAYSTACK_REMOTE_API_MAX_RETRIES,\n HAYSTACK_REMOTE_API_TIMEOUT_SEC,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nmachine = platform.machine().lower()\nsystem = platform.system()\n\nOPENAI_TIMEOUT = float(os.environ.get(HAYSTACK_REMOTE_API_TIMEOUT_SEC, 30))\nOPENAI_BACKOFF = int(os.environ.get(HAYSTACK_REMOTE_API_BACKOFF_SEC, 10))\nOPENAI_MAX_RETRIES = int(os.environ.get(HAYSTACK_REMOTE_API_MAX_RETRIES, 5))\n\n\ndef load_openai_tokenizer(tokenizer_name: str):\n \"\"\"Load either the tokenizer from tiktoken (if the library is available) or fallback to the GPT2TokenizerFast\n from the transformers library.\n\n :param tokenizer_name: The name of the tokenizer to load.\n \"\"\"\n\n logger.debug(\"Using tiktoken %s tokenizer\", tokenizer_name)\n return tiktoken.get_encoding(tokenizer_name)\n\n\ndef count_openai_tokens_messages(messages: List[Dict[str, str]], tokenizer) -> int:\n \"\"\"Count the number of tokens in `messages` based on the OpenAI `tokenizer` provided.\n\n :param messages: The messages to be tokenized.\n :param tokenizer: An OpenAI tokenizer.\n \"\"\"\n # adapted from 
https://platform.openai.com/docs/guides/chat/introduction\n # should be kept up to date\n num_tokens = 0\n for message in messages:\n num_tokens += 4 # every message follows <im_start>{role/name}\\n{content}<im_end>\\n\n for key, value in message.items():\n num_tokens += len(tokenizer.encode(value))\n if key == \"name\": # if there's a name, the role is omitted\n num_tokens += -1 # role is always required and always 1 token\n num_tokens += 2 # every reply is primed with <im_start>assistant\n return num_tokens\n\n\ndef _openai_text_completion_tokenization_details(model_name: str):\n \"\"\"Return the tokenizer name and max tokens limit for a given OpenAI `model_name`.\n\n :param model_name: Name of the OpenAI model.\n \"\"\"\n tokenizer_name = \"gpt2\"\n max_tokens_limit = 2049 # Based on this ref: https://platform.openai.com/docs/models/gpt-3\n\n if model_name == \"gpt-35-turbo\":\n # covering the lack of support in Tiktoken. https://github.com/openai/tiktoken/pull/72\n model_tokenizer = \"cl100k_base\"\n elif model_name in MODEL_TO_ENCODING:\n model_tokenizer = MODEL_TO_ENCODING[model_name]\n else:\n for model_prefix, tokenizer in MODEL_PREFIX_TO_ENCODING.items():\n if model_name.startswith(model_prefix):\n model_tokenizer = tokenizer\n break\n\n if model_tokenizer:\n # Based on OpenAI models page, 'davinci' considers have 2049 tokens,\n ## therefore, it is better to add `text-davinci` instead to the condition.\n ## Ref: https://platform.openai.com/docs/models/gpt-3-5\n ## https://platform.openai.com/docs/models/gpt-3\n if \"text-davinci\" in model_name:\n max_tokens_limit = 4097\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3.5-turbo-16k\"):\n max_tokens_limit = 16384\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-3\"):\n max_tokens_limit = 4096\n tokenizer_name = model_tokenizer\n # Ref: https://platform.openai.com/docs/models/gpt-4\n elif model_name.startswith(\"gpt-4-32k\"):\n max_tokens_limit = 32768 # tokens\n tokenizer_name = model_tokenizer\n elif model_name.startswith(\"gpt-4\"):\n max_tokens_limit = 8192 # tokens\n tokenizer_name = model_tokenizer\n else:\n tokenizer_name = model_tokenizer\n\n return tokenizer_name, max_tokens_limit\n\n\[email protected](\n reraise=True,\n retry=tenacity.retry_if_exception_type(OpenAIError)\n and tenacity.retry_if_not_exception_type(OpenAIUnauthorizedError),\n wait=tenacity.wait_exponential(multiplier=OPENAI_BACKOFF),\n stop=tenacity.stop_after_attempt(OPENAI_MAX_RETRIES),\n)\ndef openai_request(\n url: str,\n headers: Dict,\n payload: Dict,\n timeout: Union[float, Tuple[float, float]] = OPENAI_TIMEOUT,\n read_response: Optional[bool] = True,\n **kwargs,\n):\n \"\"\"Make a request to the OpenAI API given a `url`, `headers`, `payload`, and `timeout`.\n\n :param url: The URL of the OpenAI API.\n :param headers: Dictionary of HTTP Headers to send with the :class:`Request`.\n :param payload: The payload to send with the request.\n :param timeout: The timeout length of the request. The default is 30s.\n :param read_response: Whether to read the response as JSON. 
The default is True.\n \"\"\"\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload), timeout=timeout, **kwargs)\n if read_response:\n json_response = json.loads(response.text)\n\n if response.status_code != 200:\n openai_error: OpenAIError\n if response.status_code == 429:\n openai_error = OpenAIRateLimitError(f\"API rate limit exceeded: {response.text}\")\n elif response.status_code == 401:\n openai_error = OpenAIUnauthorizedError(f\"API key is invalid: {response.text}\")\n else:\n openai_error = OpenAIError(\n f\"OpenAI returned an error.\\n\"\n f\"Status code: {response.status_code}\\n\"\n f\"Response body: {response.text}\",\n status_code=response.status_code,\n )\n raise openai_error\n if read_response:\n return json_response\n else:\n return response\n\n\ndef _check_openai_finish_reason(result: Dict, payload: Dict) -> None:\n \"\"\"Check the `finish_reason` the answers returned by OpenAI completions endpoint.\n If the `finish_reason` is `length` or `content_filter`, log a warning to the user.\n\n :param result: The result returned from the OpenAI API.\n :param payload: The payload sent to the OpenAI API.\n \"\"\"\n number_of_truncated_completions = sum(1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"length\")\n if number_of_truncated_completions > 0:\n logger.warning(\n \"%s out of the %s completions have been truncated before reaching a natural stopping point. \"\n \"Increase the max_tokens parameter to allow for longer completions.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n\n number_of_content_filtered_completions = sum(\n 1 for ans in result[\"choices\"] if ans[\"finish_reason\"] == \"content_filter\"\n )\n if number_of_content_filtered_completions > 0:\n logger.warning(\n \"%s out of the %s completions have omitted content due to a flag from OpenAI content filters.\",\n number_of_truncated_completions,\n payload[\"n\"],\n )\n", "path": "haystack/utils/openai_utils.py"}]}
2,584
160
gh_patches_debug_1142
rasdani/github-patches
git_diff
nipy__nipype-2852
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- nipype/conftest.py should be excluded from API documentation ### Summary The auto-generated API docs include `conftest.py`, which has a fixture. Pytest has turned calling a fixture directly into an error, and apparently the fixture is getting called when the docs are generated. This is what's currently breaking the Circle builds. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/build_interface_docs.py` Content: ``` 1 #!/usr/bin/env python 2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 3 # vi: set ft=python sts=4 ts=4 sw=4 et: 4 """Script to auto-generate interface docs. 5 """ 6 from __future__ import print_function, unicode_literals 7 # stdlib imports 8 import os 9 import sys 10 11 # ***************************************************************************** 12 if __name__ == '__main__': 13 nipypepath = os.path.abspath('..') 14 sys.path.insert(1, nipypepath) 15 # local imports 16 from interfacedocgen import InterfaceHelpWriter 17 package = 'nipype' 18 outdir = os.path.join('interfaces', 'generated') 19 docwriter = InterfaceHelpWriter(package) 20 # Packages that should not be included in generated API docs. 21 docwriter.package_skip_patterns += [ 22 '\.external$', 23 '\.fixes$', 24 '\.utils$', 25 '\.pipeline', 26 '\.testing', 27 '\.caching', 28 '\.scripts', 29 ] 30 # Modules that should not be included in generated API docs. 31 docwriter.module_skip_patterns += [ 32 '\.version$', 33 '\.interfaces\.base$', 34 '\.interfaces\.matlab$', 35 '\.interfaces\.rest$', 36 '\.interfaces\.pymvpa$', 37 '\.interfaces\.slicer\.generate_classes$', 38 '\.interfaces\.spm\.base$', 39 '\.interfaces\.traits', 40 '\.pipeline\.alloy$', 41 '\.pipeline\.s3_node_wrapper$', 42 '\.testing', 43 '\.scripts', 44 ] 45 docwriter.class_skip_patterns += [ 46 'AFNICommand', 47 'ANTS', 48 'FSLCommand', 49 'FS', 50 'Info', 51 '^SPM', 52 'Tester', 53 'Spec$', 54 'Numpy' 55 # NipypeTester raises an 56 # exception when instantiated in 57 # InterfaceHelpWriter.generate_api_doc 58 'NipypeTester', 59 ] 60 docwriter.write_api_docs(outdir) 61 docwriter.write_index(outdir, 'gen', relative_to='interfaces') 62 print('%d files written' % len(docwriter.written_modules)) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py --- a/tools/build_interface_docs.py +++ b/tools/build_interface_docs.py @@ -41,6 +41,7 @@ '\.pipeline\.s3_node_wrapper$', '\.testing', '\.scripts', + '\.conftest', ] docwriter.class_skip_patterns += [ 'AFNICommand',
{"golden_diff": "diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py\n--- a/tools/build_interface_docs.py\n+++ b/tools/build_interface_docs.py\n@@ -41,6 +41,7 @@\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n+ '\\.conftest',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n", "issue": "nipype/conftest.py should be excluded from API documentation\n### Summary\r\n\r\nThe auto-generated API docs include `conftest.py`, which has a fixture. Pytest has turned calling a fixture directly into an error, and apparently the fixture is getting called when the docs are generated.\r\n\r\nThis is what's currently breaking the Circle builds.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += [\n '\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += [\n '\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += [\n '\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += [\n '\\.version$',\n 
'\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n '\\.conftest',\n ]\n docwriter.class_skip_patterns += [\n 'AFNICommand',\n 'ANTS',\n 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}]}
915
89
gh_patches_debug_32346
rasdani/github-patches
git_diff
pwndbg__pwndbg-1800
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix Arch Linux tests The Arch Linux tests are failing due to new versions of pip needing to use the `--break-system-packages` flag to install packages globally. We should update the CI to use a virtualenv here. CI: try to bring back Arch Linux CI Let's see if this works now with virtualenv setup <!-- Please make sure to read the testing and linting instructions at https://github.com/pwndbg/pwndbg/blob/dev/DEVELOPING.md before creating a PR --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/wrappers/readelf.py` Content: ``` 1 from enum import Enum 2 from typing import Dict 3 from typing import List 4 5 import pwndbg.wrappers 6 7 cmd_name = "readelf" 8 9 10 class RelocationType(Enum): 11 # For x86_64, some details about these flag can be found in 4.4.1 Relocation Types in https://www.intel.com/content/dam/develop/external/us/en/documents/mpx-linux64-abi.pdf 12 # The definitions of these flags can be found in this file: https://elixir.bootlin.com/glibc/glibc-2.37/source/elf/elf.h 13 JUMP_SLOT = 1 # e.g.: R_X86_64_JUMP_SLOT 14 GLOB_DAT = 2 # e.g.: R_X86_64_GLOB_DAT 15 IRELATIVE = 3 # e.g.: R_X86_64_IRELATIVE 16 17 18 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 19 def get_got_entry(local_path: str) -> Dict[RelocationType, List[str]]: 20 # --wide is for showing the full information, e.g.: R_X86_64_JUMP_SLOT instead of R_X86_64_JUMP_SLO 21 cmd = get_got_entry.cmd + ["--relocs", "--wide", local_path] 22 readelf_out = pwndbg.wrappers.call_cmd(cmd) 23 24 entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType} 25 for line in readelf_out.splitlines(): 26 if not line or not line[0].isdigit(): 27 continue 28 category = line.split()[2] 29 # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt 30 for c in RelocationType: 31 if c.name in category: 32 entries[c].append(line) 33 return entries 34 ``` Path: `gdbinit.py` Content: ``` 1 import cProfile 2 import glob 3 import locale 4 import os 5 import sys 6 import time 7 from glob import glob 8 from os import environ 9 from os import path 10 11 _profiler = cProfile.Profile() 12 13 _start_time = None 14 if environ.get("PWNDBG_PROFILE") == "1": 15 _start_time = time.time() 16 _profiler.enable() 17 18 directory, file = path.split(__file__) 19 directory = path.expanduser(directory) 20 directory = path.abspath(directory) 21 22 # Get virtualenv's site-packages path 23 venv_path = os.environ.get("PWNDBG_VENV_PATH") 24 if not venv_path: 25 venv_path = os.path.join(directory, ".venv") 26 27 if not os.path.exists(venv_path): 28 print(f"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh") 29 sys.exit(1) 30 31 site_pkgs_path = glob(os.path.join(venv_path, "lib/*/site-packages"))[0] 32 33 # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc) 34 bin_path = os.path.join(venv_path, "bin") 35 os.environ["PATH"] = bin_path + os.pathsep + os.environ.get("PATH") 36 37 # Add gdb-pt-dump directory to sys.path so it can be imported 38 gdbpt = path.join(directory, "gdb-pt-dump") 39 sys.path.append(directory) 40 sys.path.append(site_pkgs_path) 41 sys.path.append(gdbpt) 42 43 # warn if the user has different encoding than utf-8 44 encoding = locale.getpreferredencoding() 45 46 if encoding != "UTF-8": 
47 print("******") 48 print(f"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.") 49 print("You might try launching GDB with:") 50 print(" LC_CTYPE=C.UTF-8 gdb") 51 print( 52 "If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command" 53 ) 54 print("******") 55 56 environ["PWNLIB_NOTERM"] = "1" 57 58 import pwndbg # noqa: F401 59 import pwndbg.profiling 60 61 pwndbg.profiling.init(_profiler, _start_time) 62 if environ.get("PWNDBG_PROFILE") == "1": 63 pwndbg.profiling.profiler.stop("pwndbg-load.pstats") 64 pwndbg.profiling.profiler.start() 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gdbinit.py b/gdbinit.py --- a/gdbinit.py +++ b/gdbinit.py @@ -2,6 +2,7 @@ import glob import locale import os +import site import sys import time from glob import glob @@ -30,6 +31,14 @@ site_pkgs_path = glob(os.path.join(venv_path, "lib/*/site-packages"))[0] +# add virtualenv's site-packages to sys.path and run .pth files +site.addsitedir(site_pkgs_path) + +# remove existing, system-level site-packages from sys.path +for site_packages in site.getsitepackages(): + if site_packages in sys.path: + sys.path.remove(site_packages) + # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc) bin_path = os.path.join(venv_path, "bin") os.environ["PATH"] = bin_path + os.pathsep + os.environ.get("PATH") @@ -37,7 +46,6 @@ # Add gdb-pt-dump directory to sys.path so it can be imported gdbpt = path.join(directory, "gdb-pt-dump") sys.path.append(directory) -sys.path.append(site_pkgs_path) sys.path.append(gdbpt) # warn if the user has different encoding than utf-8 diff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py --- a/pwndbg/wrappers/readelf.py +++ b/pwndbg/wrappers/readelf.py @@ -23,7 +23,7 @@ entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType} for line in readelf_out.splitlines(): - if not line or not line[0].isdigit(): + if not line or not line[0].isdigit() or " " not in line: continue category = line.split()[2] # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt
{"golden_diff": "diff --git a/gdbinit.py b/gdbinit.py\n--- a/gdbinit.py\n+++ b/gdbinit.py\n@@ -2,6 +2,7 @@\n import glob\n import locale\n import os\n+import site\n import sys\n import time\n from glob import glob\n@@ -30,6 +31,14 @@\n \n site_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n \n+# add virtualenv's site-packages to sys.path and run .pth files\n+site.addsitedir(site_pkgs_path)\n+\n+# remove existing, system-level site-packages from sys.path\n+for site_packages in site.getsitepackages():\n+ if site_packages in sys.path:\n+ sys.path.remove(site_packages)\n+\n # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\n bin_path = os.path.join(venv_path, \"bin\")\n os.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n@@ -37,7 +46,6 @@\n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n sys.path.append(directory)\n-sys.path.append(site_pkgs_path)\n sys.path.append(gdbpt)\n \n # warn if the user has different encoding than utf-8\ndiff --git a/pwndbg/wrappers/readelf.py b/pwndbg/wrappers/readelf.py\n--- a/pwndbg/wrappers/readelf.py\n+++ b/pwndbg/wrappers/readelf.py\n@@ -23,7 +23,7 @@\n \n entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}\n for line in readelf_out.splitlines():\n- if not line or not line[0].isdigit():\n+ if not line or not line[0].isdigit() or \" \" not in line:\n continue\n category = line.split()[2]\n # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt\n", "issue": "Fix Arch Linux tests\nThe Arch Linux tests are failing due to new versions of pip needing to use the `--break-system-packages` flag to install packages globally. 
We should update the CI to use a virtualenv here.\nCI: try to bring back Arch Linux CI\nLet's see if this works now with virtualenv setup\r\n\r\n<!-- Please make sure to read the testing and linting instructions at https://github.com/pwndbg/pwndbg/blob/dev/DEVELOPING.md before creating a PR -->\r\n\n", "before_files": [{"content": "from enum import Enum\nfrom typing import Dict\nfrom typing import List\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\n\nclass RelocationType(Enum):\n # For x86_64, some details about these flag can be found in 4.4.1 Relocation Types in https://www.intel.com/content/dam/develop/external/us/en/documents/mpx-linux64-abi.pdf\n # The definitions of these flags can be found in this file: https://elixir.bootlin.com/glibc/glibc-2.37/source/elf/elf.h\n JUMP_SLOT = 1 # e.g.: R_X86_64_JUMP_SLOT\n GLOB_DAT = 2 # e.g.: R_X86_64_GLOB_DAT\n IRELATIVE = 3 # e.g.: R_X86_64_IRELATIVE\n\n\[email protected](cmd_name)\ndef get_got_entry(local_path: str) -> Dict[RelocationType, List[str]]:\n # --wide is for showing the full information, e.g.: R_X86_64_JUMP_SLOT instead of R_X86_64_JUMP_SLO\n cmd = get_got_entry.cmd + [\"--relocs\", \"--wide\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}\n for line in readelf_out.splitlines():\n if not line or not line[0].isdigit():\n continue\n category = line.split()[2]\n # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt\n for c in RelocationType:\n if c.name in category:\n entries[c].append(line)\n return entries\n", "path": "pwndbg/wrappers/readelf.py"}, {"content": "import cProfile\nimport glob\nimport locale\nimport os\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\ndirectory, file = path.split(__file__)\ndirectory = path.expanduser(directory)\ndirectory = path.abspath(directory)\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\nif not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\nsite_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n# Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\nbin_path = os.path.join(venv_path, \"bin\")\nos.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n# Add gdb-pt-dump directory to sys.path so it can be imported\ngdbpt = path.join(directory, \"gdb-pt-dump\")\nsys.path.append(directory)\nsys.path.append(site_pkgs_path)\nsys.path.append(gdbpt)\n\n# warn if the user has different encoding than utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. 
pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}], "after_files": [{"content": "from enum import Enum\nfrom typing import Dict\nfrom typing import List\n\nimport pwndbg.wrappers\n\ncmd_name = \"readelf\"\n\n\nclass RelocationType(Enum):\n # For x86_64, some details about these flag can be found in 4.4.1 Relocation Types in https://www.intel.com/content/dam/develop/external/us/en/documents/mpx-linux64-abi.pdf\n # The definitions of these flags can be found in this file: https://elixir.bootlin.com/glibc/glibc-2.37/source/elf/elf.h\n JUMP_SLOT = 1 # e.g.: R_X86_64_JUMP_SLOT\n GLOB_DAT = 2 # e.g.: R_X86_64_GLOB_DAT\n IRELATIVE = 3 # e.g.: R_X86_64_IRELATIVE\n\n\[email protected](cmd_name)\ndef get_got_entry(local_path: str) -> Dict[RelocationType, List[str]]:\n # --wide is for showing the full information, e.g.: R_X86_64_JUMP_SLOT instead of R_X86_64_JUMP_SLO\n cmd = get_got_entry.cmd + [\"--relocs\", \"--wide\", local_path]\n readelf_out = pwndbg.wrappers.call_cmd(cmd)\n\n entries: Dict[RelocationType, List[str]] = {category: [] for category in RelocationType}\n for line in readelf_out.splitlines():\n if not line or not line[0].isdigit() or \" \" not in line:\n continue\n category = line.split()[2]\n # TODO/FIXME: There's a bug here, somehow the IRELATIVE relocation might point to somewhere in .data.rel.ro, which is not in .got or .got.plt\n for c in RelocationType:\n if c.name in category:\n entries[c].append(line)\n return entries\n", "path": "pwndbg/wrappers/readelf.py"}, {"content": "import cProfile\nimport glob\nimport locale\nimport os\nimport site\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\ndirectory, file = path.split(__file__)\ndirectory = path.expanduser(directory)\ndirectory = path.abspath(directory)\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\nif not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\nsite_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n# add virtualenv's site-packages to sys.path and run .pth files\nsite.addsitedir(site_pkgs_path)\n\n# remove existing, system-level site-packages from sys.path\nfor site_packages in site.getsitepackages():\n if site_packages in sys.path:\n sys.path.remove(site_packages)\n\n# Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\nbin_path = os.path.join(venv_path, \"bin\")\nos.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n# Add gdb-pt-dump directory to sys.path so it can be imported\ngdbpt = path.join(directory, \"gdb-pt-dump\")\nsys.path.append(directory)\nsys.path.append(gdbpt)\n\n# warn if the user has different encoding than 
utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}]}
1,523
479
gh_patches_debug_13716
rasdani/github-patches
git_diff
facebookresearch__ParlAI-340
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Return type ambiguous when extracting image features The return type of the extracted image features (if the features file is present or not) is different. If the file is present then it returns a numpy.ndarray type object otherwise it returns a torch.autograd.variable.Variable object. ( https://github.com/facebookresearch/ParlAI/blob/3d86ccdbb4d87002cc6c4782afd0ee5277e742f1/parlai/core/image_featurizers.py#L149 ) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parlai/core/image_featurizers.py` Content: ``` 1 # All rights reserved. 2 # This source code is licensed under the BSD-style license found in the 3 # LICENSE file in the root directory of this source tree. An additional grant 4 # of patent rights can be found in the PATENTS file in the same directory. 5 6 import parlai.core.build_data as build_data 7 8 import os 9 import copy 10 import numpy as np 11 from PIL import Image 12 13 _greyscale = ' .,:;crsA23hHG#98&@' 14 15 class ImageLoader(): 16 """Extract image feature using pretrained CNN network. 17 """ 18 def __init__(self, opt): 19 self.opt = copy.deepcopy(opt) 20 self.netCNN = None 21 22 def init_cnn(self): 23 """Lazy initialization of preprocessor model in case we don't need any image preprocessing.""" 24 try: 25 import torch 26 except ModuleNotFoundError: 27 raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org') 28 from torch.autograd import Variable 29 import torchvision 30 import torchvision.transforms as transforms 31 import torch.nn as nn 32 33 opt = self.opt 34 self.image_size = opt['image_size'] 35 self.crop_size = opt['image_cropsize'] 36 self.datatype = opt['datatype'] 37 self.image_mode = opt['image_mode'] 38 39 opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available() 40 self.use_cuda = opt['cuda'] 41 42 if self.use_cuda: 43 print('[ Using CUDA ]') 44 torch.cuda.set_device(opt['gpu']) 45 46 cnn_type, layer_num = self.image_mode_switcher() 47 48 # initialize the pretrained CNN using pytorch. 49 CNN = getattr(torchvision.models, cnn_type) 50 51 # cut off the additional layer. 52 self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num]) 53 54 # initialize the transform function using torch vision. 55 self.transform = transforms.Compose([ 56 transforms.Scale(self.image_size), 57 transforms.CenterCrop(self.crop_size), 58 transforms.ToTensor(), 59 transforms.Normalize(mean=[0.485, 0.456, 0.406], 60 std=[0.229, 0.224, 0.225]) 61 ]) 62 63 # container for single image 64 self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0) 65 66 if self.use_cuda: 67 self.cuda() 68 self.xs = self.xs.cuda() 69 70 # make self.xs variable. 
71 self.xs = Variable(self.xs) 72 73 def cuda(self): 74 self.netCNN.cuda() 75 76 def save(self, feature, path): 77 feature = feature.cpu().data.numpy() 78 np.save(path, feature) 79 80 def image_mode_switcher(self): 81 switcher = { 82 'resnet152': ['resnet152', -1], 83 'resnet101': ['resnet101', -1], 84 'resnet50': ['resnet50', -1], 85 'resnet34': ['resnet34', -1], 86 'resnet18': ['resnet18', -1], 87 'resnet152_spatial': ['resnet152', -2], 88 'resnet101_spatial': ['resnet101', -2], 89 'resnet50_spatial': ['resnet50', -2], 90 'resnet34_spatial': ['resnet34', -2], 91 'resnet18_spatial': ['resnet18', -2], 92 } 93 94 if self.image_mode not in switcher: 95 raise NotImplementedError('image preprocessing mode' + 96 '{} not supported yet'.format(self.image_mode)) 97 98 return switcher.get(self.image_mode) 99 100 def extract(self, image, path): 101 # check whether initlize CNN network. 102 if not self.netCNN: 103 self.init_cnn() 104 105 self.xs.data.copy_(self.transform(image)) 106 # extract the image feature 107 feature = self.netCNN(self.xs) 108 # save the feature 109 self.save(feature, path) 110 return feature 111 112 def img_to_ascii(self, path): 113 im = Image.open(path) 114 im.thumbnail((60, 40), Image.BICUBIC) 115 im = im.convert('L') 116 asc = [] 117 for y in range(0, im.size[1]): 118 for x in range(0, im.size[0]): 119 lum = 255 - im.getpixel((x, y)) 120 asc.append(_greyscale[lum * len(_greyscale) // 256]) 121 asc.append('\n') 122 return ''.join(asc) 123 124 def load(self, path): 125 opt = self.opt 126 mode = opt.get('image_mode', 'raw') 127 if mode is None or mode == 'none': 128 # don't need to load images 129 return None 130 elif mode == 'raw': 131 # raw just returns RGB values 132 return Image.open(path).convert('RGB') 133 elif mode == 'ascii': 134 # convert images to ascii Β―\_(ツ)_/Β― 135 return self.img_to_ascii(path) 136 else: 137 # otherwise, looks for preprocessed version under 'mode' directory 138 prepath, imagefn = os.path.split(path) 139 140 dpath = os.path.join(prepath, mode) 141 142 if not os.path.exists(dpath): 143 build_data.make_dir(dpath) 144 145 imagefn = imagefn + '.npy' 146 new_path = os.path.join(prepath, mode, imagefn) 147 148 if not os.path.isfile(new_path): 149 return self.extract(Image.open(path).convert('RGB'), new_path) 150 else: 151 return np.load(new_path) 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py --- a/parlai/core/image_featurizers.py +++ b/parlai/core/image_featurizers.py @@ -74,7 +74,6 @@ self.netCNN.cuda() def save(self, feature, path): - feature = feature.cpu().data.numpy() np.save(path, feature) def image_mode_switcher(self): @@ -105,6 +104,7 @@ self.xs.data.copy_(self.transform(image)) # extract the image feature feature = self.netCNN(self.xs) + feature = feature.cpu().data.numpy() # save the feature self.save(feature, path) return feature
{"golden_diff": "diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py\n--- a/parlai/core/image_featurizers.py\n+++ b/parlai/core/image_featurizers.py\n@@ -74,7 +74,6 @@\n self.netCNN.cuda()\n \n def save(self, feature, path):\n- feature = feature.cpu().data.numpy()\n np.save(path, feature)\n \n def image_mode_switcher(self):\n@@ -105,6 +104,7 @@\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n+ feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n", "issue": "Return type ambiguous when extracting image features\nThe return type of the extracted image features (if the features file is present or not) is different. If the file is present then it returns a numpy.ndarray type object otherwise it returns a torch.autograd.variable.Variable object.\r\n( https://github.com/facebookresearch/ParlAI/blob/3d86ccdbb4d87002cc6c4782afd0ee5277e742f1/parlai/core/image_featurizers.py#L149 )\n", "before_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n feature = feature.cpu().data.numpy()\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 
'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 'raw':\n # raw just returns RGB values\n return Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}], "after_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 'raw':\n # raw just returns RGB values\n return 
Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}]}
1,979
175
gh_patches_debug_13533
rasdani/github-patches
git_diff
pre-commit__pre-commit-306
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve error message when attempting to run non-existent hook Hook id in `/.pre-commit-config.yaml` doesn't exist in the included repository (From https://github.com/pre-commit/pre-commit-hooks/issues/37) It should probably suggest updating hooks or checking the spelling of the hookid --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/repository.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import logging 4 import shutil 5 from collections import defaultdict 6 7 from cached_property import cached_property 8 9 from pre_commit import git 10 from pre_commit.clientlib.validate_config import is_local_hooks 11 from pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA 12 from pre_commit.jsonschema_extensions import apply_defaults 13 from pre_commit.languages.all import languages 14 from pre_commit.languages.helpers import environment_dir 15 from pre_commit.manifest import Manifest 16 from pre_commit.prefixed_command_runner import PrefixedCommandRunner 17 18 19 logger = logging.getLogger('pre_commit') 20 21 22 class Repository(object): 23 def __init__(self, repo_config, repo_path_getter): 24 self.repo_config = repo_config 25 self.repo_path_getter = repo_path_getter 26 self.__installed = False 27 28 @classmethod 29 def create(cls, config, store): 30 if is_local_hooks(config): 31 return LocalRepository(config) 32 else: 33 repo_path_getter = store.get_repo_path_getter( 34 config['repo'], config['sha'] 35 ) 36 return cls(config, repo_path_getter) 37 38 @cached_property 39 def repo_url(self): 40 return self.repo_config['repo'] 41 42 @cached_property 43 def sha(self): 44 return self.repo_config['sha'] 45 46 @cached_property 47 def languages(self): 48 return set( 49 (hook['language'], hook['language_version']) 50 for _, hook in self.hooks 51 ) 52 53 @cached_property 54 def additional_dependencies(self): 55 dep_dict = defaultdict(lambda: defaultdict(set)) 56 for _, hook in self.hooks: 57 dep_dict[hook['language']][hook['language_version']].update( 58 hook.get('additional_dependencies', []), 59 ) 60 return dep_dict 61 62 @cached_property 63 def hooks(self): 64 # TODO: merging in manifest dicts is a smell imo 65 return tuple( 66 (hook['id'], dict(self.manifest.hooks[hook['id']], **hook)) 67 for hook in self.repo_config['hooks'] 68 ) 69 70 @cached_property 71 def manifest(self): 72 return Manifest(self.repo_path_getter) 73 74 @cached_property 75 def cmd_runner(self): 76 return PrefixedCommandRunner(self.repo_path_getter.repo_path) 77 78 def require_installed(self): 79 if self.__installed: 80 return 81 82 self.install() 83 self.__installed = True 84 85 def install(self): 86 """Install the hook repository.""" 87 def language_is_installed(language_name, language_version): 88 language = languages[language_name] 89 directory = environment_dir( 90 language.ENVIRONMENT_DIR, language_version, 91 ) 92 return ( 93 directory is None or 94 self.cmd_runner.exists(directory, '.installed') 95 ) 96 97 if not all( 98 language_is_installed(language_name, language_version) 99 for language_name, language_version in self.languages 100 ): 101 logger.info( 102 'Installing environment for {0}.'.format(self.repo_url) 103 ) 104 logger.info('Once installed this environment will be reused.') 105 logger.info('This may take a few minutes...') 106 107 for language_name, language_version in 
self.languages: 108 language = languages[language_name] 109 if language_is_installed(language_name, language_version): 110 continue 111 112 directory = environment_dir( 113 language.ENVIRONMENT_DIR, language_version, 114 ) 115 # There's potentially incomplete cleanup from previous runs 116 # Clean it up! 117 if self.cmd_runner.exists(directory): 118 shutil.rmtree(self.cmd_runner.path(directory)) 119 120 language.install_environment( 121 self.cmd_runner, language_version, 122 self.additional_dependencies[language_name][language_version], 123 ) 124 # Touch the .installed file (atomic) to indicate we've installed 125 open(self.cmd_runner.path(directory, '.installed'), 'w').close() 126 127 def run_hook(self, hook, file_args): 128 """Run a hook. 129 130 Args: 131 hook - Hook dictionary 132 file_args - List of files to run 133 """ 134 self.require_installed() 135 return languages[hook['language']].run_hook( 136 self.cmd_runner, hook, file_args, 137 ) 138 139 140 class LocalRepository(Repository): 141 def __init__(self, repo_config): 142 super(LocalRepository, self).__init__(repo_config, None) 143 144 @cached_property 145 def hooks(self): 146 return tuple( 147 (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items'])) 148 for hook in self.repo_config['hooks'] 149 ) 150 151 @cached_property 152 def cmd_runner(self): 153 return PrefixedCommandRunner(git.get_root()) 154 155 @cached_property 156 def sha(self): 157 raise NotImplementedError 158 159 @cached_property 160 def manifest(self): 161 raise NotImplementedError 162 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/repository.py b/pre_commit/repository.py --- a/pre_commit/repository.py +++ b/pre_commit/repository.py @@ -61,7 +61,16 @@ @cached_property def hooks(self): - # TODO: merging in manifest dicts is a smell imo + for hook in self.repo_config['hooks']: + if hook['id'] not in self.manifest.hooks: + logger.error( + '`{0}` is not present in repository {1}. ' + 'Typo? Perhaps it is introduced in a newer version? ' + 'Often `pre-commit autoupdate` fixes this.'.format( + hook['id'], self.repo_config['repo'], + ) + ) + exit(1) return tuple( (hook['id'], dict(self.manifest.hooks[hook['id']], **hook)) for hook in self.repo_config['hooks']
{"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -61,7 +61,16 @@\n \n @cached_property\n def hooks(self):\n- # TODO: merging in manifest dicts is a smell imo\n+ for hook in self.repo_config['hooks']:\n+ if hook['id'] not in self.manifest.hooks:\n+ logger.error(\n+ '`{0}` is not present in repository {1}. '\n+ 'Typo? Perhaps it is introduced in a newer version? '\n+ 'Often `pre-commit autoupdate` fixes this.'.format(\n+ hook['id'], self.repo_config['repo'],\n+ )\n+ )\n+ exit(1)\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n", "issue": "Improve error message when attempting to run non-existent hook\nHook id in `/.pre-commit-config.yaml` doesn't exist in the included repository\n\n(From https://github.com/pre-commit/pre-commit-hooks/issues/37)\n\nIt should probably suggest updating hooks or checking the spelling of the hookid\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nfrom collections import defaultdict\n\nfrom cached_property import cached_property\n\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import is_local_hooks\nfrom pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA\nfrom pre_commit.jsonschema_extensions import apply_defaults\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nlogger = logging.getLogger('pre_commit')\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n if is_local_hooks(config):\n return LocalRepository(config)\n else:\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def additional_dependencies(self):\n dep_dict = defaultdict(lambda: defaultdict(set))\n for _, hook in self.hooks:\n dep_dict[hook['language']][hook['language_version']].update(\n hook.get('additional_dependencies', []),\n )\n return dep_dict\n\n @cached_property\n def hooks(self):\n # TODO: merging in manifest dicts is a smell imo\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n def language_is_installed(language_name, language_version):\n language = languages[language_name]\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n return (\n directory is None or\n self.cmd_runner.exists(directory, '.installed')\n )\n\n if not all(\n 
language_is_installed(language_name, language_version)\n for language_name, language_version in self.languages\n ):\n logger.info(\n 'Installing environment for {0}.'.format(self.repo_url)\n )\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n for language_name, language_version in self.languages:\n language = languages[language_name]\n if language_is_installed(language_name, language_version):\n continue\n\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if self.cmd_runner.exists(directory):\n shutil.rmtree(self.cmd_runner.path(directory))\n\n language.install_environment(\n self.cmd_runner, language_version,\n self.additional_dependencies[language_name][language_version],\n )\n # Touch the .installed file (atomic) to indicate we've installed\n open(self.cmd_runner.path(directory, '.installed'), 'w').close()\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n\n\nclass LocalRepository(Repository):\n def __init__(self, repo_config):\n super(LocalRepository, self).__init__(repo_config, None)\n\n @cached_property\n def hooks(self):\n return tuple(\n (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(git.get_root())\n\n @cached_property\n def sha(self):\n raise NotImplementedError\n\n @cached_property\n def manifest(self):\n raise NotImplementedError\n", "path": "pre_commit/repository.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport shutil\nfrom collections import defaultdict\n\nfrom cached_property import cached_property\n\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import is_local_hooks\nfrom pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA\nfrom pre_commit.jsonschema_extensions import apply_defaults\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nlogger = logging.getLogger('pre_commit')\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n if is_local_hooks(config):\n return LocalRepository(config)\n else:\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def additional_dependencies(self):\n dep_dict = defaultdict(lambda: defaultdict(set))\n for _, hook in self.hooks:\n dep_dict[hook['language']][hook['language_version']].update(\n hook.get('additional_dependencies', []),\n )\n return dep_dict\n\n @cached_property\n def hooks(self):\n for hook in 
self.repo_config['hooks']:\n if hook['id'] not in self.manifest.hooks:\n logger.error(\n '`{0}` is not present in repository {1}. '\n 'Typo? Perhaps it is introduced in a newer version? '\n 'Often `pre-commit autoupdate` fixes this.'.format(\n hook['id'], self.repo_config['repo'],\n )\n )\n exit(1)\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n def language_is_installed(language_name, language_version):\n language = languages[language_name]\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n return (\n directory is None or\n self.cmd_runner.exists(directory, '.installed')\n )\n\n if not all(\n language_is_installed(language_name, language_version)\n for language_name, language_version in self.languages\n ):\n logger.info(\n 'Installing environment for {0}.'.format(self.repo_url)\n )\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n for language_name, language_version in self.languages:\n language = languages[language_name]\n if language_is_installed(language_name, language_version):\n continue\n\n directory = environment_dir(\n language.ENVIRONMENT_DIR, language_version,\n )\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if self.cmd_runner.exists(directory):\n shutil.rmtree(self.cmd_runner.path(directory))\n\n language.install_environment(\n self.cmd_runner, language_version,\n self.additional_dependencies[language_name][language_version],\n )\n # Touch the .installed file (atomic) to indicate we've installed\n open(self.cmd_runner.path(directory, '.installed'), 'w').close()\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n\n\nclass LocalRepository(Repository):\n def __init__(self, repo_config):\n super(LocalRepository, self).__init__(repo_config, None)\n\n @cached_property\n def hooks(self):\n return tuple(\n (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(git.get_root())\n\n @cached_property\n def sha(self):\n raise NotImplementedError\n\n @cached_property\n def manifest(self):\n raise NotImplementedError\n", "path": "pre_commit/repository.py"}]}
1,687
205
gh_patches_debug_29443
rasdani/github-patches
git_diff
xonsh__xonsh-4795
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Newlines in $PROMPT are not rendered ## xonfig Fresh install of xonsh on Ubuntu 21.10 <details> ``` BEI\slotmv@aln-lx912631:~||βœ“\n➀ echo $PROMPT <stdin>:1:5 - echo $PROMPT <stdin>:1:5 + ![echo $PROMPT] {env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\n{BOLD_INTENSE_RED}➀{RESET} ``` ``` BEI\slotmv@aln-lx912631:~||βœ“\n➀ xonfig <xonsh-code>:1:0 - xonfig <xonsh-code>:1:0 + ![xonfig] +------------------+-----------------------+ | xonsh | 0.12.2 | | Git SHA | e0a0394b | | Commit Date | Apr 25 10:52:15 2022 | | Python | 3.9.7 | | PLY | 3.11 | | have readline | True | | prompt toolkit | 3.0.24 | | shell type | prompt_toolkit | | history backend | json | | pygments | 2.7.1 | | on posix | True | | on linux | True | | distro | ubuntu | | on wsl | False | | on darwin | False | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | | xontrib | [] | | RC file 1 | /home/slotmv/.xonshrc | +------------------+-----------------------+ ``` </details> ## Expected Behavior I expect the xonsh prompt to be rendered as in the web-preview: (env) lou@carcolh:~/snail/stuff|branch|+2βš‘7 ➀ ## Current Behavior The prompt is instead rendered as follows: (env) lou@carcolh:~/snail/stuff|branch|+2βš‘7\n➀ ## Steps to Reproduce - Install xonsh using `sudo pip install xonsh` - Run `xonfig web` - Select color-scheme and prompt - Start new xonsh-shell --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xonsh/webconfig/xonsh_data.py` Content: ``` 1 """script for compiling elm source and dumping it to the js folder.""" 2 import functools 3 import io 4 import logging 5 6 import pygments 7 8 from xonsh.color_tools import rgb_to_ints 9 from xonsh.prompt.base import PromptFormatter, default_prompt 10 from xonsh.pyghooks import ( 11 Token, 12 XonshHtmlFormatter, 13 XonshLexer, 14 XonshStyle, 15 xonsh_style_proxy, 16 ) 17 from xonsh.pygments_cache import get_all_styles 18 from xonsh.style_tools import partial_color_tokenize 19 from xonsh.xontribs_meta import Xontrib, get_xontribs 20 21 # $RAISE_SUBPROC_ERROR = True 22 # $XONSH_SHOW_TRACEBACK = False 23 24 # 25 # helper funcs 26 # 27 28 29 @functools.lru_cache(maxsize=4) 30 def get_rst_formatter(**kwargs): 31 from pygments.formatters.html import HtmlFormatter 32 from pygments.lexers.markup import RstLexer 33 34 return RstLexer(), HtmlFormatter(**kwargs) 35 36 37 def escape(s): 38 return s.replace(r"\n", "<br/>") 39 40 41 def invert_color(orig): 42 r, g, b = rgb_to_ints(orig) 43 inverted = [255 - r, 255 - g, 255 - b] 44 new = [hex(n)[2:] for n in inverted] 45 new = [n if len(n) == 2 else "0" + n for n in new] 46 return "".join(new) 47 48 49 def html_format(s, style="default"): 50 buf = io.StringIO() 51 proxy_style = xonsh_style_proxy(XonshStyle(style)) 52 # make sure we have a foreground color 53 fgcolor = proxy_style._styles[Token.Text][0] 54 if not fgcolor: 55 fgcolor = invert_color(proxy_style.background_color[1:].strip("#")) 56 # need to generate stream before creating formatter so that all tokens actually exist 57 if isinstance(s, str): 58 token_stream = partial_color_tokenize(s) 59 else: 60 token_stream = s 61 formatter = XonshHtmlFormatter( 62 wrapcode=True, 63 noclasses=True, 64 style=proxy_style, 65 
prestyles="margin: 0em; padding: 0.5em 0.1em; color: #" + fgcolor, 66 cssstyles="border-style: solid; border-radius: 5px", 67 ) 68 formatter.format(token_stream, buf) 69 return buf.getvalue() 70 71 72 def rst_to_html(text): 73 try: 74 from pygments import highlight 75 76 lexer, formatter = get_rst_formatter( 77 noclasses=True, 78 cssstyles="background: transparent", 79 style="monokai", # a dark bg style 80 ) 81 return highlight(text, lexer, formatter) 82 except ImportError: 83 return text 84 85 86 # render prompts 87 def get_named_prompts(): 88 return [ 89 ( 90 "default", 91 default_prompt(), 92 ), 93 ("debian chroot", "{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{RESET}> "), 94 ("minimalist", "{BOLD_GREEN}{cwd_base}{RESET} ) "), 95 ( 96 "terlar", 97 "{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:" 98 "{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\n{BOLD_INTENSE_RED}➀{RESET} ", 99 ), 100 ( 101 "default with git status", 102 "{env_name}{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}" 103 "{branch_color}{gitstatus: {}}{RESET} {BOLD_BLUE}" 104 "{prompt_end}{RESET} ", 105 ), 106 ("robbyrussell", "{BOLD_INTENSE_RED}➜ {CYAN}{cwd_base} {gitstatus}{RESET} "), 107 ("just a dollar", "$ "), 108 ( 109 "simple pythonista", 110 "{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} " 111 "in {BOLD_GREEN}{cwd}{RESET}\\nβ†ͺ ", 112 ), 113 ( 114 "informative", 115 "[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} " 116 "{BOLD_GREEN}{cwd} {gitstatus}{RESET}\\n> ", 117 ), 118 ( 119 "informative Version Control", 120 "{YELLOW}{env_name} " "{BOLD_GREEN}{cwd} {gitstatus}{RESET} {prompt_end} ", 121 ), 122 ("classic", "{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> "), 123 ( 124 "classic with git status", 125 "{gitstatus} {RESET}{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> ", 126 ), 127 ("screen savvy", "{YELLOW}{user}@{PURPLE}{hostname}{BOLD_GREEN}{cwd}{RESET}> "), 128 ( 129 "sorin", 130 "{CYAN}{cwd} {INTENSE_RED}❯{INTENSE_YELLOW}❯{INTENSE_GREEN}❯{RESET} ", 131 ), 132 ( 133 "acidhub", 134 "❰{INTENSE_GREEN}{user}{RESET}❙{YELLOW}{cwd}{RESET}{env_name}❱{gitstatus}≻ ", 135 ), 136 ( 137 "nim", 138 "{INTENSE_GREEN}┬─[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}" 139 "{INTENSE_GREEN}]─[{localtime}]─[{RESET}G:{INTENSE_GREEN}{curr_branch}=]" 140 "\\n{INTENSE_GREEN}╰─>{INTENSE_RED}{prompt_end}{RESET} ", 141 ), 142 ] 143 144 145 def get_initial(env, prompt_format, fields): 146 template = env.get_stringified("PROMPT") 147 return { 148 "value": template, 149 "display": escape(html_format(prompt_format(template, fields=fields))), 150 } 151 152 153 def render_prompts(env): 154 prompt_format = PromptFormatter() 155 fields = dict(env.get("PROMPT_FIELDS") or {}) 156 fields.update( 157 cwd="~/snail/stuff", 158 cwd_base="stuff", 159 user="lou", 160 hostname="carcolh", 161 env_name=fields["env_prefix"] + "env" + fields["env_postfix"], 162 curr_branch="branch", 163 gitstatus="{CYAN}branch|{BOLD_BLUE}+2{RESET}βš‘7", 164 branch_color="{BOLD_INTENSE_RED}", 165 localtime="15:56:07", 166 ) 167 yield get_initial(env, prompt_format, fields) 168 for name, template in get_named_prompts(): 169 display = html_format(prompt_format(template, fields=fields)) 170 yield name, { 171 "value": template, 172 "display": escape(display), 173 } 174 175 176 def render_colors(): 177 source = ( 178 "import sys\n" 179 'echo "Welcome $USER on" @(sys.platform)\n\n' 180 "def func(x=42):\n" 181 ' d = {"xonsh": True}\n' 182 ' return d.get("xonsh") and you\n\n' 183 "# This is a comment\n" 184 "![env | uniq | sort | grep PATH]\n" 185 ) 186 lexer = 
XonshLexer() 187 lexer.add_filter("tokenmerge") 188 token_stream = list(pygments.lex(source, lexer=lexer)) 189 token_stream = [(t, s.replace("\n", "\\n")) for t, s in token_stream] 190 styles = sorted(get_all_styles()) 191 styles.insert(0, styles.pop(styles.index("default"))) 192 for style in styles: 193 try: 194 display = html_format(token_stream, style=style) 195 except Exception as ex: 196 logging.error( 197 f"Failed to format Xonsh code {ex!r}. {style!r}", exc_info=True 198 ) 199 display = source 200 yield style, escape(display) 201 202 203 def format_xontrib(xontrib: Xontrib): 204 return { 205 "url": xontrib.url, 206 "license": xontrib.package.license if xontrib.package else "", 207 "display": escape(rst_to_html(xontrib.description)), 208 } 209 210 211 def render_xontribs(): 212 md = get_xontribs() 213 for xontrib_name, xontrib in md.items(): 214 yield xontrib_name, format_xontrib(xontrib) 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/xonsh/webconfig/xonsh_data.py b/xonsh/webconfig/xonsh_data.py --- a/xonsh/webconfig/xonsh_data.py +++ b/xonsh/webconfig/xonsh_data.py @@ -95,7 +95,7 @@ ( "terlar", "{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:" - "{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\n{BOLD_INTENSE_RED}➀{RESET} ", + "{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\n{BOLD_INTENSE_RED}➀{RESET} ", ), ( "default with git status", @@ -108,12 +108,12 @@ ( "simple pythonista", "{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} " - "in {BOLD_GREEN}{cwd}{RESET}\\nβ†ͺ ", + "in {BOLD_GREEN}{cwd}{RESET}\nβ†ͺ ", ), ( "informative", "[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} " - "{BOLD_GREEN}{cwd} {gitstatus}{RESET}\\n> ", + "{BOLD_GREEN}{cwd} {gitstatus}{RESET}\n> ", ), ( "informative Version Control", @@ -137,7 +137,7 @@ "nim", "{INTENSE_GREEN}┬─[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}" "{INTENSE_GREEN}]─[{localtime}]─[{RESET}G:{INTENSE_GREEN}{curr_branch}=]" - "\\n{INTENSE_GREEN}╰─>{INTENSE_RED}{prompt_end}{RESET} ", + "\n{INTENSE_GREEN}╰─>{INTENSE_RED}{prompt_end}{RESET} ", ), ]
{"golden_diff": "diff --git a/xonsh/webconfig/xonsh_data.py b/xonsh/webconfig/xonsh_data.py\n--- a/xonsh/webconfig/xonsh_data.py\n+++ b/xonsh/webconfig/xonsh_data.py\n@@ -95,7 +95,7 @@\n (\n \"terlar\",\n \"{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:\"\n- \"{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\\\n{BOLD_INTENSE_RED}\u27a4{RESET} \",\n+ \"{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\n{BOLD_INTENSE_RED}\u27a4{RESET} \",\n ),\n (\n \"default with git status\",\n@@ -108,12 +108,12 @@\n (\n \"simple pythonista\",\n \"{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} \"\n- \"in {BOLD_GREEN}{cwd}{RESET}\\\\n\u21aa \",\n+ \"in {BOLD_GREEN}{cwd}{RESET}\\n\u21aa \",\n ),\n (\n \"informative\",\n \"[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} \"\n- \"{BOLD_GREEN}{cwd} {gitstatus}{RESET}\\\\n> \",\n+ \"{BOLD_GREEN}{cwd} {gitstatus}{RESET}\\n> \",\n ),\n (\n \"informative Version Control\",\n@@ -137,7 +137,7 @@\n \"nim\",\n \"{INTENSE_GREEN}\u252c\u2500[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}\"\n \"{INTENSE_GREEN}]\u2500[{localtime}]\u2500[{RESET}G:{INTENSE_GREEN}{curr_branch}=]\"\n- \"\\\\n{INTENSE_GREEN}\u2570\u2500>{INTENSE_RED}{prompt_end}{RESET} \",\n+ \"\\n{INTENSE_GREEN}\u2570\u2500>{INTENSE_RED}{prompt_end}{RESET} \",\n ),\n ]\n", "issue": "Newlines in $PROMPT are not rendered\n## xonfig\r\n\r\nFresh install of xonsh on Ubuntu 21.10\r\n\r\n<details>\r\n\r\n```\r\nBEI\\slotmv@aln-lx912631:~||\u2713\\n\u27a4 echo $PROMPT\r\n<stdin>:1:5 - echo $PROMPT\r\n<stdin>:1:5 + ![echo $PROMPT]\r\n{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\n{BOLD_INTENSE_RED}\u27a4{RESET}\r\n```\r\n\r\n```\r\nBEI\\slotmv@aln-lx912631:~||\u2713\\n\u27a4 xonfig\r\n<xonsh-code>:1:0 - xonfig\r\n<xonsh-code>:1:0 + ![xonfig]\r\n+------------------+-----------------------+\r\n| xonsh | 0.12.2 |\r\n| Git SHA | e0a0394b |\r\n| Commit Date | Apr 25 10:52:15 2022 |\r\n| Python | 3.9.7 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 3.0.24 |\r\n| shell type | prompt_toolkit |\r\n| history backend | json |\r\n| pygments | 2.7.1 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | ubuntu |\r\n| on wsl | False |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n| xontrib | [] |\r\n| RC file 1 | /home/slotmv/.xonshrc |\r\n+------------------+-----------------------+\r\n\r\n\r\n```\r\n\r\n</details>\r\n\r\n\r\n## Expected Behavior\r\nI expect the xonsh prompt to be rendered as in the web-preview:\r\n\r\n(env) lou@carcolh:~/snail/stuff|branch|+2\u26917\r\n\u27a4 \r\n\r\n\r\n## Current Behavior\r\nThe prompt is instead rendered as follows:\r\n(env) lou@carcolh:~/snail/stuff|branch|+2\u26917\\n\u27a4 \r\n\r\n\r\n## Steps to Reproduce\r\n- Install xonsh using `sudo pip install xonsh`\r\n- Run `xonfig web`\r\n- Select color-scheme and prompt\r\n- Start new xonsh-shell\r\n\n", "before_files": [{"content": "\"\"\"script for compiling elm source and dumping it to the js folder.\"\"\"\nimport functools\nimport io\nimport logging\n\nimport pygments\n\nfrom xonsh.color_tools import rgb_to_ints\nfrom xonsh.prompt.base import PromptFormatter, default_prompt\nfrom xonsh.pyghooks import (\n Token,\n XonshHtmlFormatter,\n XonshLexer,\n XonshStyle,\n xonsh_style_proxy,\n)\nfrom xonsh.pygments_cache import get_all_styles\nfrom xonsh.style_tools import 
partial_color_tokenize\nfrom xonsh.xontribs_meta import Xontrib, get_xontribs\n\n# $RAISE_SUBPROC_ERROR = True\n# $XONSH_SHOW_TRACEBACK = False\n\n#\n# helper funcs\n#\n\n\[email protected]_cache(maxsize=4)\ndef get_rst_formatter(**kwargs):\n from pygments.formatters.html import HtmlFormatter\n from pygments.lexers.markup import RstLexer\n\n return RstLexer(), HtmlFormatter(**kwargs)\n\n\ndef escape(s):\n return s.replace(r\"\\n\", \"<br/>\")\n\n\ndef invert_color(orig):\n r, g, b = rgb_to_ints(orig)\n inverted = [255 - r, 255 - g, 255 - b]\n new = [hex(n)[2:] for n in inverted]\n new = [n if len(n) == 2 else \"0\" + n for n in new]\n return \"\".join(new)\n\n\ndef html_format(s, style=\"default\"):\n buf = io.StringIO()\n proxy_style = xonsh_style_proxy(XonshStyle(style))\n # make sure we have a foreground color\n fgcolor = proxy_style._styles[Token.Text][0]\n if not fgcolor:\n fgcolor = invert_color(proxy_style.background_color[1:].strip(\"#\"))\n # need to generate stream before creating formatter so that all tokens actually exist\n if isinstance(s, str):\n token_stream = partial_color_tokenize(s)\n else:\n token_stream = s\n formatter = XonshHtmlFormatter(\n wrapcode=True,\n noclasses=True,\n style=proxy_style,\n prestyles=\"margin: 0em; padding: 0.5em 0.1em; color: #\" + fgcolor,\n cssstyles=\"border-style: solid; border-radius: 5px\",\n )\n formatter.format(token_stream, buf)\n return buf.getvalue()\n\n\ndef rst_to_html(text):\n try:\n from pygments import highlight\n\n lexer, formatter = get_rst_formatter(\n noclasses=True,\n cssstyles=\"background: transparent\",\n style=\"monokai\", # a dark bg style\n )\n return highlight(text, lexer, formatter)\n except ImportError:\n return text\n\n\n# render prompts\ndef get_named_prompts():\n return [\n (\n \"default\",\n default_prompt(),\n ),\n (\"debian chroot\", \"{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{RESET}> \"),\n (\"minimalist\", \"{BOLD_GREEN}{cwd_base}{RESET} ) \"),\n (\n \"terlar\",\n \"{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:\"\n \"{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\\\n{BOLD_INTENSE_RED}\u27a4{RESET} \",\n ),\n (\n \"default with git status\",\n \"{env_name}{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}\"\n \"{branch_color}{gitstatus: {}}{RESET} {BOLD_BLUE}\"\n \"{prompt_end}{RESET} \",\n ),\n (\"robbyrussell\", \"{BOLD_INTENSE_RED}\u279c {CYAN}{cwd_base} {gitstatus}{RESET} \"),\n (\"just a dollar\", \"$ \"),\n (\n \"simple pythonista\",\n \"{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} \"\n \"in {BOLD_GREEN}{cwd}{RESET}\\\\n\u21aa \",\n ),\n (\n \"informative\",\n \"[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} \"\n \"{BOLD_GREEN}{cwd} {gitstatus}{RESET}\\\\n> \",\n ),\n (\n \"informative Version Control\",\n \"{YELLOW}{env_name} \" \"{BOLD_GREEN}{cwd} {gitstatus}{RESET} {prompt_end} \",\n ),\n (\"classic\", \"{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> \"),\n (\n \"classic with git status\",\n \"{gitstatus} {RESET}{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> \",\n ),\n (\"screen savvy\", \"{YELLOW}{user}@{PURPLE}{hostname}{BOLD_GREEN}{cwd}{RESET}> \"),\n (\n \"sorin\",\n \"{CYAN}{cwd} {INTENSE_RED}\u276f{INTENSE_YELLOW}\u276f{INTENSE_GREEN}\u276f{RESET} \",\n ),\n (\n \"acidhub\",\n \"\u2770{INTENSE_GREEN}{user}{RESET}\u2759{YELLOW}{cwd}{RESET}{env_name}\u2771{gitstatus}\u227b \",\n ),\n (\n \"nim\",\n \"{INTENSE_GREEN}\u252c\u2500[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}\"\n \"{INTENSE_GREEN}]\u2500[{localtime}]\u2500[{RESET}G:{INTENSE_GREEN}{curr_branch}=]\"\n 
\"\\\\n{INTENSE_GREEN}\u2570\u2500>{INTENSE_RED}{prompt_end}{RESET} \",\n ),\n ]\n\n\ndef get_initial(env, prompt_format, fields):\n template = env.get_stringified(\"PROMPT\")\n return {\n \"value\": template,\n \"display\": escape(html_format(prompt_format(template, fields=fields))),\n }\n\n\ndef render_prompts(env):\n prompt_format = PromptFormatter()\n fields = dict(env.get(\"PROMPT_FIELDS\") or {})\n fields.update(\n cwd=\"~/snail/stuff\",\n cwd_base=\"stuff\",\n user=\"lou\",\n hostname=\"carcolh\",\n env_name=fields[\"env_prefix\"] + \"env\" + fields[\"env_postfix\"],\n curr_branch=\"branch\",\n gitstatus=\"{CYAN}branch|{BOLD_BLUE}+2{RESET}\u26917\",\n branch_color=\"{BOLD_INTENSE_RED}\",\n localtime=\"15:56:07\",\n )\n yield get_initial(env, prompt_format, fields)\n for name, template in get_named_prompts():\n display = html_format(prompt_format(template, fields=fields))\n yield name, {\n \"value\": template,\n \"display\": escape(display),\n }\n\n\ndef render_colors():\n source = (\n \"import sys\\n\"\n 'echo \"Welcome $USER on\" @(sys.platform)\\n\\n'\n \"def func(x=42):\\n\"\n ' d = {\"xonsh\": True}\\n'\n ' return d.get(\"xonsh\") and you\\n\\n'\n \"# This is a comment\\n\"\n \"![env | uniq | sort | grep PATH]\\n\"\n )\n lexer = XonshLexer()\n lexer.add_filter(\"tokenmerge\")\n token_stream = list(pygments.lex(source, lexer=lexer))\n token_stream = [(t, s.replace(\"\\n\", \"\\\\n\")) for t, s in token_stream]\n styles = sorted(get_all_styles())\n styles.insert(0, styles.pop(styles.index(\"default\")))\n for style in styles:\n try:\n display = html_format(token_stream, style=style)\n except Exception as ex:\n logging.error(\n f\"Failed to format Xonsh code {ex!r}. {style!r}\", exc_info=True\n )\n display = source\n yield style, escape(display)\n\n\ndef format_xontrib(xontrib: Xontrib):\n return {\n \"url\": xontrib.url,\n \"license\": xontrib.package.license if xontrib.package else \"\",\n \"display\": escape(rst_to_html(xontrib.description)),\n }\n\n\ndef render_xontribs():\n md = get_xontribs()\n for xontrib_name, xontrib in md.items():\n yield xontrib_name, format_xontrib(xontrib)\n", "path": "xonsh/webconfig/xonsh_data.py"}], "after_files": [{"content": "\"\"\"script for compiling elm source and dumping it to the js folder.\"\"\"\nimport functools\nimport io\nimport logging\n\nimport pygments\n\nfrom xonsh.color_tools import rgb_to_ints\nfrom xonsh.prompt.base import PromptFormatter, default_prompt\nfrom xonsh.pyghooks import (\n Token,\n XonshHtmlFormatter,\n XonshLexer,\n XonshStyle,\n xonsh_style_proxy,\n)\nfrom xonsh.pygments_cache import get_all_styles\nfrom xonsh.style_tools import partial_color_tokenize\nfrom xonsh.xontribs_meta import Xontrib, get_xontribs\n\n# $RAISE_SUBPROC_ERROR = True\n# $XONSH_SHOW_TRACEBACK = False\n\n#\n# helper funcs\n#\n\n\[email protected]_cache(maxsize=4)\ndef get_rst_formatter(**kwargs):\n from pygments.formatters.html import HtmlFormatter\n from pygments.lexers.markup import RstLexer\n\n return RstLexer(), HtmlFormatter(**kwargs)\n\n\ndef escape(s):\n return s.replace(r\"\\n\", \"<br/>\")\n\n\ndef invert_color(orig):\n r, g, b = rgb_to_ints(orig)\n inverted = [255 - r, 255 - g, 255 - b]\n new = [hex(n)[2:] for n in inverted]\n new = [n if len(n) == 2 else \"0\" + n for n in new]\n return \"\".join(new)\n\n\ndef html_format(s, style=\"default\"):\n buf = io.StringIO()\n proxy_style = xonsh_style_proxy(XonshStyle(style))\n # make sure we have a foreground color\n fgcolor = proxy_style._styles[Token.Text][0]\n if not fgcolor:\n fgcolor = 
invert_color(proxy_style.background_color[1:].strip(\"#\"))\n # need to generate stream before creating formatter so that all tokens actually exist\n if isinstance(s, str):\n token_stream = partial_color_tokenize(s)\n else:\n token_stream = s\n formatter = XonshHtmlFormatter(\n wrapcode=True,\n noclasses=True,\n style=proxy_style,\n prestyles=\"margin: 0em; padding: 0.5em 0.1em; color: #\" + fgcolor,\n cssstyles=\"border-style: solid; border-radius: 5px\",\n )\n formatter.format(token_stream, buf)\n return buf.getvalue()\n\n\ndef rst_to_html(text):\n try:\n from pygments import highlight\n\n lexer, formatter = get_rst_formatter(\n noclasses=True,\n cssstyles=\"background: transparent\",\n style=\"monokai\", # a dark bg style\n )\n return highlight(text, lexer, formatter)\n except ImportError:\n return text\n\n\n# render prompts\ndef get_named_prompts():\n return [\n (\n \"default\",\n default_prompt(),\n ),\n (\"debian chroot\", \"{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{RESET}> \"),\n (\"minimalist\", \"{BOLD_GREEN}{cwd_base}{RESET} ) \"),\n (\n \"terlar\",\n \"{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:\"\n \"{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\\n{BOLD_INTENSE_RED}\u27a4{RESET} \",\n ),\n (\n \"default with git status\",\n \"{env_name}{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}\"\n \"{branch_color}{gitstatus: {}}{RESET} {BOLD_BLUE}\"\n \"{prompt_end}{RESET} \",\n ),\n (\"robbyrussell\", \"{BOLD_INTENSE_RED}\u279c {CYAN}{cwd_base} {gitstatus}{RESET} \"),\n (\"just a dollar\", \"$ \"),\n (\n \"simple pythonista\",\n \"{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} \"\n \"in {BOLD_GREEN}{cwd}{RESET}\\n\u21aa \",\n ),\n (\n \"informative\",\n \"[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} \"\n \"{BOLD_GREEN}{cwd} {gitstatus}{RESET}\\n> \",\n ),\n (\n \"informative Version Control\",\n \"{YELLOW}{env_name} \" \"{BOLD_GREEN}{cwd} {gitstatus}{RESET} {prompt_end} \",\n ),\n (\"classic\", \"{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> \"),\n (\n \"classic with git status\",\n \"{gitstatus} {RESET}{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> \",\n ),\n (\"screen savvy\", \"{YELLOW}{user}@{PURPLE}{hostname}{BOLD_GREEN}{cwd}{RESET}> \"),\n (\n \"sorin\",\n \"{CYAN}{cwd} {INTENSE_RED}\u276f{INTENSE_YELLOW}\u276f{INTENSE_GREEN}\u276f{RESET} \",\n ),\n (\n \"acidhub\",\n \"\u2770{INTENSE_GREEN}{user}{RESET}\u2759{YELLOW}{cwd}{RESET}{env_name}\u2771{gitstatus}\u227b \",\n ),\n (\n \"nim\",\n \"{INTENSE_GREEN}\u252c\u2500[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}\"\n \"{INTENSE_GREEN}]\u2500[{localtime}]\u2500[{RESET}G:{INTENSE_GREEN}{curr_branch}=]\"\n \"\\n{INTENSE_GREEN}\u2570\u2500>{INTENSE_RED}{prompt_end}{RESET} \",\n ),\n ]\n\n\ndef get_initial(env, prompt_format, fields):\n template = env.get_stringified(\"PROMPT\")\n return {\n \"value\": template,\n \"display\": escape(html_format(prompt_format(template, fields=fields))),\n }\n\n\ndef render_prompts(env):\n prompt_format = PromptFormatter()\n fields = dict(env.get(\"PROMPT_FIELDS\") or {})\n fields.update(\n cwd=\"~/snail/stuff\",\n cwd_base=\"stuff\",\n user=\"lou\",\n hostname=\"carcolh\",\n env_name=fields[\"env_prefix\"] + \"env\" + fields[\"env_postfix\"],\n curr_branch=\"branch\",\n gitstatus=\"{CYAN}branch|{BOLD_BLUE}+2{RESET}\u26917\",\n branch_color=\"{BOLD_INTENSE_RED}\",\n localtime=\"15:56:07\",\n )\n yield get_initial(env, prompt_format, fields)\n for name, template in get_named_prompts():\n display = html_format(prompt_format(template, fields=fields))\n yield name, {\n 
\"value\": template,\n \"display\": escape(display),\n }\n\n\ndef render_colors():\n source = (\n \"import sys\\n\"\n 'echo \"Welcome $USER on\" @(sys.platform)\\n\\n'\n \"def func(x=42):\\n\"\n ' d = {\"xonsh\": True}\\n'\n ' return d.get(\"xonsh\") and you\\n\\n'\n \"# This is a comment\\n\"\n \"![env | uniq | sort | grep PATH]\\n\"\n )\n lexer = XonshLexer()\n lexer.add_filter(\"tokenmerge\")\n token_stream = list(pygments.lex(source, lexer=lexer))\n token_stream = [(t, s.replace(\"\\n\", \"\\\\n\")) for t, s in token_stream]\n styles = sorted(get_all_styles())\n styles.insert(0, styles.pop(styles.index(\"default\")))\n for style in styles:\n try:\n display = html_format(token_stream, style=style)\n except Exception as ex:\n logging.error(\n f\"Failed to format Xonsh code {ex!r}. {style!r}\", exc_info=True\n )\n display = source\n yield style, escape(display)\n\n\ndef format_xontrib(xontrib: Xontrib):\n return {\n \"url\": xontrib.url,\n \"license\": xontrib.package.license if xontrib.package else \"\",\n \"display\": escape(rst_to_html(xontrib.description)),\n }\n\n\ndef render_xontribs():\n md = get_xontribs()\n for xontrib_name, xontrib in md.items():\n yield xontrib_name, format_xontrib(xontrib)\n", "path": "xonsh/webconfig/xonsh_data.py"}]}
3,179
427
gh_patches_debug_40446
rasdani/github-patches
git_diff
pytorch__TensorRT-2669
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🐛 [Bug] Issue in conversion when parameters/buffers are moved during compilation
## Bug Description

### Bug 1
```python
 File "/root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch_tensorrt/dynamo/conversion/converter_utils.py", line 491, in to_numpy
    output = value.cpu().detach().contiguous().numpy()
RuntimeError: .numpy() is not supported for tensor subclasses.
```
### Suggested Fix 1
Need a custom version of the following function which registers a parameter, not a buffer
https://github.com/pytorch/TensorRT/blob/afd5abebbffa49107bcc7766c9f00bd6be2e593c/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py#L39

### Bug 2
```python
File "/root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch/_ops.py", line 571, in __call__
    return self_._op(*args, **kwargs)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and meta! (when checking argument for argument mat2 in method wrapper_CUDA_mm)
```
### Suggested Fix 2
Need to cast constant Tensors to `nn.Parameter` on `CUDA` at constant-folding time
https://github.com/pytorch/TensorRT/blob/afd5abebbffa49107bcc7766c9f00bd6be2e593c/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py#L39

### Bug 3
```python
File "<eval_with_key>.67 from /root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py:569 in wrapped", line 11, in forward
File "/root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch/_ops.py", line 571, in __call__
    return self_._op(*args, **kwargs)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and meta! (when checking argument for argument mat2 in method wrapper_CUDA_mm)
```
### Suggested Fix 3
This line needs to be removed, as it has unintended behavior when casting constant params
https://github.com/pytorch/TensorRT/blob/afd5abebbffa49107bcc7766c9f00bd6be2e593c/py/torch_tensorrt/dynamo/conversion/_conversion.py#L32

## Expected behavior

Model should compile

## Environment

 - Torch and Torch-TensorRT Version: `2.3.0.dev2024222+cu121`

--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES --- Path: `py/torch_tensorrt/dynamo/conversion/_conversion.py` Content: ``` 1 from __future__ import annotations 2 3 import io 4 from typing import Sequence 5 6 import tensorrt as trt 7 import torch 8 from torch_tensorrt._Input import Input 9 from torch_tensorrt.dynamo._settings import CompilationSettings 10 from torch_tensorrt.dynamo.conversion._TRTInterpreter import ( 11 TRTInterpreter, 12 TRTInterpreterResult, 13 ) 14 from torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule, TorchTensorRTModule 15 from torch_tensorrt.dynamo.utils import get_torch_inputs, to_torch_device 16 17 18 def interpret_module_to_result( 19 module: torch.fx.GraphModule, 20 inputs: Sequence[Input], 21 settings: CompilationSettings = CompilationSettings(), 22 ) -> TRTInterpreterResult: 23 """Interpret an FX module to a TRTInterpreterResult 24 Args: 25 module: FX GraphModule to interpret 26 inputs: Sequence of Tensors representing inputs to the module 27 settings: Compilation settings 28 Returns: 29 TRTInterpreterResult 30 """ 31 torch_inputs = get_torch_inputs(inputs, settings.device) 32 module.to(to_torch_device(settings.device)) 33 module_outputs = module(*torch_inputs) 34 35 if not isinstance(module_outputs, (list, tuple)): 36 module_outputs = [module_outputs] 37 38 # Int64 outputs can sometimes be generated from within other operators 39 # such as aten.sum - such outputs can be truncated 40 output_dtypes = [] 41 for output in module_outputs: 42 if settings.truncate_long_and_double and output.dtype == torch.float64: 43 output_dtypes.append(torch.float32) 44 elif settings.truncate_long_and_double and output.dtype == torch.int64: 45 output_dtypes.append(torch.int32) 46 else: 47 output_dtypes.append(output.dtype) 48 49 interpreter = TRTInterpreter( 50 module, 51 inputs, 52 logger_level=(trt.Logger.VERBOSE if settings.debug else trt.Logger.WARNING), 53 output_dtypes=output_dtypes, 54 compilation_settings=settings, 55 ) 56 interpreter_result = interpreter.run() 57 return interpreter_result 58 59 60 def convert_module( 61 module: torch.fx.GraphModule, 62 inputs: Sequence[Input], 63 settings: CompilationSettings = CompilationSettings(), 64 name: str = "", 65 ) -> PythonTorchTensorRTModule | TorchTensorRTModule: 66 """Convert an FX module to a TRT module 67 Args: 68 module: FX GraphModule to convert 69 inputs: Sequence of Tensors representing inputs to the module 70 settings: Compilation settings 71 name: TRT engine name 72 Returns: 73 _PythonTorchTensorRTModule or TorchTensorRTModule 74 """ 75 interpreter_result = interpret_module_to_result(module, inputs, settings) 76 77 if settings.use_python_runtime: 78 return PythonTorchTensorRTModule( 79 engine=interpreter_result.engine, 80 input_names=list(interpreter_result.input_names), 81 output_names=list(interpreter_result.output_names), 82 target_device=settings.device, 83 profiling_enabled=settings.debug, 84 ) 85 86 else: 87 from torch_tensorrt.dynamo.runtime import TorchTensorRTModule 88 89 with io.BytesIO() as engine_bytes: 90 engine_bytes.write(interpreter_result.engine.serialize()) 91 engine_str = engine_bytes.getvalue() 92 return TorchTensorRTModule( 93 serialized_engine=engine_str, 94 name=name, 95 input_binding_names=list(interpreter_result.input_names), 96 output_binding_names=list(interpreter_result.output_names), 97 target_device=settings.device, 98 hardware_compatible=settings.hardware_compatible, 99 ) 100 ``` Path: `py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py` Content: ``` 1 import logging 2 from typing import Any, 
Sequence 3 4 import torch 5 from torch_tensorrt._utils import sanitized_torch_version 6 from torch_tensorrt.dynamo.lowering.passes.pass_utils import ( 7 clean_up_graph_after_modifications, 8 ) 9 10 from packaging import version 11 12 # Modify import location of utilities based on Torch version 13 if version.parse(sanitized_torch_version()) < version.parse("2.1.1"): 14 from torch._inductor.freezing import ConstantFolder, replace_node_with_constant 15 else: 16 from torch._inductor.constant_folding import ( 17 ConstantFolder, 18 replace_node_with_constant, 19 ) 20 21 logger = logging.getLogger(__name__) 22 23 24 @torch.utils._python_dispatch._disable_current_modes() # type: ignore 25 def constant_fold( 26 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor] 27 ) -> torch.fx.GraphModule: 28 """Adapted from: 29 https://github.com/pytorch/pytorch/blob/3a79621c9dce17f77fbddc06aab21f6bc477f313/torch/_inductor/freezing.py#L178-L197 30 31 Folds constants in the graph module, not skipping constructors 32 33 Modifies the graph in-place and replaces node with constants 34 """ 35 cf = _TorchTensorRTConstantFolder(gm, skip_constructors=False) 36 cf.run() 37 38 for node, constant in cf.node_replacements.items(): 39 replace_node_with_constant(gm, node, constant) 40 41 erased_params = [] 42 for node in gm.graph.nodes: 43 # If get_attr node has no users, mark it for deletion 44 if node.op == "get_attr" and len(node.users) == 0: 45 erased_params.append(node) 46 47 # Remove unused nodes from the graph 48 for node in erased_params: 49 gm.graph.erase_node(node) 50 51 gm = clean_up_graph_after_modifications(gm) 52 53 logger.debug(f"Graph after constant folding:\n{gm.graph}") 54 55 return gm 56 57 58 # TODO: Delete this class when the following code is fixed in nightly: 59 # https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63 60 class _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc] 61 def __init__(self, *args: Any, **kwargs: Any) -> None: 62 super().__init__(*args, **kwargs) 63 64 # TODO: Update this function when quantization is added 65 def is_impure(self, node: torch.fx.node.Node) -> bool: 66 return False 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/py/torch_tensorrt/dynamo/conversion/_conversion.py b/py/torch_tensorrt/dynamo/conversion/_conversion.py --- a/py/torch_tensorrt/dynamo/conversion/_conversion.py +++ b/py/torch_tensorrt/dynamo/conversion/_conversion.py @@ -12,7 +12,7 @@ TRTInterpreterResult, ) from torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule, TorchTensorRTModule -from torch_tensorrt.dynamo.utils import get_torch_inputs, to_torch_device +from torch_tensorrt.dynamo.utils import get_torch_inputs def interpret_module_to_result( @@ -29,7 +29,6 @@ TRTInterpreterResult """ torch_inputs = get_torch_inputs(inputs, settings.device) - module.to(to_torch_device(settings.device)) module_outputs = module(*torch_inputs) if not isinstance(module_outputs, (list, tuple)): diff --git a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py --- a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py +++ b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py @@ -11,12 +11,9 @@ # Modify import location of utilities based on Torch version if version.parse(sanitized_torch_version()) < version.parse("2.1.1"): - from torch._inductor.freezing import ConstantFolder, replace_node_with_constant + from torch._inductor.freezing import ConstantFolder else: - from torch._inductor.constant_folding import ( - ConstantFolder, - replace_node_with_constant, - ) + from torch._inductor.constant_folding import ConstantFolder logger = logging.getLogger(__name__) @@ -36,7 +33,9 @@ cf.run() for node, constant in cf.node_replacements.items(): - replace_node_with_constant(gm, node, constant) + replace_node_with_constant( + gm, node, torch.nn.Parameter(constant.cuda(), requires_grad=False) + ) erased_params = [] for node in gm.graph.nodes: @@ -60,6 +59,40 @@ return gm +def replace_node_with_constant( + gm: torch.fx.GraphModule, node: torch.fx.Node, constant: torch.Tensor +) -> None: + """Adapted from: + https://github.com/pytorch/pytorch/blob/bcf35c6ae62bb6560befa3550e37a8283944e5f4/torch/_inductor/constant_folding.py#L17-L43 + + Modified to register parameters, instead of buffers for frozen constants + """ + g = gm.graph + + if not hasattr(gm, "_frozen_param_count"): + gm._frozen_param_count = 0 + + i = gm._frozen_param_count + + while True: + qualname = f"_frozen_param{i}" + if not hasattr(gm, qualname): + break + i += 1 + + gm._frozen_param_count = i + 1 + + with g.inserting_before(node): + new_input_node = g.create_node("get_attr", qualname, (), {}) + node.replace_all_uses_with(new_input_node) + new_input_node.meta.update(node.meta) + g.erase_node(node) + + # Needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning + gm.register_parameter(qualname, constant) + setattr(gm, qualname, constant) + + # TODO: Delete this class when the following code is fixed in nightly: # https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63 class _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc]
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/conversion/_conversion.py b/py/torch_tensorrt/dynamo/conversion/_conversion.py\n--- a/py/torch_tensorrt/dynamo/conversion/_conversion.py\n+++ b/py/torch_tensorrt/dynamo/conversion/_conversion.py\n@@ -12,7 +12,7 @@\n TRTInterpreterResult,\n )\n from torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule, TorchTensorRTModule\n-from torch_tensorrt.dynamo.utils import get_torch_inputs, to_torch_device\n+from torch_tensorrt.dynamo.utils import get_torch_inputs\n \n \n def interpret_module_to_result(\n@@ -29,7 +29,6 @@\n TRTInterpreterResult\n \"\"\"\n torch_inputs = get_torch_inputs(inputs, settings.device)\n- module.to(to_torch_device(settings.device))\n module_outputs = module(*torch_inputs)\n \n if not isinstance(module_outputs, (list, tuple)):\ndiff --git a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py\n--- a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py\n+++ b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py\n@@ -11,12 +11,9 @@\n \n # Modify import location of utilities based on Torch version\n if version.parse(sanitized_torch_version()) < version.parse(\"2.1.1\"):\n- from torch._inductor.freezing import ConstantFolder, replace_node_with_constant\n+ from torch._inductor.freezing import ConstantFolder\n else:\n- from torch._inductor.constant_folding import (\n- ConstantFolder,\n- replace_node_with_constant,\n- )\n+ from torch._inductor.constant_folding import ConstantFolder\n \n logger = logging.getLogger(__name__)\n \n@@ -36,7 +33,9 @@\n cf.run()\n \n for node, constant in cf.node_replacements.items():\n- replace_node_with_constant(gm, node, constant)\n+ replace_node_with_constant(\n+ gm, node, torch.nn.Parameter(constant.cuda(), requires_grad=False)\n+ )\n \n erased_params = []\n for node in gm.graph.nodes:\n@@ -60,6 +59,40 @@\n return gm\n \n \n+def replace_node_with_constant(\n+ gm: torch.fx.GraphModule, node: torch.fx.Node, constant: torch.Tensor\n+) -> None:\n+ \"\"\"Adapted from:\n+ https://github.com/pytorch/pytorch/blob/bcf35c6ae62bb6560befa3550e37a8283944e5f4/torch/_inductor/constant_folding.py#L17-L43\n+\n+ Modified to register parameters, instead of buffers for frozen constants\n+ \"\"\"\n+ g = gm.graph\n+\n+ if not hasattr(gm, \"_frozen_param_count\"):\n+ gm._frozen_param_count = 0\n+\n+ i = gm._frozen_param_count\n+\n+ while True:\n+ qualname = f\"_frozen_param{i}\"\n+ if not hasattr(gm, qualname):\n+ break\n+ i += 1\n+\n+ gm._frozen_param_count = i + 1\n+\n+ with g.inserting_before(node):\n+ new_input_node = g.create_node(\"get_attr\", qualname, (), {})\n+ node.replace_all_uses_with(new_input_node)\n+ new_input_node.meta.update(node.meta)\n+ g.erase_node(node)\n+\n+ # Needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning\n+ gm.register_parameter(qualname, constant)\n+ setattr(gm, qualname, constant)\n+\n+\n # TODO: Delete this class when the following code is fixed in nightly:\n # https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63\n class _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc]\n", "issue": "\ud83d\udc1b [Bug] Issue in conversion when parameters/buffers are moved during compilation\n## Bug Description\r\n\r\n### Bug 1\r\n```python\r\n File \"/root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch_tensorrt/dynamo/conversion/converter_utils.py\", line 491, in to_numpy\r\n 
output = value.cpu().detach().contiguous().numpy()\r\nRuntimeError: .numpy() is not supported for tensor subclasses.\r\n```\r\n### Suggested Fix 1\r\nNeed a custom version of the following function which registers a parameter, not a buffer\r\nhttps://github.com/pytorch/TensorRT/blob/afd5abebbffa49107bcc7766c9f00bd6be2e593c/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py#L39\r\n\r\n### Bug 2\r\n```python\r\nFile \"/root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch/_ops.py\", line 571, in __call__\r\n return self_._op(*args, **kwargs)\r\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and meta! (when checking argument for argument mat2 in method wrapper_CUDA_mm)\r\n```\r\n### Suggested Fix 2\r\nNeed to cast constant Tensors to `nn.Parameter` on `CUDA` at constant-folding time\r\nhttps://github.com/pytorch/TensorRT/blob/afd5abebbffa49107bcc7766c9f00bd6be2e593c/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py#L39\r\n\r\n### Bug 3\r\n```python\r\nFile \"<eval_with_key>.67 from /root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch/fx/experimental/proxy_tensor.py:569 in wrapped\", line 11, in forward\r\nFile \"/root/.pyenv/versions/3.10.13/lib/python3.10/site-packages/torch/_ops.py\", line 571, in __call__\r\n return self_._op(*args, **kwargs)\r\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and meta! (when checking argument for argument mat2 in method wrapper_CUDA_mm)\r\n```\r\n### Suggested Fix 3\r\nThis line needs to be removed, as it has unintended behavior when casting constant params\r\nhttps://github.com/pytorch/TensorRT/blob/afd5abebbffa49107bcc7766c9f00bd6be2e593c/py/torch_tensorrt/dynamo/conversion/_conversion.py#L32\r\n\r\n## Expected behavior\r\n\r\nModel should compile\r\n\r\n## Environment\r\n\r\n - Torch and Torch-TensorRT Version: `2.3.0.dev2024222+cu121`\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport io\nfrom typing import Sequence\n\nimport tensorrt as trt\nimport torch\nfrom torch_tensorrt._Input import Input\nfrom torch_tensorrt.dynamo._settings import CompilationSettings\nfrom torch_tensorrt.dynamo.conversion._TRTInterpreter import (\n TRTInterpreter,\n TRTInterpreterResult,\n)\nfrom torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule, TorchTensorRTModule\nfrom torch_tensorrt.dynamo.utils import get_torch_inputs, to_torch_device\n\n\ndef interpret_module_to_result(\n module: torch.fx.GraphModule,\n inputs: Sequence[Input],\n settings: CompilationSettings = CompilationSettings(),\n) -> TRTInterpreterResult:\n \"\"\"Interpret an FX module to a TRTInterpreterResult\n Args:\n module: FX GraphModule to interpret\n inputs: Sequence of Tensors representing inputs to the module\n settings: Compilation settings\n Returns:\n TRTInterpreterResult\n \"\"\"\n torch_inputs = get_torch_inputs(inputs, settings.device)\n module.to(to_torch_device(settings.device))\n module_outputs = module(*torch_inputs)\n\n if not isinstance(module_outputs, (list, tuple)):\n module_outputs = [module_outputs]\n\n # Int64 outputs can sometimes be generated from within other operators\n # such as aten.sum - such outputs can be truncated\n output_dtypes = []\n for output in module_outputs:\n if settings.truncate_long_and_double and output.dtype == torch.float64:\n output_dtypes.append(torch.float32)\n elif settings.truncate_long_and_double and output.dtype == torch.int64:\n output_dtypes.append(torch.int32)\n 
else:\n output_dtypes.append(output.dtype)\n\n interpreter = TRTInterpreter(\n module,\n inputs,\n logger_level=(trt.Logger.VERBOSE if settings.debug else trt.Logger.WARNING),\n output_dtypes=output_dtypes,\n compilation_settings=settings,\n )\n interpreter_result = interpreter.run()\n return interpreter_result\n\n\ndef convert_module(\n module: torch.fx.GraphModule,\n inputs: Sequence[Input],\n settings: CompilationSettings = CompilationSettings(),\n name: str = \"\",\n) -> PythonTorchTensorRTModule | TorchTensorRTModule:\n \"\"\"Convert an FX module to a TRT module\n Args:\n module: FX GraphModule to convert\n inputs: Sequence of Tensors representing inputs to the module\n settings: Compilation settings\n name: TRT engine name\n Returns:\n _PythonTorchTensorRTModule or TorchTensorRTModule\n \"\"\"\n interpreter_result = interpret_module_to_result(module, inputs, settings)\n\n if settings.use_python_runtime:\n return PythonTorchTensorRTModule(\n engine=interpreter_result.engine,\n input_names=list(interpreter_result.input_names),\n output_names=list(interpreter_result.output_names),\n target_device=settings.device,\n profiling_enabled=settings.debug,\n )\n\n else:\n from torch_tensorrt.dynamo.runtime import TorchTensorRTModule\n\n with io.BytesIO() as engine_bytes:\n engine_bytes.write(interpreter_result.engine.serialize())\n engine_str = engine_bytes.getvalue()\n return TorchTensorRTModule(\n serialized_engine=engine_str,\n name=name,\n input_binding_names=list(interpreter_result.input_names),\n output_binding_names=list(interpreter_result.output_names),\n target_device=settings.device,\n hardware_compatible=settings.hardware_compatible,\n )\n", "path": "py/torch_tensorrt/dynamo/conversion/_conversion.py"}, {"content": "import logging\nfrom typing import Any, Sequence\n\nimport torch\nfrom torch_tensorrt._utils import sanitized_torch_version\nfrom torch_tensorrt.dynamo.lowering.passes.pass_utils import (\n clean_up_graph_after_modifications,\n)\n\nfrom packaging import version\n\n# Modify import location of utilities based on Torch version\nif version.parse(sanitized_torch_version()) < version.parse(\"2.1.1\"):\n from torch._inductor.freezing import ConstantFolder, replace_node_with_constant\nelse:\n from torch._inductor.constant_folding import (\n ConstantFolder,\n replace_node_with_constant,\n )\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]._python_dispatch._disable_current_modes() # type: ignore\ndef constant_fold(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor]\n) -> torch.fx.GraphModule:\n \"\"\"Adapted from:\n https://github.com/pytorch/pytorch/blob/3a79621c9dce17f77fbddc06aab21f6bc477f313/torch/_inductor/freezing.py#L178-L197\n\n Folds constants in the graph module, not skipping constructors\n\n Modifies the graph in-place and replaces node with constants\n \"\"\"\n cf = _TorchTensorRTConstantFolder(gm, skip_constructors=False)\n cf.run()\n\n for node, constant in cf.node_replacements.items():\n replace_node_with_constant(gm, node, constant)\n\n erased_params = []\n for node in gm.graph.nodes:\n # If get_attr node has no users, mark it for deletion\n if node.op == \"get_attr\" and len(node.users) == 0:\n erased_params.append(node)\n\n # Remove unused nodes from the graph\n for node in erased_params:\n gm.graph.erase_node(node)\n\n gm = clean_up_graph_after_modifications(gm)\n\n logger.debug(f\"Graph after constant folding:\\n{gm.graph}\")\n\n return gm\n\n\n# TODO: Delete this class when the following code is fixed in nightly:\n# 
https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63\nclass _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc]\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n\n # TODO: Update this function when quantization is added\n def is_impure(self, node: torch.fx.node.Node) -> bool:\n return False\n", "path": "py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport io\nfrom typing import Sequence\n\nimport tensorrt as trt\nimport torch\nfrom torch_tensorrt._Input import Input\nfrom torch_tensorrt.dynamo._settings import CompilationSettings\nfrom torch_tensorrt.dynamo.conversion._TRTInterpreter import (\n TRTInterpreter,\n TRTInterpreterResult,\n)\nfrom torch_tensorrt.dynamo.runtime import PythonTorchTensorRTModule, TorchTensorRTModule\nfrom torch_tensorrt.dynamo.utils import get_torch_inputs\n\n\ndef interpret_module_to_result(\n module: torch.fx.GraphModule,\n inputs: Sequence[Input],\n settings: CompilationSettings = CompilationSettings(),\n) -> TRTInterpreterResult:\n \"\"\"Interpret an FX module to a TRTInterpreterResult\n Args:\n module: FX GraphModule to interpret\n inputs: Sequence of Tensors representing inputs to the module\n settings: Compilation settings\n Returns:\n TRTInterpreterResult\n \"\"\"\n torch_inputs = get_torch_inputs(inputs, settings.device)\n module_outputs = module(*torch_inputs)\n\n if not isinstance(module_outputs, (list, tuple)):\n module_outputs = [module_outputs]\n\n # Int64 outputs can sometimes be generated from within other operators\n # such as aten.sum - such outputs can be truncated\n output_dtypes = []\n for output in module_outputs:\n if settings.truncate_long_and_double and output.dtype == torch.float64:\n output_dtypes.append(torch.float32)\n elif settings.truncate_long_and_double and output.dtype == torch.int64:\n output_dtypes.append(torch.int32)\n else:\n output_dtypes.append(output.dtype)\n\n interpreter = TRTInterpreter(\n module,\n inputs,\n logger_level=(trt.Logger.VERBOSE if settings.debug else trt.Logger.WARNING),\n output_dtypes=output_dtypes,\n compilation_settings=settings,\n )\n interpreter_result = interpreter.run()\n return interpreter_result\n\n\ndef convert_module(\n module: torch.fx.GraphModule,\n inputs: Sequence[Input],\n settings: CompilationSettings = CompilationSettings(),\n name: str = \"\",\n) -> PythonTorchTensorRTModule | TorchTensorRTModule:\n \"\"\"Convert an FX module to a TRT module\n Args:\n module: FX GraphModule to convert\n inputs: Sequence of Tensors representing inputs to the module\n settings: Compilation settings\n name: TRT engine name\n Returns:\n _PythonTorchTensorRTModule or TorchTensorRTModule\n \"\"\"\n interpreter_result = interpret_module_to_result(module, inputs, settings)\n\n if settings.use_python_runtime:\n return PythonTorchTensorRTModule(\n engine=interpreter_result.engine,\n input_names=list(interpreter_result.input_names),\n output_names=list(interpreter_result.output_names),\n target_device=settings.device,\n profiling_enabled=settings.debug,\n )\n\n else:\n from torch_tensorrt.dynamo.runtime import TorchTensorRTModule\n\n with io.BytesIO() as engine_bytes:\n engine_bytes.write(interpreter_result.engine.serialize())\n engine_str = engine_bytes.getvalue()\n return TorchTensorRTModule(\n serialized_engine=engine_str,\n name=name,\n input_binding_names=list(interpreter_result.input_names),\n 
output_binding_names=list(interpreter_result.output_names),\n target_device=settings.device,\n hardware_compatible=settings.hardware_compatible,\n )\n", "path": "py/torch_tensorrt/dynamo/conversion/_conversion.py"}, {"content": "import logging\nfrom typing import Any, Sequence\n\nimport torch\nfrom torch_tensorrt._utils import sanitized_torch_version\nfrom torch_tensorrt.dynamo.lowering.passes.pass_utils import (\n clean_up_graph_after_modifications,\n)\n\nfrom packaging import version\n\n# Modify import location of utilities based on Torch version\nif version.parse(sanitized_torch_version()) < version.parse(\"2.1.1\"):\n from torch._inductor.freezing import ConstantFolder\nelse:\n from torch._inductor.constant_folding import ConstantFolder\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]._python_dispatch._disable_current_modes() # type: ignore\ndef constant_fold(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor]\n) -> torch.fx.GraphModule:\n \"\"\"Adapted from:\n https://github.com/pytorch/pytorch/blob/3a79621c9dce17f77fbddc06aab21f6bc477f313/torch/_inductor/freezing.py#L178-L197\n\n Folds constants in the graph module, not skipping constructors\n\n Modifies the graph in-place and replaces node with constants\n \"\"\"\n cf = _TorchTensorRTConstantFolder(gm, skip_constructors=False)\n cf.run()\n\n for node, constant in cf.node_replacements.items():\n replace_node_with_constant(\n gm, node, torch.nn.Parameter(constant.cuda(), requires_grad=False)\n )\n\n erased_params = []\n for node in gm.graph.nodes:\n # If get_attr node has no users, mark it for deletion\n if node.op == \"get_attr\" and len(node.users) == 0:\n # If the node's parameter is not a parameter of any other node, remove it\n if not any(\n other.target == node.target for other in gm.graph.nodes if other != node\n ):\n delattr(gm, node.target)\n erased_params.append(node)\n\n # Remove unused nodes from the graph\n for node in erased_params:\n gm.graph.erase_node(node)\n\n gm = clean_up_graph_after_modifications(gm)\n\n logger.debug(f\"Graph after constant folding:\\n{gm.graph}\")\n\n return gm\n\n\ndef replace_node_with_constant(\n gm: torch.fx.GraphModule, node: torch.fx.Node, constant: torch.Tensor\n) -> None:\n \"\"\"Adapted from:\n https://github.com/pytorch/pytorch/blob/bcf35c6ae62bb6560befa3550e37a8283944e5f4/torch/_inductor/constant_folding.py#L17-L43\n\n Modified to register parameters, instead of buffers for frozen constants\n \"\"\"\n g = gm.graph\n\n if not hasattr(gm, \"_frozen_param_count\"):\n gm._frozen_param_count = 0\n\n i = gm._frozen_param_count\n\n while True:\n qualname = f\"_frozen_param{i}\"\n if not hasattr(gm, qualname):\n break\n i += 1\n\n gm._frozen_param_count = i + 1\n\n with g.inserting_before(node):\n new_input_node = g.create_node(\"get_attr\", qualname, (), {})\n node.replace_all_uses_with(new_input_node)\n new_input_node.meta.update(node.meta)\n g.erase_node(node)\n\n # Needed to suppress `does not reference an nn.Module, nn.Parameter, or buffer` warning\n gm.register_parameter(qualname, constant)\n setattr(gm, qualname, constant)\n\n\n# TODO: Delete this class when the following code is fixed in nightly:\n# https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63\nclass _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc]\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n\n # TODO: Update this function when quantization is added\n def 
is_impure(self, node: torch.fx.node.Node) -> bool:\n return False\n", "path": "py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py"}]}
2,600
930